path: root/collectors
author     Daniel Baumann <daniel.baumann@progress-linux.org>  2018-11-07 12:22:44 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2018-11-07 12:22:44 +0000
commit     1e6c93250172946eeb38e94a92a1fd12c9d3011e (patch)
tree       8ca5e16dfc7ad6b3bf2738ca0a48408a950f8f7e /collectors
parent     Update watch file (diff)
download   netdata-1e6c93250172946eeb38e94a92a1fd12c9d3011e.tar.xz
           netdata-1e6c93250172946eeb38e94a92a1fd12c9d3011e.zip
Merging upstream version 1.11.0+dfsg.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'collectors')
-rw-r--r--collectors/Makefile.am28
-rw-r--r--collectors/Makefile.in663
-rw-r--r--collectors/README.md118
-rw-r--r--collectors/all.h318
-rw-r--r--collectors/apps.plugin/Makefile.am13
-rw-r--r--collectors/apps.plugin/Makefile.in521
-rw-r--r--collectors/apps.plugin/README.md372
-rw-r--r--collectors/apps.plugin/apps_groups.conf286
-rw-r--r--collectors/apps.plugin/apps_plugin.c3799
-rw-r--r--collectors/cgroups.plugin/Makefile.am21
-rw-r--r--collectors/cgroups.plugin/Makefile.in563
-rw-r--r--collectors/cgroups.plugin/README.md187
-rw-r--r--collectors/cgroups.plugin/cgroup-name.sh196
-rwxr-xr-xcollectors/cgroups.plugin/cgroup-name.sh.in196
-rwxr-xr-xcollectors/cgroups.plugin/cgroup-network-helper.sh258
-rw-r--r--collectors/cgroups.plugin/cgroup-network.c682
-rw-r--r--collectors/cgroups.plugin/sys_fs_cgroup.c2771
-rw-r--r--collectors/cgroups.plugin/sys_fs_cgroup.h31
-rw-r--r--collectors/charts.d.plugin/Makefile.am62
-rw-r--r--collectors/charts.d.plugin/Makefile.in953
-rw-r--r--collectors/charts.d.plugin/README.md193
-rw-r--r--collectors/charts.d.plugin/ap/Makefile.inc13
-rw-r--r--collectors/charts.d.plugin/ap/README.md84
-rw-r--r--collectors/charts.d.plugin/ap/ap.chart.sh182
-rw-r--r--collectors/charts.d.plugin/ap/ap.conf23
-rw-r--r--collectors/charts.d.plugin/apache/Makefile.inc13
-rw-r--r--collectors/charts.d.plugin/apache/README.md127
-rw-r--r--collectors/charts.d.plugin/apache/apache.chart.sh258
-rw-r--r--collectors/charts.d.plugin/apache/apache.conf30
-rw-r--r--collectors/charts.d.plugin/apcupsd/Makefile.inc13
-rw-r--r--collectors/charts.d.plugin/apcupsd/README.md0
-rw-r--r--collectors/charts.d.plugin/apcupsd/apcupsd.chart.sh201
-rw-r--r--collectors/charts.d.plugin/apcupsd/apcupsd.conf25
-rw-r--r--collectors/charts.d.plugin/charts.d.conf63
-rwxr-xr-xcollectors/charts.d.plugin/charts.d.dryrun-helper.sh78
-rw-r--r--collectors/charts.d.plugin/charts.d.plugin743
-rwxr-xr-xcollectors/charts.d.plugin/charts.d.plugin.in743
-rw-r--r--collectors/charts.d.plugin/cpu_apps/Makefile.inc13
-rw-r--r--collectors/charts.d.plugin/cpu_apps/README.md2
-rw-r--r--collectors/charts.d.plugin/cpu_apps/cpu_apps.chart.sh72
-rw-r--r--collectors/charts.d.plugin/cpu_apps/cpu_apps.conf19
-rw-r--r--collectors/charts.d.plugin/cpufreq/Makefile.inc13
-rw-r--r--collectors/charts.d.plugin/cpufreq/README.md2
-rw-r--r--collectors/charts.d.plugin/cpufreq/cpufreq.chart.sh90
-rw-r--r--collectors/charts.d.plugin/cpufreq/cpufreq.conf24
-rw-r--r--collectors/charts.d.plugin/example/Makefile.inc13
-rw-r--r--collectors/charts.d.plugin/example/README.md2
-rw-r--r--collectors/charts.d.plugin/example/example.chart.sh126
-rw-r--r--collectors/charts.d.plugin/example/example.conf21
-rw-r--r--collectors/charts.d.plugin/exim/Makefile.inc13
-rw-r--r--collectors/charts.d.plugin/exim/README.md2
-rw-r--r--collectors/charts.d.plugin/exim/exim.chart.sh48
-rw-r--r--collectors/charts.d.plugin/exim/exim.conf24
-rw-r--r--collectors/charts.d.plugin/hddtemp/Makefile.inc13
-rw-r--r--collectors/charts.d.plugin/hddtemp/README.md28
-rw-r--r--collectors/charts.d.plugin/hddtemp/hddtemp.chart.sh77
-rw-r--r--collectors/charts.d.plugin/hddtemp/hddtemp.conf23
-rw-r--r--collectors/charts.d.plugin/libreswan/Makefile.inc13
-rw-r--r--collectors/charts.d.plugin/libreswan/README.md42
-rw-r--r--collectors/charts.d.plugin/libreswan/libreswan.chart.sh176
-rw-r--r--collectors/charts.d.plugin/libreswan/libreswan.conf29
-rw-r--r--collectors/charts.d.plugin/load_average/Makefile.inc13
-rw-r--r--collectors/charts.d.plugin/load_average/README.md2
-rw-r--r--collectors/charts.d.plugin/load_average/load_average.chart.sh71
-rw-r--r--collectors/charts.d.plugin/load_average/load_average.conf22
-rw-r--r--collectors/charts.d.plugin/loopsleepms.sh.inc237
-rw-r--r--collectors/charts.d.plugin/mem_apps/Makefile.inc13
-rw-r--r--collectors/charts.d.plugin/mem_apps/README.md2
-rw-r--r--collectors/charts.d.plugin/mem_apps/mem_apps.chart.sh63
-rw-r--r--collectors/charts.d.plugin/mem_apps/mem_apps.conf19
-rw-r--r--collectors/charts.d.plugin/mysql/Makefile.inc13
-rw-r--r--collectors/charts.d.plugin/mysql/README.md81
-rw-r--r--collectors/charts.d.plugin/mysql/mysql.chart.sh528
-rw-r--r--collectors/charts.d.plugin/mysql/mysql.conf23
-rw-r--r--collectors/charts.d.plugin/nginx/Makefile.inc13
-rw-r--r--collectors/charts.d.plugin/nginx/README.md2
-rw-r--r--collectors/charts.d.plugin/nginx/nginx.chart.sh144
-rw-r--r--collectors/charts.d.plugin/nginx/nginx.conf23
-rw-r--r--collectors/charts.d.plugin/nut/Makefile.inc13
-rw-r--r--collectors/charts.d.plugin/nut/README.md59
-rw-r--r--collectors/charts.d.plugin/nut/nut.chart.sh241
-rw-r--r--collectors/charts.d.plugin/nut/nut.conf33
-rw-r--r--collectors/charts.d.plugin/opensips/Makefile.inc13
-rw-r--r--collectors/charts.d.plugin/opensips/README.md0
-rw-r--r--collectors/charts.d.plugin/opensips/opensips.chart.sh326
-rw-r--r--collectors/charts.d.plugin/opensips/opensips.conf21
-rw-r--r--collectors/charts.d.plugin/phpfpm/Makefile.inc13
-rw-r--r--collectors/charts.d.plugin/phpfpm/README.md2
-rw-r--r--collectors/charts.d.plugin/phpfpm/phpfpm.chart.sh198
-rw-r--r--collectors/charts.d.plugin/phpfpm/phpfpm.conf27
-rw-r--r--collectors/charts.d.plugin/postfix/Makefile.inc13
-rw-r--r--collectors/charts.d.plugin/postfix/README.md26
-rw-r--r--collectors/charts.d.plugin/postfix/postfix.chart.sh89
-rw-r--r--collectors/charts.d.plugin/postfix/postfix.conf25
-rw-r--r--collectors/charts.d.plugin/sensors/Makefile.inc13
-rw-r--r--collectors/charts.d.plugin/sensors/README.md52
-rw-r--r--collectors/charts.d.plugin/sensors/sensors.chart.sh255
-rw-r--r--collectors/charts.d.plugin/sensors/sensors.conf32
-rw-r--r--collectors/charts.d.plugin/squid/Makefile.inc13
-rw-r--r--collectors/charts.d.plugin/squid/README.md66
-rw-r--r--collectors/charts.d.plugin/squid/squid.chart.sh147
-rw-r--r--collectors/charts.d.plugin/squid/squid.conf26
-rw-r--r--collectors/charts.d.plugin/tomcat/Makefile.inc13
-rw-r--r--collectors/charts.d.plugin/tomcat/README.md2
-rw-r--r--collectors/charts.d.plugin/tomcat/tomcat.chart.sh150
-rw-r--r--collectors/charts.d.plugin/tomcat/tomcat.conf38
-rw-r--r--collectors/checks.plugin/Makefile.am4
-rw-r--r--collectors/checks.plugin/Makefile.in457
-rw-r--r--collectors/checks.plugin/plugin_checks.c129
-rw-r--r--collectors/checks.plugin/plugin_checks.h29
-rw-r--r--collectors/diskspace.plugin/Makefile.am8
-rw-r--r--collectors/diskspace.plugin/Makefile.in464
-rw-r--r--collectors/diskspace.plugin/README.md5
-rw-r--r--collectors/diskspace.plugin/plugin_diskspace.c465
-rw-r--r--collectors/diskspace.plugin/plugin_diskspace.h34
-rw-r--r--collectors/fping.plugin/Makefile.am24
-rw-r--r--collectors/fping.plugin/Makefile.in591
-rw-r--r--collectors/fping.plugin/README.md96
-rw-r--r--collectors/fping.plugin/fping.conf44
-rw-r--r--collectors/fping.plugin/fping.plugin200
-rwxr-xr-xcollectors/fping.plugin/fping.plugin.in200
-rw-r--r--collectors/freebsd.plugin/Makefile.am5
-rw-r--r--collectors/freebsd.plugin/Makefile.in457
-rw-r--r--collectors/freebsd.plugin/freebsd_devstat.c780
-rw-r--r--collectors/freebsd.plugin/freebsd_getifaddrs.c618
-rw-r--r--collectors/freebsd.plugin/freebsd_getmntinfo.c301
-rw-r--r--collectors/freebsd.plugin/freebsd_ipfw.c372
-rw-r--r--collectors/freebsd.plugin/freebsd_kstat_zfs.c300
-rw-r--r--collectors/freebsd.plugin/freebsd_sysctl.c3188
-rw-r--r--collectors/freebsd.plugin/plugin_freebsd.c175
-rw-r--r--collectors/freebsd.plugin/plugin_freebsd.h74
-rw-r--r--collectors/freeipmi.plugin/Makefile.am8
-rw-r--r--collectors/freeipmi.plugin/Makefile.in464
-rw-r--r--collectors/freeipmi.plugin/README.md180
-rw-r--r--collectors/freeipmi.plugin/freeipmi_plugin.c1760
-rw-r--r--collectors/idlejitter.plugin/Makefile.am8
-rw-r--r--collectors/idlejitter.plugin/Makefile.in464
-rw-r--r--collectors/idlejitter.plugin/README.md13
-rw-r--r--collectors/idlejitter.plugin/plugin_idlejitter.c92
-rw-r--r--collectors/idlejitter.plugin/plugin_idlejitter.h21
-rw-r--r--collectors/macos.plugin/Makefile.am4
-rw-r--r--collectors/macos.plugin/Makefile.in457
-rw-r--r--collectors/macos.plugin/macos_fw.c687
-rw-r--r--collectors/macos.plugin/macos_mach_smi.c241
-rw-r--r--collectors/macos.plugin/macos_sysctl.c1492
-rw-r--r--collectors/macos.plugin/plugin_macos.c69
-rw-r--r--collectors/macos.plugin/plugin_macos.h43
-rw-r--r--collectors/nfacct.plugin/Makefile.am8
-rw-r--r--collectors/nfacct.plugin/Makefile.in464
-rw-r--r--collectors/nfacct.plugin/README.md10
-rw-r--r--collectors/nfacct.plugin/plugin_nfacct.c822
-rw-r--r--collectors/nfacct.plugin/plugin_nfacct.h30
-rw-r--r--collectors/node.d.plugin/Makefile.am59
-rw-r--r--collectors/node.d.plugin/Makefile.in805
-rw-r--r--collectors/node.d.plugin/README.md218
-rw-r--r--collectors/node.d.plugin/fronius/Makefile.inc13
-rw-r--r--collectors/node.d.plugin/fronius/README.md120
-rw-r--r--collectors/node.d.plugin/fronius/fronius.node.js400
-rw-r--r--collectors/node.d.plugin/named/Makefile.inc13
-rw-r--r--collectors/node.d.plugin/named/README.md342
-rw-r--r--collectors/node.d.plugin/named/named.node.js610
-rw-r--r--collectors/node.d.plugin/node.d.conf39
-rw-r--r--collectors/node.d.plugin/node.d.plugin303
-rwxr-xr-xcollectors/node.d.plugin/node.d.plugin.in303
-rw-r--r--collectors/node.d.plugin/node_modules/asn1-ber.js7
-rw-r--r--collectors/node.d.plugin/node_modules/extend.js88
-rw-r--r--collectors/node.d.plugin/node_modules/lib/ber/errors.js10
-rw-r--r--collectors/node.d.plugin/node_modules/lib/ber/index.js18
-rw-r--r--collectors/node.d.plugin/node_modules/lib/ber/reader.js270
-rw-r--r--collectors/node.d.plugin/node_modules/lib/ber/types.js35
-rw-r--r--collectors/node.d.plugin/node_modules/lib/ber/writer.js318
-rw-r--r--collectors/node.d.plugin/node_modules/net-snmp.js1465
-rw-r--r--collectors/node.d.plugin/node_modules/netdata.js654
-rw-r--r--collectors/node.d.plugin/node_modules/pixl-xml.js607
-rw-r--r--collectors/node.d.plugin/sma_webbox/Makefile.inc13
-rw-r--r--collectors/node.d.plugin/sma_webbox/README.md25
-rw-r--r--collectors/node.d.plugin/sma_webbox/sma_webbox.node.js238
-rw-r--r--collectors/node.d.plugin/snmp/Makefile.inc13
-rw-r--r--collectors/node.d.plugin/snmp/README.md357
-rw-r--r--collectors/node.d.plugin/snmp/snmp.node.js516
-rw-r--r--collectors/node.d.plugin/stiebeleltron/Makefile.inc13
-rw-r--r--collectors/node.d.plugin/stiebeleltron/README.md505
-rw-r--r--collectors/node.d.plugin/stiebeleltron/stiebeleltron.node.js197
-rw-r--r--collectors/plugins.d/Makefile.am11
-rw-r--r--collectors/plugins.d/Makefile.in647
-rw-r--r--collectors/plugins.d/README.md472
-rw-r--r--collectors/plugins.d/plugins_d.c696
-rw-r--r--collectors/plugins.d/plugins_d.h73
-rw-r--r--collectors/proc.plugin/Makefile.am8
-rw-r--r--collectors/proc.plugin/Makefile.in464
-rw-r--r--collectors/proc.plugin/README.md200
-rw-r--r--collectors/proc.plugin/ipc.c263
-rw-r--r--collectors/proc.plugin/plugin_proc.c217
-rw-r--r--collectors/proc.plugin/plugin_proc.h74
-rw-r--r--collectors/proc.plugin/proc_diskstats.c1649
-rw-r--r--collectors/proc.plugin/proc_interrupts.c248
-rw-r--r--collectors/proc.plugin/proc_loadavg.c124
-rw-r--r--collectors/proc.plugin/proc_meminfo.c519
-rw-r--r--collectors/proc.plugin/proc_net_dev.c912
-rw-r--r--collectors/proc.plugin/proc_net_ip_vs_stats.c133
-rw-r--r--collectors/proc.plugin/proc_net_netstat.c818
-rw-r--r--collectors/proc.plugin/proc_net_rpc_nfs.c454
-rw-r--r--collectors/proc.plugin/proc_net_rpc_nfsd.c1006
-rw-r--r--collectors/proc.plugin/proc_net_sctp_snmp.c352
-rw-r--r--collectors/proc.plugin/proc_net_snmp.c1085
-rw-r--r--collectors/proc.plugin/proc_net_snmp6.c1268
-rw-r--r--collectors/proc.plugin/proc_net_sockstat.c518
-rw-r--r--collectors/proc.plugin/proc_net_sockstat6.c273
-rw-r--r--collectors/proc.plugin/proc_net_softnet_stat.c151
-rw-r--r--collectors/proc.plugin/proc_net_stat_conntrack.c351
-rw-r--r--collectors/proc.plugin/proc_net_stat_synproxy.c185
-rw-r--r--collectors/proc.plugin/proc_self_mountinfo.c403
-rw-r--r--collectors/proc.plugin/proc_self_mountinfo.h57
-rw-r--r--collectors/proc.plugin/proc_softirqs.c242
-rw-r--r--collectors/proc.plugin/proc_spl_kstat_zfs.c155
-rw-r--r--collectors/proc.plugin/proc_stat.c570
-rw-r--r--collectors/proc.plugin/proc_sys_kernel_random_entropy_avail.c49
-rw-r--r--collectors/proc.plugin/proc_uptime.c105
-rw-r--r--collectors/proc.plugin/proc_vmstat.c259
-rw-r--r--collectors/proc.plugin/sys_devices_system_edac_mc.c206
-rw-r--r--collectors/proc.plugin/sys_devices_system_node.c163
-rw-r--r--collectors/proc.plugin/sys_fs_btrfs.c722
-rw-r--r--collectors/proc.plugin/sys_kernel_mm_ksm.c201
-rw-r--r--collectors/proc.plugin/zfs_common.c714
-rw-r--r--collectors/proc.plugin/zfs_common.h115
-rw-r--r--collectors/python.d.plugin/Makefile.am244
-rw-r--r--collectors/python.d.plugin/Makefile.in1987
-rw-r--r--collectors/python.d.plugin/README.md198
-rw-r--r--collectors/python.d.plugin/adaptec_raid/Makefile.inc13
-rw-r--r--collectors/python.d.plugin/adaptec_raid/README.md46
-rw-r--r--collectors/python.d.plugin/adaptec_raid/adaptec_raid.chart.py247
-rw-r--r--collectors/python.d.plugin/adaptec_raid/adaptec_raid.conf55
-rw-r--r--collectors/python.d.plugin/apache/Makefile.inc13
-rw-r--r--collectors/python.d.plugin/apache/README.md59
-rw-r--r--collectors/python.d.plugin/apache/apache.chart.py132
-rw-r--r--collectors/python.d.plugin/apache/apache.conf87
-rw-r--r--collectors/python.d.plugin/beanstalk/Makefile.inc13
-rw-r--r--collectors/python.d.plugin/beanstalk/README.md103
-rw-r--r--collectors/python.d.plugin/beanstalk/beanstalk.chart.py247
-rw-r--r--collectors/python.d.plugin/beanstalk/beanstalk.conf80
-rw-r--r--collectors/python.d.plugin/bind_rndc/Makefile.inc13
-rw-r--r--collectors/python.d.plugin/bind_rndc/README.md60
-rw-r--r--collectors/python.d.plugin/bind_rndc/bind_rndc.chart.py240
-rw-r--r--collectors/python.d.plugin/bind_rndc/bind_rndc.conf112
-rw-r--r--collectors/python.d.plugin/boinc/Makefile.inc13
-rw-r--r--collectors/python.d.plugin/boinc/README.md28
-rw-r--r--collectors/python.d.plugin/boinc/boinc.chart.py162
-rw-r--r--collectors/python.d.plugin/boinc/boinc.conf68
-rw-r--r--collectors/python.d.plugin/ceph/Makefile.inc13
-rw-r--r--collectors/python.d.plugin/ceph/README.md32
-rw-r--r--collectors/python.d.plugin/ceph/ceph.chart.py345
-rw-r--r--collectors/python.d.plugin/ceph/ceph.conf75
-rw-r--r--collectors/python.d.plugin/chrony/Makefile.inc13
-rw-r--r--collectors/python.d.plugin/chrony/README.md31
-rw-r--r--collectors/python.d.plugin/chrony/chrony.chart.py110
-rw-r--r--collectors/python.d.plugin/chrony/chrony.conf79
-rw-r--r--collectors/python.d.plugin/couchdb/Makefile.inc13
-rw-r--r--collectors/python.d.plugin/couchdb/README.md35
-rw-r--r--collectors/python.d.plugin/couchdb/couchdb.chart.py411
-rw-r--r--collectors/python.d.plugin/couchdb/couchdb.conf91
-rw-r--r--collectors/python.d.plugin/cpufreq/Makefile.inc13
-rw-r--r--collectors/python.d.plugin/cpufreq/README.md30
-rw-r--r--collectors/python.d.plugin/cpufreq/cpufreq.chart.py115
-rw-r--r--collectors/python.d.plugin/cpufreq/cpufreq.conf43
-rw-r--r--collectors/python.d.plugin/cpuidle/Makefile.inc13
-rw-r--r--collectors/python.d.plugin/cpuidle/README.md11
-rw-r--r--collectors/python.d.plugin/cpuidle/cpuidle.chart.py148
-rw-r--r--collectors/python.d.plugin/cpuidle/cpuidle.conf40
-rw-r--r--collectors/python.d.plugin/dns_query_time/Makefile.inc13
-rw-r--r--collectors/python.d.plugin/dns_query_time/README.md10
-rw-r--r--collectors/python.d.plugin/dns_query_time/dns_query_time.chart.py145
-rw-r--r--collectors/python.d.plugin/dns_query_time/dns_query_time.conf71
-rw-r--r--collectors/python.d.plugin/dnsdist/Makefile.inc13
-rw-r--r--collectors/python.d.plugin/dnsdist/README.md54
-rw-r--r--collectors/python.d.plugin/dnsdist/dnsdist.chart.py133
-rw-r--r--collectors/python.d.plugin/dnsdist/dnsdist.conf85
-rw-r--r--collectors/python.d.plugin/dockerd/Makefile.inc13
-rw-r--r--collectors/python.d.plugin/dockerd/README.md26
-rw-r--r--collectors/python.d.plugin/dockerd/dockerd.chart.py77
-rw-r--r--collectors/python.d.plugin/dockerd/dockerd.conf79
-rw-r--r--collectors/python.d.plugin/dovecot/Makefile.inc13
-rw-r--r--collectors/python.d.plugin/dovecot/README.md73
-rw-r--r--collectors/python.d.plugin/dovecot/dovecot.chart.py147
-rw-r--r--collectors/python.d.plugin/dovecot/dovecot.conf96
-rw-r--r--collectors/python.d.plugin/elasticsearch/Makefile.inc13
-rw-r--r--collectors/python.d.plugin/elasticsearch/README.md60
-rw-r--r--collectors/python.d.plugin/elasticsearch/elasticsearch.chart.py644
-rw-r--r--collectors/python.d.plugin/elasticsearch/elasticsearch.conf83
-rw-r--r--collectors/python.d.plugin/example/Makefile.inc13
-rw-r--r--collectors/python.d.plugin/example/README.md1
-rw-r--r--collectors/python.d.plugin/example/example.chart.py48
-rw-r--r--collectors/python.d.plugin/example/example.conf70
-rw-r--r--collectors/python.d.plugin/exim/Makefile.inc13
-rw-r--r--collectors/python.d.plugin/exim/README.md13
-rw-r--r--collectors/python.d.plugin/exim/exim.chart.py41
-rw-r--r--collectors/python.d.plugin/exim/exim.conf93
-rw-r--r--collectors/python.d.plugin/fail2ban/Makefile.inc13
-rw-r--r--collectors/python.d.plugin/fail2ban/README.md23
-rw-r--r--collectors/python.d.plugin/fail2ban/fail2ban.chart.py196
-rw-r--r--collectors/python.d.plugin/fail2ban/fail2ban.conf70
-rw-r--r--collectors/python.d.plugin/freeradius/Makefile.inc13
-rw-r--r--collectors/python.d.plugin/freeradius/README.md70
-rw-r--r--collectors/python.d.plugin/freeradius/freeradius.chart.py129
-rw-r--r--collectors/python.d.plugin/freeradius/freeradius.conf82
-rw-r--r--collectors/python.d.plugin/go_expvar/Makefile.inc13
-rw-r--r--collectors/python.d.plugin/go_expvar/README.md276
-rw-r--r--collectors/python.d.plugin/go_expvar/go_expvar.chart.py245
-rw-r--r--collectors/python.d.plugin/go_expvar/go_expvar.conf110
-rw-r--r--collectors/python.d.plugin/haproxy/Makefile.inc13
-rw-r--r--collectors/python.d.plugin/haproxy/README.md49
-rw-r--r--collectors/python.d.plugin/haproxy/haproxy.chart.py370
-rw-r--r--collectors/python.d.plugin/haproxy/haproxy.conf85
-rw-r--r--collectors/python.d.plugin/hddtemp/Makefile.inc13
-rw-r--r--collectors/python.d.plugin/hddtemp/README.md22
-rw-r--r--collectors/python.d.plugin/hddtemp/hddtemp.chart.py100
-rw-r--r--collectors/python.d.plugin/hddtemp/hddtemp.conf97
-rw-r--r--collectors/python.d.plugin/httpcheck/Makefile.inc13
-rw-r--r--collectors/python.d.plugin/httpcheck/README.md41
-rw-r--r--collectors/python.d.plugin/httpcheck/httpcheck.chart.py121
-rw-r--r--collectors/python.d.plugin/httpcheck/httpcheck.conf100
-rw-r--r--collectors/python.d.plugin/icecast/Makefile.inc13
-rw-r--r--collectors/python.d.plugin/icecast/README.md26
-rw-r--r--collectors/python.d.plugin/icecast/icecast.chart.py97
-rw-r--r--collectors/python.d.plugin/icecast/icecast.conf83
-rw-r--r--collectors/python.d.plugin/ipfs/Makefile.inc13
-rw-r--r--collectors/python.d.plugin/ipfs/README.md25
-rw-r--r--collectors/python.d.plugin/ipfs/ipfs.chart.py140
-rw-r--r--collectors/python.d.plugin/ipfs/ipfs.conf79
-rw-r--r--collectors/python.d.plugin/isc_dhcpd/Makefile.inc13
-rw-r--r--collectors/python.d.plugin/isc_dhcpd/README.md34
-rw-r--r--collectors/python.d.plugin/isc_dhcpd/isc_dhcpd.chart.py195
-rw-r--r--collectors/python.d.plugin/isc_dhcpd/isc_dhcpd.conf81
-rw-r--r--collectors/python.d.plugin/linux_power_supply/Makefile.inc13
-rw-r--r--collectors/python.d.plugin/linux_power_supply/README.md67
-rw-r--r--collectors/python.d.plugin/linux_power_supply/linux_power_supply.chart.py160
-rw-r--r--collectors/python.d.plugin/linux_power_supply/linux_power_supply.conf81
-rw-r--r--collectors/python.d.plugin/litespeed/Makefile.inc13
-rw-r--r--collectors/python.d.plugin/litespeed/README.md47
-rw-r--r--collectors/python.d.plugin/litespeed/litespeed.chart.py186
-rw-r--r--collectors/python.d.plugin/litespeed/litespeed.conf74
-rw-r--r--collectors/python.d.plugin/logind/Makefile.inc13
-rw-r--r--collectors/python.d.plugin/logind/README.md54
-rw-r--r--collectors/python.d.plugin/logind/logind.chart.py79
-rw-r--r--collectors/python.d.plugin/logind/logind.conf62
-rw-r--r--collectors/python.d.plugin/mdstat/Makefile.inc13
-rw-r--r--collectors/python.d.plugin/mdstat/README.md26
-rw-r--r--collectors/python.d.plugin/mdstat/mdstat.chart.py205
-rw-r--r--collectors/python.d.plugin/mdstat/mdstat.conf32
-rw-r--r--collectors/python.d.plugin/megacli/Makefile.inc13
-rw-r--r--collectors/python.d.plugin/megacli/README.md48
-rw-r--r--collectors/python.d.plugin/megacli/megacli.chart.py279
-rw-r--r--collectors/python.d.plugin/megacli/megacli.conf62
-rw-r--r--collectors/python.d.plugin/memcached/Makefile.inc13
-rw-r--r--collectors/python.d.plugin/memcached/README.md69
-rw-r--r--collectors/python.d.plugin/memcached/memcached.chart.py198
-rw-r--r--collectors/python.d.plugin/memcached/memcached.conf92
-rw-r--r--collectors/python.d.plugin/mongodb/Makefile.inc13
-rw-r--r--collectors/python.d.plugin/mongodb/README.md141
-rw-r--r--collectors/python.d.plugin/mongodb/mongodb.chart.py731
-rw-r--r--collectors/python.d.plugin/mongodb/mongodb.conf84
-rw-r--r--collectors/python.d.plugin/monit/Makefile.inc13
-rw-r--r--collectors/python.d.plugin/monit/README.md33
-rw-r--r--collectors/python.d.plugin/monit/monit.chart.py166
-rw-r--r--collectors/python.d.plugin/monit/monit.conf88
-rw-r--r--collectors/python.d.plugin/mysql/Makefile.inc13
-rw-r--r--collectors/python.d.plugin/mysql/README.md90
-rw-r--r--collectors/python.d.plugin/mysql/mysql.chart.py602
-rw-r--r--collectors/python.d.plugin/mysql/mysql.conf286
-rw-r--r--collectors/python.d.plugin/nginx/Makefile.inc13
-rw-r--r--collectors/python.d.plugin/nginx/README.md45
-rw-r--r--collectors/python.d.plugin/nginx/nginx.chart.py80
-rw-r--r--collectors/python.d.plugin/nginx/nginx.conf109
-rw-r--r--collectors/python.d.plugin/nginx_plus/Makefile.inc13
-rw-r--r--collectors/python.d.plugin/nginx_plus/README.md125
-rw-r--r--collectors/python.d.plugin/nginx_plus/nginx_plus.chart.py492
-rw-r--r--collectors/python.d.plugin/nginx_plus/nginx_plus.conf87
-rw-r--r--collectors/python.d.plugin/nsd/Makefile.inc13
-rw-r--r--collectors/python.d.plugin/nsd/README.md54
-rw-r--r--collectors/python.d.plugin/nsd/nsd.chart.py100
-rw-r--r--collectors/python.d.plugin/nsd/nsd.conf93
-rw-r--r--collectors/python.d.plugin/ntpd/Makefile.inc13
-rw-r--r--collectors/python.d.plugin/ntpd/README.md71
-rw-r--r--collectors/python.d.plugin/ntpd/ntpd.chart.py390
-rw-r--r--collectors/python.d.plugin/ntpd/ntpd.conf91
-rw-r--r--collectors/python.d.plugin/ovpn_status_log/Makefile.inc13
-rw-r--r--collectors/python.d.plugin/ovpn_status_log/README.md32
-rw-r--r--collectors/python.d.plugin/ovpn_status_log/ovpn_status_log.chart.py129
-rw-r--r--collectors/python.d.plugin/ovpn_status_log/ovpn_status_log.conf99
-rw-r--r--collectors/python.d.plugin/phpfpm/Makefile.inc13
-rw-r--r--collectors/python.d.plugin/phpfpm/README.md40
-rw-r--r--collectors/python.d.plugin/phpfpm/phpfpm.chart.py177
-rw-r--r--collectors/python.d.plugin/phpfpm/phpfpm.conf90
-rw-r--r--collectors/python.d.plugin/portcheck/Makefile.inc13
-rw-r--r--collectors/python.d.plugin/portcheck/README.md35
-rw-r--r--collectors/python.d.plugin/portcheck/portcheck.chart.py161
-rw-r--r--collectors/python.d.plugin/portcheck/portcheck.conf70
-rw-r--r--collectors/python.d.plugin/postfix/Makefile.inc13
-rw-r--r--collectors/python.d.plugin/postfix/README.md15
-rw-r--r--collectors/python.d.plugin/postfix/postfix.chart.py53
-rw-r--r--collectors/python.d.plugin/postfix/postfix.conf74
-rw-r--r--collectors/python.d.plugin/postgres/Makefile.inc13
-rw-r--r--collectors/python.d.plugin/postgres/README.md68
-rw-r--r--collectors/python.d.plugin/postgres/postgres.chart.py823
-rw-r--r--collectors/python.d.plugin/postgres/postgres.conf124
-rw-r--r--collectors/python.d.plugin/powerdns/Makefile.inc13
-rw-r--r--collectors/python.d.plugin/powerdns/README.md77
-rw-r--r--collectors/python.d.plugin/powerdns/powerdns.chart.py150
-rw-r--r--collectors/python.d.plugin/powerdns/powerdns.conf78
-rw-r--r--collectors/python.d.plugin/proxysql/Makefile.inc13
-rw-r--r--collectors/python.d.plugin/proxysql/README.md62
-rw-r--r--collectors/python.d.plugin/proxysql/proxysql.chart.py356
-rw-r--r--collectors/python.d.plugin/proxysql/proxysql.conf118
-rw-r--r--collectors/python.d.plugin/puppet/Makefile.inc13
-rw-r--r--collectors/python.d.plugin/puppet/README.md48
-rw-r--r--collectors/python.d.plugin/puppet/puppet.chart.py121
-rw-r--r--collectors/python.d.plugin/puppet/puppet.conf98
-rw-r--r--collectors/python.d.plugin/python.d.conf97
-rw-r--r--collectors/python.d.plugin/python.d.plugin427
-rwxr-xr-xcollectors/python.d.plugin/python.d.plugin.in427
-rw-r--r--collectors/python.d.plugin/python_modules/__init__.py0
-rw-r--r--collectors/python.d.plugin/python_modules/bases/FrameworkServices/ExecutableService.py89
-rw-r--r--collectors/python.d.plugin/python_modules/bases/FrameworkServices/LogService.py80
-rw-r--r--collectors/python.d.plugin/python_modules/bases/FrameworkServices/MySQLService.py159
-rw-r--r--collectors/python.d.plugin/python_modules/bases/FrameworkServices/SimpleService.py261
-rw-r--r--collectors/python.d.plugin/python_modules/bases/FrameworkServices/SocketService.py309
-rw-r--r--collectors/python.d.plugin/python_modules/bases/FrameworkServices/UrlService.py146
-rw-r--r--collectors/python.d.plugin/python_modules/bases/FrameworkServices/__init__.py0
-rw-r--r--collectors/python.d.plugin/python_modules/bases/__init__.py0
-rw-r--r--collectors/python.d.plugin/python_modules/bases/charts.py394
-rw-r--r--collectors/python.d.plugin/python_modules/bases/collection.py145
-rw-r--r--collectors/python.d.plugin/python_modules/bases/loaders.py83
-rw-r--r--collectors/python.d.plugin/python_modules/bases/loggers.py206
-rw-r--r--collectors/python.d.plugin/python_modules/pyyaml2/__init__.py316
-rw-r--r--collectors/python.d.plugin/python_modules/pyyaml2/composer.py140
-rw-r--r--collectors/python.d.plugin/python_modules/pyyaml2/constructor.py676
-rw-r--r--collectors/python.d.plugin/python_modules/pyyaml2/cyaml.py86
-rw-r--r--collectors/python.d.plugin/python_modules/pyyaml2/dumper.py63
-rw-r--r--collectors/python.d.plugin/python_modules/pyyaml2/emitter.py1141
-rw-r--r--collectors/python.d.plugin/python_modules/pyyaml2/error.py76
-rw-r--r--collectors/python.d.plugin/python_modules/pyyaml2/events.py87
-rw-r--r--collectors/python.d.plugin/python_modules/pyyaml2/loader.py41
-rw-r--r--collectors/python.d.plugin/python_modules/pyyaml2/nodes.py50
-rw-r--r--collectors/python.d.plugin/python_modules/pyyaml2/parser.py590
-rw-r--r--collectors/python.d.plugin/python_modules/pyyaml2/reader.py191
-rw-r--r--collectors/python.d.plugin/python_modules/pyyaml2/representer.py485
-rw-r--r--collectors/python.d.plugin/python_modules/pyyaml2/resolver.py225
-rw-r--r--collectors/python.d.plugin/python_modules/pyyaml2/scanner.py1458
-rw-r--r--collectors/python.d.plugin/python_modules/pyyaml2/serializer.py112
-rw-r--r--collectors/python.d.plugin/python_modules/pyyaml2/tokens.py105
-rw-r--r--collectors/python.d.plugin/python_modules/pyyaml3/__init__.py313
-rw-r--r--collectors/python.d.plugin/python_modules/pyyaml3/composer.py140
-rw-r--r--collectors/python.d.plugin/python_modules/pyyaml3/constructor.py687
-rw-r--r--collectors/python.d.plugin/python_modules/pyyaml3/cyaml.py86
-rw-r--r--collectors/python.d.plugin/python_modules/pyyaml3/dumper.py63
-rw-r--r--collectors/python.d.plugin/python_modules/pyyaml3/emitter.py1138
-rw-r--r--collectors/python.d.plugin/python_modules/pyyaml3/error.py76
-rw-r--r--collectors/python.d.plugin/python_modules/pyyaml3/events.py87
-rw-r--r--collectors/python.d.plugin/python_modules/pyyaml3/loader.py41
-rw-r--r--collectors/python.d.plugin/python_modules/pyyaml3/nodes.py50
-rw-r--r--collectors/python.d.plugin/python_modules/pyyaml3/parser.py590
-rw-r--r--collectors/python.d.plugin/python_modules/pyyaml3/reader.py193
-rw-r--r--collectors/python.d.plugin/python_modules/pyyaml3/representer.py375
-rw-r--r--collectors/python.d.plugin/python_modules/pyyaml3/resolver.py225
-rw-r--r--collectors/python.d.plugin/python_modules/pyyaml3/scanner.py1449
-rw-r--r--collectors/python.d.plugin/python_modules/pyyaml3/serializer.py112
-rw-r--r--collectors/python.d.plugin/python_modules/pyyaml3/tokens.py105
-rw-r--r--collectors/python.d.plugin/python_modules/third_party/__init__.py0
-rw-r--r--collectors/python.d.plugin/python_modules/third_party/boinc_client.py515
-rw-r--r--collectors/python.d.plugin/python_modules/third_party/lm_sensors.py258
-rw-r--r--collectors/python.d.plugin/python_modules/third_party/mcrcon.py74
-rw-r--r--collectors/python.d.plugin/python_modules/third_party/monotonic.py171
-rw-r--r--collectors/python.d.plugin/python_modules/third_party/ordereddict.py110
-rw-r--r--collectors/python.d.plugin/python_modules/urllib3/__init__.py98
-rw-r--r--collectors/python.d.plugin/python_modules/urllib3/_collections.py315
-rw-r--r--collectors/python.d.plugin/python_modules/urllib3/connection.py374
-rw-r--r--collectors/python.d.plugin/python_modules/urllib3/connectionpool.py900
-rw-r--r--collectors/python.d.plugin/python_modules/urllib3/contrib/__init__.py0
-rw-r--r--collectors/python.d.plugin/python_modules/urllib3/contrib/_securetransport/__init__.py0
-rw-r--r--collectors/python.d.plugin/python_modules/urllib3/contrib/_securetransport/bindings.py591
-rw-r--r--collectors/python.d.plugin/python_modules/urllib3/contrib/_securetransport/low_level.py344
-rw-r--r--collectors/python.d.plugin/python_modules/urllib3/contrib/appengine.py297
-rw-r--r--collectors/python.d.plugin/python_modules/urllib3/contrib/ntlmpool.py113
-rw-r--r--collectors/python.d.plugin/python_modules/urllib3/contrib/pyopenssl.py458
-rw-r--r--collectors/python.d.plugin/python_modules/urllib3/contrib/securetransport.py808
-rw-r--r--collectors/python.d.plugin/python_modules/urllib3/contrib/socks.py189
-rw-r--r--collectors/python.d.plugin/python_modules/urllib3/exceptions.py247
-rw-r--r--collectors/python.d.plugin/python_modules/urllib3/fields.py179
-rw-r--r--collectors/python.d.plugin/python_modules/urllib3/filepost.py95
-rw-r--r--collectors/python.d.plugin/python_modules/urllib3/packages/__init__.py5
-rw-r--r--collectors/python.d.plugin/python_modules/urllib3/packages/backports/__init__.py0
-rw-r--r--collectors/python.d.plugin/python_modules/urllib3/packages/backports/makefile.py54
-rw-r--r--collectors/python.d.plugin/python_modules/urllib3/packages/ordered_dict.py260
-rw-r--r--collectors/python.d.plugin/python_modules/urllib3/packages/six.py852
-rw-r--r--collectors/python.d.plugin/python_modules/urllib3/packages/ssl_match_hostname/__init__.py20
-rw-r--r--collectors/python.d.plugin/python_modules/urllib3/packages/ssl_match_hostname/_implementation.py156
-rw-r--r--collectors/python.d.plugin/python_modules/urllib3/poolmanager.py441
-rw-r--r--collectors/python.d.plugin/python_modules/urllib3/request.py149
-rw-r--r--collectors/python.d.plugin/python_modules/urllib3/response.py623
-rw-r--r--collectors/python.d.plugin/python_modules/urllib3/util/__init__.py55
-rw-r--r--collectors/python.d.plugin/python_modules/urllib3/util/connection.py131
-rw-r--r--collectors/python.d.plugin/python_modules/urllib3/util/request.py119
-rw-r--r--collectors/python.d.plugin/python_modules/urllib3/util/response.py82
-rw-r--r--collectors/python.d.plugin/python_modules/urllib3/util/retry.py402
-rw-r--r--collectors/python.d.plugin/python_modules/urllib3/util/selectors.py582
-rw-r--r--collectors/python.d.plugin/python_modules/urllib3/util/ssl_.py338
-rw-r--r--collectors/python.d.plugin/python_modules/urllib3/util/timeout.py243
-rw-r--r--collectors/python.d.plugin/python_modules/urllib3/util/url.py231
-rw-r--r--collectors/python.d.plugin/python_modules/urllib3/util/wait.py41
-rw-r--r--collectors/python.d.plugin/rabbitmq/Makefile.inc13
-rw-r--r--collectors/python.d.plugin/rabbitmq/README.md56
-rw-r--r--collectors/python.d.plugin/rabbitmq/rabbitmq.chart.py207
-rw-r--r--collectors/python.d.plugin/rabbitmq/rabbitmq.conf82
-rw-r--r--collectors/python.d.plugin/redis/Makefile.inc13
-rw-r--r--collectors/python.d.plugin/redis/README.md42
-rw-r--r--collectors/python.d.plugin/redis/redis.chart.py261
-rw-r--r--collectors/python.d.plugin/redis/redis.conf112
-rw-r--r--collectors/python.d.plugin/rethinkdbs/Makefile.inc13
-rw-r--r--collectors/python.d.plugin/rethinkdbs/README.md34
-rw-r--r--collectors/python.d.plugin/rethinkdbs/rethinkdbs.chart.py235
-rw-r--r--collectors/python.d.plugin/rethinkdbs/rethinkdbs.conf78
-rw-r--r--collectors/python.d.plugin/retroshare/Makefile.inc13
-rw-r--r--collectors/python.d.plugin/retroshare/README.md1
-rw-r--r--collectors/python.d.plugin/retroshare/retroshare.chart.py80
-rw-r--r--collectors/python.d.plugin/retroshare/retroshare.conf74
-rw-r--r--collectors/python.d.plugin/samba/Makefile.inc13
-rw-r--r--collectors/python.d.plugin/samba/README.md67
-rw-r--r--collectors/python.d.plugin/samba/samba.chart.py138
-rw-r--r--collectors/python.d.plugin/samba/samba.conf62
-rw-r--r--collectors/python.d.plugin/sensors/Makefile.inc13
-rw-r--r--collectors/python.d.plugin/sensors/README.md17
-rw-r--r--collectors/python.d.plugin/sensors/sensors.chart.py146
-rw-r--r--collectors/python.d.plugin/sensors/sensors.conf63
-rw-r--r--collectors/python.d.plugin/smartd_log/Makefile.inc13
-rw-r--r--collectors/python.d.plugin/smartd_log/README.md38
-rw-r--r--collectors/python.d.plugin/smartd_log/smartd_log.chart.py353
-rw-r--r--collectors/python.d.plugin/smartd_log/smartd_log.conf90
-rw-r--r--collectors/python.d.plugin/spigotmc/Makefile.inc13
-rw-r--r--collectors/python.d.plugin/spigotmc/README.md22
-rw-r--r--collectors/python.d.plugin/spigotmc/spigotmc.chart.py120
-rw-r--r--collectors/python.d.plugin/spigotmc/spigotmc.conf68
-rw-r--r--collectors/python.d.plugin/springboot/Makefile.inc13
-rw-r--r--collectors/python.d.plugin/springboot/README.md129
-rw-r--r--collectors/python.d.plugin/springboot/springboot.chart.py159
-rw-r--r--collectors/python.d.plugin/springboot/springboot.conf120
-rw-r--r--collectors/python.d.plugin/squid/Makefile.inc13
-rw-r--r--collectors/python.d.plugin/squid/README.md38
-rw-r--r--collectors/python.d.plugin/squid/squid.chart.py125
-rw-r--r--collectors/python.d.plugin/squid/squid.conf169
-rw-r--r--collectors/python.d.plugin/tomcat/Makefile.inc13
-rw-r--r--collectors/python.d.plugin/tomcat/README.md33
-rw-r--r--collectors/python.d.plugin/tomcat/tomcat.chart.py163
-rw-r--r--collectors/python.d.plugin/tomcat/tomcat.conf91
-rw-r--r--collectors/python.d.plugin/traefik/Makefile.inc13
-rw-r--r--collectors/python.d.plugin/traefik/README.md54
-rw-r--r--collectors/python.d.plugin/traefik/traefik.chart.py195
-rw-r--r--collectors/python.d.plugin/traefik/traefik.conf79
-rw-r--r--collectors/python.d.plugin/unbound/Makefile.inc13
-rw-r--r--collectors/python.d.plugin/unbound/README.md76
-rw-r--r--collectors/python.d.plugin/unbound/unbound.chart.py275
-rw-r--r--collectors/python.d.plugin/unbound/unbound.conf87
-rw-r--r--collectors/python.d.plugin/uwsgi/Makefile.inc13
-rw-r--r--collectors/python.d.plugin/uwsgi/README.md37
-rw-r--r--collectors/python.d.plugin/uwsgi/uwsgi.chart.py183
-rw-r--r--collectors/python.d.plugin/uwsgi/uwsgi.conf94
-rw-r--r--collectors/python.d.plugin/varnish/Makefile.inc13
-rw-r--r--collectors/python.d.plugin/varnish/README.md69
-rw-r--r--collectors/python.d.plugin/varnish/varnish.chart.py252
-rw-r--r--collectors/python.d.plugin/varnish/varnish.conf64
-rw-r--r--collectors/python.d.plugin/w1sensor/Makefile.inc13
-rw-r--r--collectors/python.d.plugin/w1sensor/README.md13
-rw-r--r--collectors/python.d.plugin/w1sensor/w1sensor.chart.py93
-rw-r--r--collectors/python.d.plugin/w1sensor/w1sensor.conf74
-rw-r--r--collectors/python.d.plugin/web_log/Makefile.inc13
-rw-r--r--collectors/python.d.plugin/web_log/README.md64
-rw-r--r--collectors/python.d.plugin/web_log/web_log.chart.py1194
-rw-r--r--collectors/python.d.plugin/web_log/web_log.conf206
-rw-r--r--collectors/statsd.plugin/Makefile.am20
-rw-r--r--collectors/statsd.plugin/Makefile.in556
-rw-r--r--collectors/statsd.plugin/README.md523
-rw-r--r--collectors/statsd.plugin/example.conf64
-rw-r--r--collectors/statsd.plugin/statsd.c2556
-rw-r--r--collectors/statsd.plugin/statsd.h25
-rw-r--r--collectors/tc.plugin/Makefile.am20
-rw-r--r--collectors/tc.plugin/Makefile.in562
-rw-r--r--collectors/tc.plugin/README.md183
-rw-r--r--collectors/tc.plugin/plugin_tc.c1168
-rw-r--r--collectors/tc.plugin/plugin_tc.h31
-rw-r--r--collectors/tc.plugin/tc-qos-helper.sh315
-rwxr-xr-xcollectors/tc.plugin/tc-qos-helper.sh.in315
589 files changed, 119640 insertions, 0 deletions
diff --git a/collectors/Makefile.am b/collectors/Makefile.am
new file mode 100644
index 000000000..4ecd1f176
--- /dev/null
+++ b/collectors/Makefile.am
@@ -0,0 +1,28 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+
+SUBDIRS = \
+ plugins.d \
+ apps.plugin \
+ cgroups.plugin \
+ charts.d.plugin \
+ checks.plugin \
+ diskspace.plugin \
+ fping.plugin \
+ freebsd.plugin \
+ freeipmi.plugin \
+ idlejitter.plugin \
+ macos.plugin \
+ nfacct.plugin \
+ node.d.plugin \
+ proc.plugin \
+ python.d.plugin \
+ statsd.plugin \
+ tc.plugin \
+ $(NULL)
+
+
+dist_noinst_DATA = \
+ README.md \
+ $(NULL)
diff --git a/collectors/Makefile.in b/collectors/Makefile.in
new file mode 100644
index 000000000..357f69d7a
--- /dev/null
+++ b/collectors/Makefile.in
@@ -0,0 +1,663 @@
+# Makefile.in generated by automake 1.14.1 from Makefile.am.
+# @configure_input@
+
+# Copyright (C) 1994-2013 Free Software Foundation, Inc.
+
+# This Makefile.in is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
+# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE.
+
+@SET_MAKE@
+
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+VPATH = @srcdir@
+am__is_gnu_make = test -n '$(MAKEFILE_LIST)' && test -n '$(MAKELEVEL)'
+am__make_running_with_option = \
+ case $${target_option-} in \
+ ?) ;; \
+ *) echo "am__make_running_with_option: internal error: invalid" \
+ "target option '$${target_option-}' specified" >&2; \
+ exit 1;; \
+ esac; \
+ has_opt=no; \
+ sane_makeflags=$$MAKEFLAGS; \
+ if $(am__is_gnu_make); then \
+ sane_makeflags=$$MFLAGS; \
+ else \
+ case $$MAKEFLAGS in \
+ *\\[\ \ ]*) \
+ bs=\\; \
+ sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \
+ | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \
+ esac; \
+ fi; \
+ skip_next=no; \
+ strip_trailopt () \
+ { \
+ flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \
+ }; \
+ for flg in $$sane_makeflags; do \
+ test $$skip_next = yes && { skip_next=no; continue; }; \
+ case $$flg in \
+ *=*|--*) continue;; \
+ -*I) strip_trailopt 'I'; skip_next=yes;; \
+ -*I?*) strip_trailopt 'I';; \
+ -*O) strip_trailopt 'O'; skip_next=yes;; \
+ -*O?*) strip_trailopt 'O';; \
+ -*l) strip_trailopt 'l'; skip_next=yes;; \
+ -*l?*) strip_trailopt 'l';; \
+ -[dEDm]) skip_next=yes;; \
+ -[JT]) skip_next=yes;; \
+ esac; \
+ case $$flg in \
+ *$$target_option*) has_opt=yes; break;; \
+ esac; \
+ done; \
+ test $$has_opt = yes
+am__make_dryrun = (target_option=n; $(am__make_running_with_option))
+am__make_keepgoing = (target_option=k; $(am__make_running_with_option))
+pkgdatadir = $(datadir)/@PACKAGE@
+pkgincludedir = $(includedir)/@PACKAGE@
+pkglibdir = $(libdir)/@PACKAGE@
+pkglibexecdir = $(libexecdir)/@PACKAGE@
+am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd
+install_sh_DATA = $(install_sh) -c -m 644
+install_sh_PROGRAM = $(install_sh) -c
+install_sh_SCRIPT = $(install_sh) -c
+INSTALL_HEADER = $(INSTALL_DATA)
+transform = $(program_transform_name)
+NORMAL_INSTALL = :
+PRE_INSTALL = :
+POST_INSTALL = :
+NORMAL_UNINSTALL = :
+PRE_UNINSTALL = :
+POST_UNINSTALL = :
+build_triplet = @build@
+host_triplet = @host@
+subdir = collectors
+DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/Makefile.am \
+ $(dist_noinst_DATA)
+ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
+am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \
+ $(top_srcdir)/build/m4/ax_c__generic.m4 \
+ $(top_srcdir)/build/m4/ax_c_lto.m4 \
+ $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \
+ $(top_srcdir)/build/m4/ax_c_mallopt.m4 \
+ $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \
+ $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \
+ $(top_srcdir)/build/m4/ax_pthread.m4 \
+ $(top_srcdir)/build/m4/jemalloc.m4 \
+ $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac
+am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
+ $(ACLOCAL_M4)
+mkinstalldirs = $(install_sh) -d
+CONFIG_HEADER = $(top_builddir)/config.h
+CONFIG_CLEAN_FILES =
+CONFIG_CLEAN_VPATH_FILES =
+AM_V_P = $(am__v_P_@AM_V@)
+am__v_P_ = $(am__v_P_@AM_DEFAULT_V@)
+am__v_P_0 = false
+am__v_P_1 = :
+AM_V_GEN = $(am__v_GEN_@AM_V@)
+am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@)
+am__v_GEN_0 = @echo " GEN " $@;
+am__v_GEN_1 =
+AM_V_at = $(am__v_at_@AM_V@)
+am__v_at_ = $(am__v_at_@AM_DEFAULT_V@)
+am__v_at_0 = @
+am__v_at_1 =
+SOURCES =
+DIST_SOURCES =
+RECURSIVE_TARGETS = all-recursive check-recursive cscopelist-recursive \
+ ctags-recursive dvi-recursive html-recursive info-recursive \
+ install-data-recursive install-dvi-recursive \
+ install-exec-recursive install-html-recursive \
+ install-info-recursive install-pdf-recursive \
+ install-ps-recursive install-recursive installcheck-recursive \
+ installdirs-recursive pdf-recursive ps-recursive \
+ tags-recursive uninstall-recursive
+am__can_run_installinfo = \
+ case $$AM_UPDATE_INFO_DIR in \
+ n|no|NO) false;; \
+ *) (install-info --version) >/dev/null 2>&1;; \
+ esac
+DATA = $(dist_noinst_DATA)
+RECURSIVE_CLEAN_TARGETS = mostlyclean-recursive clean-recursive \
+ distclean-recursive maintainer-clean-recursive
+am__recursive_targets = \
+ $(RECURSIVE_TARGETS) \
+ $(RECURSIVE_CLEAN_TARGETS) \
+ $(am__extra_recursive_targets)
+AM_RECURSIVE_TARGETS = $(am__recursive_targets:-recursive=) TAGS CTAGS \
+ distdir
+am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP)
+# Read a list of newline-separated strings from the standard input,
+# and print each of them once, without duplicates. Input order is
+# *not* preserved.
+am__uniquify_input = $(AWK) '\
+ BEGIN { nonempty = 0; } \
+ { items[$$0] = 1; nonempty = 1; } \
+ END { if (nonempty) { for (i in items) print i; }; } \
+'
+# Make sure the list of sources is unique. This is necessary because,
+# e.g., the same source file might be shared among _SOURCES variables
+# for different programs/libraries.
+am__define_uniq_tagged_files = \
+ list='$(am__tagged_files)'; \
+ unique=`for i in $$list; do \
+ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \
+ done | $(am__uniquify_input)`
+ETAGS = etags
+CTAGS = ctags
+DIST_SUBDIRS = $(SUBDIRS)
+DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
+am__relativize = \
+ dir0=`pwd`; \
+ sed_first='s,^\([^/]*\)/.*$$,\1,'; \
+ sed_rest='s,^[^/]*/*,,'; \
+ sed_last='s,^.*/\([^/]*\)$$,\1,'; \
+ sed_butlast='s,/*[^/]*$$,,'; \
+ while test -n "$$dir1"; do \
+ first=`echo "$$dir1" | sed -e "$$sed_first"`; \
+ if test "$$first" != "."; then \
+ if test "$$first" = ".."; then \
+ dir2=`echo "$$dir0" | sed -e "$$sed_last"`/"$$dir2"; \
+ dir0=`echo "$$dir0" | sed -e "$$sed_butlast"`; \
+ else \
+ first2=`echo "$$dir2" | sed -e "$$sed_first"`; \
+ if test "$$first2" = "$$first"; then \
+ dir2=`echo "$$dir2" | sed -e "$$sed_rest"`; \
+ else \
+ dir2="../$$dir2"; \
+ fi; \
+ dir0="$$dir0"/"$$first"; \
+ fi; \
+ fi; \
+ dir1=`echo "$$dir1" | sed -e "$$sed_rest"`; \
+ done; \
+ reldir="$$dir2"
+ACLOCAL = @ACLOCAL@
+AMTAR = @AMTAR@
+AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@
+AUTOCONF = @AUTOCONF@
+AUTOHEADER = @AUTOHEADER@
+AUTOMAKE = @AUTOMAKE@
+AWK = @AWK@
+CC = @CC@
+CCDEPMODE = @CCDEPMODE@
+CFLAGS = @CFLAGS@
+CPP = @CPP@
+CPPFLAGS = @CPPFLAGS@
+CYGPATH_W = @CYGPATH_W@
+DEFS = @DEFS@
+DEPDIR = @DEPDIR@
+ECHO_C = @ECHO_C@
+ECHO_N = @ECHO_N@
+ECHO_T = @ECHO_T@
+EGREP = @EGREP@
+EXEEXT = @EXEEXT@
+GREP = @GREP@
+INSTALL = @INSTALL@
+INSTALL_DATA = @INSTALL_DATA@
+INSTALL_PROGRAM = @INSTALL_PROGRAM@
+INSTALL_SCRIPT = @INSTALL_SCRIPT@
+INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@
+IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@
+LDFLAGS = @LDFLAGS@
+LIBCAP_CFLAGS = @LIBCAP_CFLAGS@
+LIBCAP_LIBS = @LIBCAP_LIBS@
+LIBMNL_CFLAGS = @LIBMNL_CFLAGS@
+LIBMNL_LIBS = @LIBMNL_LIBS@
+LIBOBJS = @LIBOBJS@
+LIBS = @LIBS@
+LTLIBOBJS = @LTLIBOBJS@
+MAINT = @MAINT@
+MAKEINFO = @MAKEINFO@
+MATH_CFLAGS = @MATH_CFLAGS@
+MATH_LIBS = @MATH_LIBS@
+MKDIR_P = @MKDIR_P@
+NFACCT_CFLAGS = @NFACCT_CFLAGS@
+NFACCT_LIBS = @NFACCT_LIBS@
+OBJEXT = @OBJEXT@
+OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@
+OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@
+OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@
+OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@
+OPTIONAL_MATH_CLFAGS = @OPTIONAL_MATH_CLFAGS@
+OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@
+OPTIONAL_NFACCT_CLFAGS = @OPTIONAL_NFACCT_CLFAGS@
+OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@
+OPTIONAL_UUID_CLFAGS = @OPTIONAL_UUID_CLFAGS@
+OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@
+OPTIONAL_ZLIB_CLFAGS = @OPTIONAL_ZLIB_CLFAGS@
+OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@
+PACKAGE = @PACKAGE@
+PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@
+PACKAGE_NAME = @PACKAGE_NAME@
+PACKAGE_RPM_RELEASE = @PACKAGE_RPM_RELEASE@
+PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@
+PACKAGE_STRING = @PACKAGE_STRING@
+PACKAGE_TARNAME = @PACKAGE_TARNAME@
+PACKAGE_URL = @PACKAGE_URL@
+PACKAGE_VERSION = @PACKAGE_VERSION@
+PATH_SEPARATOR = @PATH_SEPARATOR@
+PKG_CONFIG = @PKG_CONFIG@
+PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@
+PKG_CONFIG_PATH = @PKG_CONFIG_PATH@
+PTHREAD_CC = @PTHREAD_CC@
+PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
+PTHREAD_LIBS = @PTHREAD_LIBS@
+SET_MAKE = @SET_MAKE@
+SHELL = @SHELL@
+SSE_CANDIDATE = @SSE_CANDIDATE@
+STRIP = @STRIP@
+UUID_CFLAGS = @UUID_CFLAGS@
+UUID_LIBS = @UUID_LIBS@
+VERSION = @VERSION@
+ZLIB_CFLAGS = @ZLIB_CFLAGS@
+ZLIB_LIBS = @ZLIB_LIBS@
+abs_builddir = @abs_builddir@
+abs_srcdir = @abs_srcdir@
+abs_top_builddir = @abs_top_builddir@
+abs_top_srcdir = @abs_top_srcdir@
+ac_ct_CC = @ac_ct_CC@
+am__include = @am__include@
+am__leading_dot = @am__leading_dot@
+am__quote = @am__quote@
+am__tar = @am__tar@
+am__untar = @am__untar@
+ax_pthread_config = @ax_pthread_config@
+bindir = @bindir@
+build = @build@
+build_alias = @build_alias@
+build_cpu = @build_cpu@
+build_os = @build_os@
+build_target = @build_target@
+build_vendor = @build_vendor@
+builddir = @builddir@
+cachedir = @cachedir@
+chartsdir = @chartsdir@
+configdir = @configdir@
+datadir = @datadir@
+datarootdir = @datarootdir@
+docdir = @docdir@
+dvidir = @dvidir@
+exec_prefix = @exec_prefix@
+has_jemalloc = @has_jemalloc@
+has_tcmalloc = @has_tcmalloc@
+host = @host@
+host_alias = @host_alias@
+host_cpu = @host_cpu@
+host_os = @host_os@
+host_vendor = @host_vendor@
+htmldir = @htmldir@
+includedir = @includedir@
+infodir = @infodir@
+install_sh = @install_sh@
+libconfigdir = @libconfigdir@
+libdir = @libdir@
+libexecdir = @libexecdir@
+localedir = @localedir@
+localstatedir = @localstatedir@
+logdir = @logdir@
+mandir = @mandir@
+mkdir_p = @mkdir_p@
+nodedir = @nodedir@
+oldincludedir = @oldincludedir@
+pdfdir = @pdfdir@
+pluginsdir = @pluginsdir@
+prefix = @prefix@
+program_transform_name = @program_transform_name@
+psdir = @psdir@
+pythondir = @pythondir@
+registrydir = @registrydir@
+sbindir = @sbindir@
+sharedstatedir = @sharedstatedir@
+srcdir = @srcdir@
+sysconfdir = @sysconfdir@
+target_alias = @target_alias@
+top_build_prefix = @top_build_prefix@
+top_builddir = @top_builddir@
+top_srcdir = @top_srcdir@
+varlibdir = @varlibdir@
+webdir = @webdir@
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+SUBDIRS = \
+ plugins.d \
+ apps.plugin \
+ cgroups.plugin \
+ charts.d.plugin \
+ checks.plugin \
+ diskspace.plugin \
+ fping.plugin \
+ freebsd.plugin \
+ freeipmi.plugin \
+ idlejitter.plugin \
+ macos.plugin \
+ nfacct.plugin \
+ node.d.plugin \
+ proc.plugin \
+ python.d.plugin \
+ statsd.plugin \
+ tc.plugin \
+ $(NULL)
+
+dist_noinst_DATA = \
+ README.md \
+ $(NULL)
+
+all: all-recursive
+
+.SUFFIXES:
+$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps)
+ @for dep in $?; do \
+ case '$(am__configure_deps)' in \
+ *$$dep*) \
+ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \
+ && { if test -f $@; then exit 0; else break; fi; }; \
+ exit 1;; \
+ esac; \
+ done; \
+ echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu collectors/Makefile'; \
+ $(am__cd) $(top_srcdir) && \
+ $(AUTOMAKE) --gnu collectors/Makefile
+.PRECIOUS: Makefile
+Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
+ @case '$?' in \
+ *config.status*) \
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \
+ *) \
+ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \
+ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \
+ esac;
+
+$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+
+$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(am__aclocal_m4_deps):
+
+# This directory's subdirectories are mostly independent; you can cd
+# into them and run 'make' without going through this Makefile.
+# To change the values of 'make' variables: instead of editing Makefiles,
+# (1) if the variable is set in 'config.status', edit 'config.status'
+# (which will cause the Makefiles to be regenerated when you run 'make');
+# (2) otherwise, pass the desired values on the 'make' command line.
+$(am__recursive_targets):
+ @fail=; \
+ if $(am__make_keepgoing); then \
+ failcom='fail=yes'; \
+ else \
+ failcom='exit 1'; \
+ fi; \
+ dot_seen=no; \
+ target=`echo $@ | sed s/-recursive//`; \
+ case "$@" in \
+ distclean-* | maintainer-clean-*) list='$(DIST_SUBDIRS)' ;; \
+ *) list='$(SUBDIRS)' ;; \
+ esac; \
+ for subdir in $$list; do \
+ echo "Making $$target in $$subdir"; \
+ if test "$$subdir" = "."; then \
+ dot_seen=yes; \
+ local_target="$$target-am"; \
+ else \
+ local_target="$$target"; \
+ fi; \
+ ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \
+ || eval $$failcom; \
+ done; \
+ if test "$$dot_seen" = "no"; then \
+ $(MAKE) $(AM_MAKEFLAGS) "$$target-am" || exit 1; \
+ fi; test -z "$$fail"
+
+ID: $(am__tagged_files)
+ $(am__define_uniq_tagged_files); mkid -fID $$unique
+tags: tags-recursive
+TAGS: tags
+
+tags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files)
+ set x; \
+ here=`pwd`; \
+ if ($(ETAGS) --etags-include --version) >/dev/null 2>&1; then \
+ include_option=--etags-include; \
+ empty_fix=.; \
+ else \
+ include_option=--include; \
+ empty_fix=; \
+ fi; \
+ list='$(SUBDIRS)'; for subdir in $$list; do \
+ if test "$$subdir" = .; then :; else \
+ test ! -f $$subdir/TAGS || \
+ set "$$@" "$$include_option=$$here/$$subdir/TAGS"; \
+ fi; \
+ done; \
+ $(am__define_uniq_tagged_files); \
+ shift; \
+ if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \
+ test -n "$$unique" || unique=$$empty_fix; \
+ if test $$# -gt 0; then \
+ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \
+ "$$@" $$unique; \
+ else \
+ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \
+ $$unique; \
+ fi; \
+ fi
+ctags: ctags-recursive
+
+CTAGS: ctags
+ctags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files)
+ $(am__define_uniq_tagged_files); \
+ test -z "$(CTAGS_ARGS)$$unique" \
+ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \
+ $$unique
+
+GTAGS:
+ here=`$(am__cd) $(top_builddir) && pwd` \
+ && $(am__cd) $(top_srcdir) \
+ && gtags -i $(GTAGS_ARGS) "$$here"
+cscopelist: cscopelist-recursive
+
+cscopelist-am: $(am__tagged_files)
+ list='$(am__tagged_files)'; \
+ case "$(srcdir)" in \
+ [\\/]* | ?:[\\/]*) sdir="$(srcdir)" ;; \
+ *) sdir=$(subdir)/$(srcdir) ;; \
+ esac; \
+ for i in $$list; do \
+ if test -f "$$i"; then \
+ echo "$(subdir)/$$i"; \
+ else \
+ echo "$$sdir/$$i"; \
+ fi; \
+ done >> $(top_builddir)/cscope.files
+
+distclean-tags:
+ -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags
+
+distdir: $(DISTFILES)
+ @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+ list='$(DISTFILES)'; \
+ dist_files=`for file in $$list; do echo $$file; done | \
+ sed -e "s|^$$srcdirstrip/||;t" \
+ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \
+ case $$dist_files in \
+ */*) $(MKDIR_P) `echo "$$dist_files" | \
+ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \
+ sort -u` ;; \
+ esac; \
+ for file in $$dist_files; do \
+ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \
+ if test -d $$d/$$file; then \
+ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \
+ if test -d "$(distdir)/$$file"; then \
+ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+ fi; \
+ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \
+ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \
+ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+ fi; \
+ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \
+ else \
+ test -f "$(distdir)/$$file" \
+ || cp -p $$d/$$file "$(distdir)/$$file" \
+ || exit 1; \
+ fi; \
+ done
+ @list='$(DIST_SUBDIRS)'; for subdir in $$list; do \
+ if test "$$subdir" = .; then :; else \
+ $(am__make_dryrun) \
+ || test -d "$(distdir)/$$subdir" \
+ || $(MKDIR_P) "$(distdir)/$$subdir" \
+ || exit 1; \
+ dir1=$$subdir; dir2="$(distdir)/$$subdir"; \
+ $(am__relativize); \
+ new_distdir=$$reldir; \
+ dir1=$$subdir; dir2="$(top_distdir)"; \
+ $(am__relativize); \
+ new_top_distdir=$$reldir; \
+ echo " (cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) top_distdir="$$new_top_distdir" distdir="$$new_distdir" \\"; \
+ echo " am__remove_distdir=: am__skip_length_check=: am__skip_mode_fix=: distdir)"; \
+ ($(am__cd) $$subdir && \
+ $(MAKE) $(AM_MAKEFLAGS) \
+ top_distdir="$$new_top_distdir" \
+ distdir="$$new_distdir" \
+ am__remove_distdir=: \
+ am__skip_length_check=: \
+ am__skip_mode_fix=: \
+ distdir) \
+ || exit 1; \
+ fi; \
+ done
+check-am: all-am
+check: check-recursive
+all-am: Makefile $(DATA)
+installdirs: installdirs-recursive
+installdirs-am:
+install: install-recursive
+install-exec: install-exec-recursive
+install-data: install-data-recursive
+uninstall: uninstall-recursive
+
+install-am: all-am
+ @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
+
+installcheck: installcheck-recursive
+install-strip:
+ if test -z '$(STRIP)'; then \
+ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+ install; \
+ else \
+ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+ "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \
+ fi
+mostlyclean-generic:
+
+clean-generic:
+
+distclean-generic:
+ -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
+ -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES)
+
+maintainer-clean-generic:
+ @echo "This command is intended for maintainers to use"
+ @echo "it deletes files that may require special tools to rebuild."
+ -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES)
+clean: clean-recursive
+
+clean-am: clean-generic mostlyclean-am
+
+distclean: distclean-recursive
+ -rm -f Makefile
+distclean-am: clean-am distclean-generic distclean-tags
+
+dvi: dvi-recursive
+
+dvi-am:
+
+html: html-recursive
+
+html-am:
+
+info: info-recursive
+
+info-am:
+
+install-data-am:
+
+install-dvi: install-dvi-recursive
+
+install-dvi-am:
+
+install-exec-am:
+
+install-html: install-html-recursive
+
+install-html-am:
+
+install-info: install-info-recursive
+
+install-info-am:
+
+install-man:
+
+install-pdf: install-pdf-recursive
+
+install-pdf-am:
+
+install-ps: install-ps-recursive
+
+install-ps-am:
+
+installcheck-am:
+
+maintainer-clean: maintainer-clean-recursive
+ -rm -f Makefile
+maintainer-clean-am: distclean-am maintainer-clean-generic
+
+mostlyclean: mostlyclean-recursive
+
+mostlyclean-am: mostlyclean-generic
+
+pdf: pdf-recursive
+
+pdf-am:
+
+ps: ps-recursive
+
+ps-am:
+
+uninstall-am:
+
+.MAKE: $(am__recursive_targets) install-am install-strip
+
+.PHONY: $(am__recursive_targets) CTAGS GTAGS TAGS all all-am check \
+ check-am clean clean-generic cscopelist-am ctags ctags-am \
+ distclean distclean-generic distclean-tags distdir dvi dvi-am \
+ html html-am info info-am install install-am install-data \
+ install-data-am install-dvi install-dvi-am install-exec \
+ install-exec-am install-html install-html-am install-info \
+ install-info-am install-man install-pdf install-pdf-am \
+ install-ps install-ps-am install-strip installcheck \
+ installcheck-am installdirs installdirs-am maintainer-clean \
+ maintainer-clean-generic mostlyclean mostlyclean-generic pdf \
+ pdf-am ps ps-am tags tags-am uninstall uninstall-am
+
+
+# Tell versions [3.59,3.63) of GNU make to not export all variables.
+# Otherwise a system limit (for SysV at least) may be exceeded.
+.NOEXPORT:
diff --git a/collectors/README.md b/collectors/README.md
new file mode 100644
index 000000000..b7fc73286
--- /dev/null
+++ b/collectors/README.md
@@ -0,0 +1,118 @@
+# Data Collection Plugins
+
+netdata supports **internal** and **external** data collection plugins:
+
+- **internal** plugins are written in `C` and run as threads inside the netdata daemon.
+
+- **external** plugins may be written in any programming language and are spawned as independent long-running processes by the netdata daemon.
+ They communicate with the netdata daemon via `pipes` (`stdout` communication).
+
+To minimize the number of processes spawned for data collection, netdata also supports **plugin orchestrators**.
+
+- **plugin orchestrators** are external plugins that do not collect any data by themselves.
+ Instead, they support data collection **modules** written in the language of the orchestrator.
+ Usually the orchestrator provides a higher level abstraction, making it ideal for writing new
+ data collection modules with the minimum of code.
+
+ Currently netdata provides three plugin orchestrators:
+ [charts.d.plugin](charts.d.plugin) for BASH v4+,
+ [node.d.plugin](node.d.plugin) for node.js and
+ [python.d.plugin](python.d.plugin) for python v2+ (including v3).
+
+## Netdata Plugins
+
+plugin|lang|O/S|runs as|modular|description
+:---:|:---:|:---:|:---:|:---:|:---
+[apps.plugin](apps.plugin/)|`C`|linux, freebsd|external|-|monitors the whole process tree on Linux and FreeBSD and breaks down system resource usage by **process**, **user** and **user group**.
+[cgroups.plugin](cgroups.plugin/)|`C`|linux|internal|-|collects resource usage of **Containers**, libvirt **VMs** and **systemd services**, on Linux systems
+[charts.d.plugin](charts.d.plugin/)|`BASH` v4+|any|external|yes|a **plugin orchestrator** for data collection modules written in `BASH` v4+.
+[checks.plugin](checks.plugin/)|`C`|any|internal|-|a debugging plugin (by default it is disabled)
+[diskspace.plugin](diskspace.plugin/)|`C`|linux|internal|-|collects disk space usage metrics on Linux mount points
+[fping.plugin](fping.plugin/)|`C`|any|external|-|measures network latency, jitter and packet loss between the monitored node and any number of remote network end points.
+[freebsd.plugin](freebsd.plugin/)|`C`|freebsd|internal|yes|collects resource usage and performance data on FreeBSD systems
+[freeipmi.plugin](freeipmi.plugin/)|`C`|linux, freebsd|external|-|collects metrics from enterprise hardware sensors, on Linux and FreeBSD servers.
+[idlejitter.plugin](idlejitter.plugin/)|`C`|any|internal|-|measures CPU latency and jitter on all operating systems
+[macos.plugin](macos.plugin/)|`C`|macos|internal|yes|collects resource usage and performance data on MacOS systems
+[nfacct.plugin](nfacct.plugin/)|`C`|linux|internal|-|collects netfilter firewall, connection tracker and accounting metrics using `libmnl` and `libnetfilter_acct`
+[node.d.plugin](node.d.plugin/)|`node.js`|any|external|yes|a **plugin orchestrator** for data collection modules written in `node.js`.
+[plugins.d](plugins.d/)|`C`|any|internal|-|implements the **external plugins** API and serves external plugins
+[proc.plugin](proc.plugin/)|`C`|linux|internal|yes|collects resource usage and performance data on Linux systems
+[python.d.plugin](python.d.plugin/)|`python` v2+|any|external|yes|a **plugin orchestrator** for data collection modules written in `python` v2 or v3 (both are supported).
+[statsd.plugin](statsd.plugin/)|`C`|any|internal|-|implements a high performance **statsd** server for netdata
+[tc.plugin](tc.plugin/)|`C`|linux|internal|-|collects traffic QoS metrics (`tc`) of Linux network interfaces
+
+## Enabling and Disabling plugins
+
+Each plugin can be enabled or disabled via `netdata.conf`, section `[plugins]`.
+
+This section contains a list of all the plugins, each with a boolean setting to enable or disable it.
+
+The exception is `statsd.plugin`, which has its own `[statsd]` section.
+
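+For example, the `[plugins]` section may look like this (a sketch - the entries shown are illustrative; your `netdata.conf` lists the plugins available in your build):
+
+```
+[plugins]
+    # proc = yes
+    # diskspace = yes
+    apps = yes
+    charts.d = no
+```
+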
+Once a plugin is enabled, consult the page of each plugin for additional configuration options.
+
+All **external plugins** are managed by [plugins.d](plugins.d/), which provides additional management options.
+
+### Internal Plugins
+
+Each of the internal plugins runs as a thread inside the netdata daemon.
+Once this thread has started, the plugin may spawn additional threads according to its design.
+
+#### Internal Plugins API
+
+The internal data collection API consists of the following calls:
+
+```c
+collect_data() {
+ // collect data here (one iteration)
+
+ collected_number collected_value = collect_a_value();
+
+ // give the metrics to netdata
+
+ static RRDSET *st = NULL; // the chart
+ static RRDDIM *rd = NULL; // a dimension attached to this chart
+
+ if(unlikely(!st)) {
+ // we haven't created this chart before
+ // create it now
+ st = rrdset_create_localhost(
+ "type"
+ , "id"
+ , "name"
+ , "family"
+ , "context"
+ , "Chart Title"
+ , "units"
+ , "plugin-name"
+ , "module-name"
+ , priority
+ , update_every
+ , chart_type
+ );
+
+ // attach a metric to it
+ rd = rrddim_add(st, "id", "name", multiplier, divider, algorithm);
+ }
+ else {
+ // this chart is already created
+ // let netdata know we start a new iteration on it
+ rrdset_next(st);
+ }
+
+ // give the collected value(s) to the chart
+ rrddim_set_by_pointer(st, rd, collected_value);
+
+ // signal netdata we are done with this iteration
+ rrdset_done(st);
+}
+```
+
+Of course, netdata provides a number of libraries to help you collect the metrics.
+The best way to find your way through this is to examine what other similar plugins do.
+
+
+### External Plugins
+
+**External plugins** communicate with the netdata daemon over the external plugins API and are managed by [plugins.d](plugins.d/).
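+
+For example, a minimal external plugin can be a shell script that describes a chart once and then streams values to its standard output, one iteration at a time (a sketch assuming the plugins.d text protocol - the chart, dimension and field values below are illustrative; see [plugins.d](plugins.d/) for the exact keywords and fields):
+
+```sh
+#!/usr/bin/env bash
+# describe a chart and its dimension, once
+echo "CHART example.random '' 'A random number' 'value' random random line 90000 1"
+echo "DIMENSION random '' absolute 1 1"
+
+# then send one collected value per iteration
+while true; do
+    echo "BEGIN example.random"
+    echo "SET random = $RANDOM"
+    echo "END"
+    sleep 1
+done
+```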
+
diff --git a/collectors/all.h b/collectors/all.h
new file mode 100644
index 000000000..aa19bd5bd
--- /dev/null
+++ b/collectors/all.h
@@ -0,0 +1,318 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_ALL_H
+#define NETDATA_ALL_H 1
+
+#include "../daemon/common.h"
+
+// netdata internal data collection plugins
+
+#include "checks.plugin/plugin_checks.h"
+#include "freebsd.plugin/plugin_freebsd.h"
+#include "idlejitter.plugin/plugin_idlejitter.h"
+#include "cgroups.plugin/sys_fs_cgroup.h"
+#include "diskspace.plugin/plugin_diskspace.h"
+#include "nfacct.plugin/plugin_nfacct.h"
+#include "proc.plugin/plugin_proc.h"
+#include "tc.plugin/plugin_tc.h"
+#include "macos.plugin/plugin_macos.h"
+#include "statsd.plugin/statsd.h"
+
+#include "plugins.d/plugins_d.h"
+
+
+// ----------------------------------------------------------------------------
+// netdata chart priorities
+
+// This is a work in progress - the scope is to collect here all chart priorities.
+// These should be based on the CONTEXT of the charts + the chart id when needed
+// - for each SECTION +1000 (or +X000 for big sections)
+// - for each FAMILY +100
+// - for each CHART +10
+
+#define NETDATA_CHART_PRIO_SYSTEM_CPU 100
+#define NETDATA_CHART_PRIO_SYSTEM_LOAD 100
+#define NETDATA_CHART_PRIO_SYSTEM_IO 150
+#define NETDATA_CHART_PRIO_SYSTEM_PGPGIO 151
+#define NETDATA_CHART_PRIO_SYSTEM_RAM 200
+#define NETDATA_CHART_PRIO_SYSTEM_SWAP 201
+#define NETDATA_CHART_PRIO_SYSTEM_SWAPIO 250
+#define NETDATA_CHART_PRIO_SYSTEM_NET 500
+#define NETDATA_CHART_PRIO_SYSTEM_IPV4 500 // freebsd only
+#define NETDATA_CHART_PRIO_SYSTEM_IP 501
+#define NETDATA_CHART_PRIO_SYSTEM_IPV6 502
+#define NETDATA_CHART_PRIO_SYSTEM_PROCESSES 600
+#define NETDATA_CHART_PRIO_SYSTEM_FORKS 700
+#define NETDATA_CHART_PRIO_SYSTEM_ACTIVE_PROCESSES 750
+#define NETDATA_CHART_PRIO_SYSTEM_CTXT 800
+#define NETDATA_CHART_PRIO_SYSTEM_IDLEJITTER 800
+#define NETDATA_CHART_PRIO_SYSTEM_INTR 900
+#define NETDATA_CHART_PRIO_SYSTEM_SOFTIRQS 950
+#define NETDATA_CHART_PRIO_SYSTEM_SOFTNET_STAT 955
+#define NETDATA_CHART_PRIO_SYSTEM_INTERRUPTS 1000
+#define NETDATA_CHART_PRIO_SYSTEM_DEV_INTR 1000 // freebsd only
+#define NETDATA_CHART_PRIO_SYSTEM_SOFT_INTR 1100 // freebsd only
+#define NETDATA_CHART_PRIO_SYSTEM_ENTROPY 1000
+#define NETDATA_CHART_PRIO_SYSTEM_UPTIME 1000
+#define NETDATA_CHART_PRIO_SYSTEM_IPC_MSQ_QUEUES 990 // freebsd only
+#define NETDATA_CHART_PRIO_SYSTEM_IPC_MSQ_MESSAGES 1000 // freebsd only
+#define NETDATA_CHART_PRIO_SYSTEM_IPC_MSQ_SIZE 1100 // freebsd only
+#define NETDATA_CHART_PRIO_SYSTEM_IPC_SEMAPHORES 1000
+#define NETDATA_CHART_PRIO_SYSTEM_IPC_SEM_ARRAYS 1000
+#define NETDATA_CHART_PRIO_SYSTEM_IPC_SHARED_MEM_SEGS 1000 // freebsd only
+#define NETDATA_CHART_PRIO_SYSTEM_IPC_SHARED_MEM_SIZE 1000 // freebsd only
+#define NETDATA_CHART_PRIO_SYSTEM_PACKETS 7001 // freebsd only
+
+
+// CPU per core
+
+#define NETDATA_CHART_PRIO_CPU_PER_CORE 1000 // +1 per core
+#define NETDATA_CHART_PRIO_CPU_TEMPERATURE 1050 // freebsd only
+#define NETDATA_CHART_PRIO_CPUFREQ_SCALING_CUR_FREQ 5003 // freebsd only
+
+#define NETDATA_CHART_PRIO_CORE_THROTTLING 5001
+#define NETDATA_CHART_PRIO_PACKAGE_THROTTLING 5002
+
+// Interrupts per core
+
+#define NETDATA_CHART_PRIO_INTERRUPTS_PER_CORE 1100 // +1 per core
+
+// Memory Section - 1xxx
+
+#define NETDATA_CHART_PRIO_MEM_SYSTEM_AVAILABLE 1010
+#define NETDATA_CHART_PRIO_MEM_SYSTEM_COMMITTED 1020
+#define NETDATA_CHART_PRIO_MEM_SYSTEM_PGFAULTS 1030
+#define NETDATA_CHART_PRIO_MEM_KERNEL 1100
+#define NETDATA_CHART_PRIO_MEM_SLAB 1200
+#define NETDATA_CHART_PRIO_MEM_HUGEPAGES 1250
+#define NETDATA_CHART_PRIO_MEM_KSM 1300
+#define NETDATA_CHART_PRIO_MEM_KSM_SAVINGS 1301
+#define NETDATA_CHART_PRIO_MEM_KSM_RATIOS 1302
+#define NETDATA_CHART_PRIO_MEM_NUMA 1400
+#define NETDATA_CHART_PRIO_MEM_NUMA_NODES 1410
+#define NETDATA_CHART_PRIO_MEM_HW 1500
+#define NETDATA_CHART_PRIO_MEM_HW_ECC_CE 1550
+#define NETDATA_CHART_PRIO_MEM_HW_ECC_UE 1560
+
+// Disks
+
+#define NETDATA_CHART_PRIO_DISK_IO 2000
+#define NETDATA_CHART_PRIO_DISK_OPS 2001
+#define NETDATA_CHART_PRIO_DISK_QOPS 2002
+#define NETDATA_CHART_PRIO_DISK_BACKLOG 2003
+#define NETDATA_CHART_PRIO_DISK_UTIL 2004
+#define NETDATA_CHART_PRIO_DISK_AWAIT 2005
+#define NETDATA_CHART_PRIO_DISK_AVGSZ 2006
+#define NETDATA_CHART_PRIO_DISK_SVCTM 2007
+#define NETDATA_CHART_PRIO_DISK_MOPS 2021
+#define NETDATA_CHART_PRIO_DISK_IOTIME 2022
+#define NETDATA_CHART_PRIO_BCACHE_CACHE_ALLOC 2120
+#define NETDATA_CHART_PRIO_BCACHE_HIT_RATIO 2120
+#define NETDATA_CHART_PRIO_BCACHE_RATES 2121
+#define NETDATA_CHART_PRIO_BCACHE_SIZE 2122
+#define NETDATA_CHART_PRIO_BCACHE_USAGE 2123
+#define NETDATA_CHART_PRIO_BCACHE_OPS 2124
+#define NETDATA_CHART_PRIO_BCACHE_BYPASS 2125
+#define NETDATA_CHART_PRIO_BCACHE_CACHE_READ_RACES 2126
+
+#define NETDATA_CHART_PRIO_DISKSPACE_SPACE 2023
+#define NETDATA_CHART_PRIO_DISKSPACE_INODES 2024
+
+// NFS (server)
+
+#define NETDATA_CHART_PRIO_NFSD_READCACHE 2100
+#define NETDATA_CHART_PRIO_NFSD_FILEHANDLES 2101
+#define NETDATA_CHART_PRIO_NFSD_IO 2102
+#define NETDATA_CHART_PRIO_NFSD_THREADS 2103
+#define NETDATA_CHART_PRIO_NFSD_THREADS_FULLCNT 2104
+#define NETDATA_CHART_PRIO_NFSD_THREADS_HISTOGRAM 2105
+#define NETDATA_CHART_PRIO_NFSD_READAHEAD 2105
+#define NETDATA_CHART_PRIO_NFSD_NET 2107
+#define NETDATA_CHART_PRIO_NFSD_RPC 2108
+#define NETDATA_CHART_PRIO_NFSD_PROC2 2109
+#define NETDATA_CHART_PRIO_NFSD_PROC3 2110
+#define NETDATA_CHART_PRIO_NFSD_PROC4 2111
+#define NETDATA_CHART_PRIO_NFSD_PROC4OPS 2112
+
+// NFS (client)
+
+#define NETDATA_CHART_PRIO_NFS_NET 2207
+#define NETDATA_CHART_PRIO_NFS_RPC 2208
+#define NETDATA_CHART_PRIO_NFS_PROC2 2209
+#define NETDATA_CHART_PRIO_NFS_PROC3 2210
+#define NETDATA_CHART_PRIO_NFS_PROC4 2211
+
+// BTRFS
+
+#define NETDATA_CHART_PRIO_BTRFS_DISK 2300
+#define NETDATA_CHART_PRIO_BTRFS_DATA 2301
+#define NETDATA_CHART_PRIO_BTRFS_METADATA 2302
+#define NETDATA_CHART_PRIO_BTRFS_SYSTEM 2303
+
+// ZFS
+
+#define NETDATA_CHART_PRIO_ZFS_ARC_SIZE 2500
+#define NETDATA_CHART_PRIO_ZFS_L2_SIZE 2500
+#define NETDATA_CHART_PRIO_ZFS_READS 2510
+#define NETDATA_CHART_PRIO_ZFS_ACTUAL_HITS 2519
+#define NETDATA_CHART_PRIO_ZFS_ARC_SIZE_BREAKDOWN 2520
+#define NETDATA_CHART_PRIO_ZFS_IMPORTANT_OPS 2522
+#define NETDATA_CHART_PRIO_ZFS_MEMORY_OPS 2523
+#define NETDATA_CHART_PRIO_ZFS_IO 2700
+#define NETDATA_CHART_PRIO_ZFS_HITS 2520
+#define NETDATA_CHART_PRIO_ZFS_DHITS 2530
+#define NETDATA_CHART_PRIO_ZFS_DEMAND_DATA_HITS 2531
+#define NETDATA_CHART_PRIO_ZFS_PREFETCH_DATA_HITS 2532
+#define NETDATA_CHART_PRIO_ZFS_PHITS 2540
+#define NETDATA_CHART_PRIO_ZFS_MHITS 2550
+#define NETDATA_CHART_PRIO_ZFS_L2HITS 2560
+#define NETDATA_CHART_PRIO_ZFS_LIST_HITS 2600
+#define NETDATA_CHART_PRIO_ZFS_HASH_ELEMENTS 2800
+#define NETDATA_CHART_PRIO_ZFS_HASH_CHAINS 2810
+
+
+// SOFTIRQs
+
+#define NETDATA_CHART_PRIO_SOFTIRQS_PER_CORE 3000 // +1 per core
+
+// IPFW (freebsd)
+
+#define NETDATA_CHART_PRIO_IPFW_PACKETS 3001
+#define NETDATA_CHART_PRIO_IPFW_BYTES 3002
+#define NETDATA_CHART_PRIO_IPFW_ACTIVE 3003
+#define NETDATA_CHART_PRIO_IPFW_EXPIRED 3004
+#define NETDATA_CHART_PRIO_IPFW_MEM 3005
+
+
+// IPVS
+
+#define NETDATA_CHART_PRIO_IPVS_NET 3100
+#define NETDATA_CHART_PRIO_IPVS_SOCKETS 3101
+#define NETDATA_CHART_PRIO_IPVS_PACKETS 3102
+
+// Softnet
+
+#define NETDATA_CHART_PRIO_SOFTNET_PER_CORE 4101 // +1 per core
+
+// IP STACK
+
+#define NETDATA_CHART_PRIO_IP_ERRORS 4100
+#define NETDATA_CHART_PRIO_IP_TCP_CONNABORTS 4210
+#define NETDATA_CHART_PRIO_IP_TCP_SYN_QUEUE 4215
+#define NETDATA_CHART_PRIO_IP_TCP_ACCEPT_QUEUE 4216
+#define NETDATA_CHART_PRIO_IP_TCP_REORDERS 4220
+#define NETDATA_CHART_PRIO_IP_TCP_OFO 4250
+#define NETDATA_CHART_PRIO_IP_TCP_SYNCOOKIES 4260
+#define NETDATA_CHART_PRIO_IP_TCP_MEM 4290
+#define NETDATA_CHART_PRIO_IP_BCAST 4500
+#define NETDATA_CHART_PRIO_IP_BCAST_PACKETS 4510
+#define NETDATA_CHART_PRIO_IP_MCAST 4600
+#define NETDATA_CHART_PRIO_IP_MCAST_PACKETS 4610
+#define NETDATA_CHART_PRIO_IP_ECN 4700
+
+// IPv4
+
+#define NETDATA_CHART_PRIO_IPV4_SOCKETS 5100
+#define NETDATA_CHART_PRIO_IPV4_PACKETS 5130
+#define NETDATA_CHART_PRIO_IPV4_ERRORS 5150
+#define NETDATA_CHART_PRIO_IPV4_ICMP 5170
+#define NETDATA_CHART_PRIO_IPV4_TCP 5200
+#define NETDATA_CHART_PRIO_IPV4_TCP_SOCKETS 5201
+#define NETDATA_CHART_PRIO_IPV4_TCP_MEM 5290
+#define NETDATA_CHART_PRIO_IPV4_UDP 5300
+#define NETDATA_CHART_PRIO_IPV4_UDP_MEM 5390
+#define NETDATA_CHART_PRIO_IPV4_UDPLITE 5400
+#define NETDATA_CHART_PRIO_IPV4_RAW 5450
+#define NETDATA_CHART_PRIO_IPV4_FRAGMENTS 5460
+#define NETDATA_CHART_PRIO_IPV4_FRAGMENTS_MEM 5470
+
+// IPv6
+
+#define NETDATA_CHART_PRIO_IPV6_PACKETS 6200
+#define NETDATA_CHART_PRIO_IPV6_ECT 6210
+#define NETDATA_CHART_PRIO_IPV6_ERRORS 6300
+#define NETDATA_CHART_PRIO_IPV6_FRAGMENTS 6400
+#define NETDATA_CHART_PRIO_IPV6_FRAGSOUT 6401
+#define NETDATA_CHART_PRIO_IPV6_FRAGSIN 6402
+#define NETDATA_CHART_PRIO_IPV6_TCP 6500
+#define NETDATA_CHART_PRIO_IPV6_UDP 6600
+#define NETDATA_CHART_PRIO_IPV6_UDP_PACKETS 6601
+#define NETDATA_CHART_PRIO_IPV6_UDP_ERRORS 6610
+#define NETDATA_CHART_PRIO_IPV6_UDPLITE 6700
+#define NETDATA_CHART_PRIO_IPV6_UDPLITE_PACKETS 6701
+#define NETDATA_CHART_PRIO_IPV6_UDPLITE_ERRORS 6710
+#define NETDATA_CHART_PRIO_IPV6_RAW 6800
+#define NETDATA_CHART_PRIO_IPV6_BCAST 6840
+#define NETDATA_CHART_PRIO_IPV6_MCAST 6850
+#define NETDATA_CHART_PRIO_IPV6_MCAST_PACKETS 6851
+#define NETDATA_CHART_PRIO_IPV6_ICMP 6900
+#define NETDATA_CHART_PRIO_IPV6_ICMP_REDIR 6910
+#define NETDATA_CHART_PRIO_IPV6_ICMP_ERRORS 6920
+#define NETDATA_CHART_PRIO_IPV6_ICMP_ECHOS 6930
+#define NETDATA_CHART_PRIO_IPV6_ICMP_GROUPMEMB 6940
+#define NETDATA_CHART_PRIO_IPV6_ICMP_ROUTER 6950
+#define NETDATA_CHART_PRIO_IPV6_ICMP_NEIGHBOR 6960
+#define NETDATA_CHART_PRIO_IPV6_ICMP_LDV2 6970
+#define NETDATA_CHART_PRIO_IPV6_ICMP_TYPES 6980
+
+
+// Network interfaces
+
+#define NETDATA_CHART_PRIO_FIRST_NET_IFACE 7000 // 6 charts per interface
+#define NETDATA_CHART_PRIO_FIRST_NET_PACKETS 7001
+#define NETDATA_CHART_PRIO_FIRST_NET_ERRORS 7002
+#define NETDATA_CHART_PRIO_FIRST_NET_DROPS 7003
+#define NETDATA_CHART_PRIO_FIRST_NET_EVENTS 7006
+#define NETDATA_CHART_PRIO_CGROUP_NET_IFACE 43000
+
+// SCTP
+
+#define NETDATA_CHART_PRIO_SCTP 7000
+
+// QoS
+
+#define NETDATA_CHART_PRIO_TC_QOS 7000
+#define NETDATA_CHART_PRIO_TC_QOS_PACKETS 7010
+#define NETDATA_CHART_PRIO_TC_QOS_DROPPED 7020
+#define NETDATA_CHART_PRIO_TC_QOS_TOCKENS 7030
+#define NETDATA_CHART_PRIO_TC_QOS_CTOCKENS 7040
+
+
+// Netfilter
+
+#define NETDATA_CHART_PRIO_NETFILTER_SOCKETS 8700
+#define NETDATA_CHART_PRIO_NETFILTER_NEW 8701
+#define NETDATA_CHART_PRIO_NETFILTER_CHANGES 8702
+#define NETDATA_CHART_PRIO_NETFILTER_EXPECT 8703
+#define NETDATA_CHART_PRIO_NETFILTER_ERRORS 8705
+#define NETDATA_CHART_PRIO_NETFILTER_SEARCH 8710
+
+#define NETDATA_CHART_PRIO_NETFILTER_PACKETS 8906
+#define NETDATA_CHART_PRIO_NETFILTER_BYTES 8907
+
+// SYNPROXY
+
+#define NETDATA_CHART_PRIO_SYNPROXY_SYN_RECEIVED 8751
+#define NETDATA_CHART_PRIO_SYNPROXY_COOKIES 8752
+#define NETDATA_CHART_PRIO_SYNPROXY_CONN_OPEN 8753
+#define NETDATA_CHART_PRIO_SYNPROXY_ENTRIES 8754
+
+// CGROUPS
+
+#define NETDATA_CHART_PRIO_CGROUPS_SYSTEMD 19000 // many charts
+#define NETDATA_CHART_PRIO_CGROUPS_CONTAINERS 40000 // many charts
+
+// STATSD
+
+#define NETDATA_CHART_PRIO_STATSD_PRIVATE 90000 // many charts
+
+// INTERNAL NETDATA INFO
+
+#define NETDATA_CHART_PRIO_CHECKS 99999
+
+#define NETDATA_CHART_PRIO_NETDATA_DISKSPACE 132020
+#define NETDATA_CHART_PRIO_NETDATA_TC_CPU 135000
+#define NETDATA_CHART_PRIO_NETDATA_TC_TIME 135001
+
+
+#endif //NETDATA_ALL_H
diff --git a/collectors/apps.plugin/Makefile.am b/collectors/apps.plugin/Makefile.am
new file mode 100644
index 000000000..be0306492
--- /dev/null
+++ b/collectors/apps.plugin/Makefile.am
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+AUTOMAKE_OPTIONS = subdir-objects
+
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+
+dist_noinst_DATA = \
+ README.md \
+ $(NULL)
+
+dist_libconfig_DATA = \
+ apps_groups.conf \
+ $(NULL)
diff --git a/collectors/apps.plugin/Makefile.in b/collectors/apps.plugin/Makefile.in
new file mode 100644
index 000000000..38120c048
--- /dev/null
+++ b/collectors/apps.plugin/Makefile.in
@@ -0,0 +1,521 @@
+# Makefile.in generated by automake 1.14.1 from Makefile.am.
+# @configure_input@
+
+# Copyright (C) 1994-2013 Free Software Foundation, Inc.
+
+# This Makefile.in is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
+# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE.
+
+@SET_MAKE@
+
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+VPATH = @srcdir@
+am__is_gnu_make = test -n '$(MAKEFILE_LIST)' && test -n '$(MAKELEVEL)'
+am__make_running_with_option = \
+ case $${target_option-} in \
+ ?) ;; \
+ *) echo "am__make_running_with_option: internal error: invalid" \
+ "target option '$${target_option-}' specified" >&2; \
+ exit 1;; \
+ esac; \
+ has_opt=no; \
+ sane_makeflags=$$MAKEFLAGS; \
+ if $(am__is_gnu_make); then \
+ sane_makeflags=$$MFLAGS; \
+ else \
+ case $$MAKEFLAGS in \
+ *\\[\ \ ]*) \
+ bs=\\; \
+ sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \
+ | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \
+ esac; \
+ fi; \
+ skip_next=no; \
+ strip_trailopt () \
+ { \
+ flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \
+ }; \
+ for flg in $$sane_makeflags; do \
+ test $$skip_next = yes && { skip_next=no; continue; }; \
+ case $$flg in \
+ *=*|--*) continue;; \
+ -*I) strip_trailopt 'I'; skip_next=yes;; \
+ -*I?*) strip_trailopt 'I';; \
+ -*O) strip_trailopt 'O'; skip_next=yes;; \
+ -*O?*) strip_trailopt 'O';; \
+ -*l) strip_trailopt 'l'; skip_next=yes;; \
+ -*l?*) strip_trailopt 'l';; \
+ -[dEDm]) skip_next=yes;; \
+ -[JT]) skip_next=yes;; \
+ esac; \
+ case $$flg in \
+ *$$target_option*) has_opt=yes; break;; \
+ esac; \
+ done; \
+ test $$has_opt = yes
+am__make_dryrun = (target_option=n; $(am__make_running_with_option))
+am__make_keepgoing = (target_option=k; $(am__make_running_with_option))
+pkgdatadir = $(datadir)/@PACKAGE@
+pkgincludedir = $(includedir)/@PACKAGE@
+pkglibdir = $(libdir)/@PACKAGE@
+pkglibexecdir = $(libexecdir)/@PACKAGE@
+am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd
+install_sh_DATA = $(install_sh) -c -m 644
+install_sh_PROGRAM = $(install_sh) -c
+install_sh_SCRIPT = $(install_sh) -c
+INSTALL_HEADER = $(INSTALL_DATA)
+transform = $(program_transform_name)
+NORMAL_INSTALL = :
+PRE_INSTALL = :
+POST_INSTALL = :
+NORMAL_UNINSTALL = :
+PRE_UNINSTALL = :
+POST_UNINSTALL = :
+build_triplet = @build@
+host_triplet = @host@
+subdir = collectors/apps.plugin
+DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/Makefile.am \
+ $(dist_libconfig_DATA) $(dist_noinst_DATA)
+ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
+am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \
+ $(top_srcdir)/build/m4/ax_c__generic.m4 \
+ $(top_srcdir)/build/m4/ax_c_lto.m4 \
+ $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \
+ $(top_srcdir)/build/m4/ax_c_mallopt.m4 \
+ $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \
+ $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \
+ $(top_srcdir)/build/m4/ax_pthread.m4 \
+ $(top_srcdir)/build/m4/jemalloc.m4 \
+ $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac
+am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
+ $(ACLOCAL_M4)
+mkinstalldirs = $(install_sh) -d
+CONFIG_HEADER = $(top_builddir)/config.h
+CONFIG_CLEAN_FILES =
+CONFIG_CLEAN_VPATH_FILES =
+AM_V_P = $(am__v_P_@AM_V@)
+am__v_P_ = $(am__v_P_@AM_DEFAULT_V@)
+am__v_P_0 = false
+am__v_P_1 = :
+AM_V_GEN = $(am__v_GEN_@AM_V@)
+am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@)
+am__v_GEN_0 = @echo " GEN " $@;
+am__v_GEN_1 =
+AM_V_at = $(am__v_at_@AM_V@)
+am__v_at_ = $(am__v_at_@AM_DEFAULT_V@)
+am__v_at_0 = @
+am__v_at_1 =
+SOURCES =
+DIST_SOURCES =
+am__can_run_installinfo = \
+ case $$AM_UPDATE_INFO_DIR in \
+ n|no|NO) false;; \
+ *) (install-info --version) >/dev/null 2>&1;; \
+ esac
+am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`;
+am__vpath_adj = case $$p in \
+ $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \
+ *) f=$$p;; \
+ esac;
+am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`;
+am__install_max = 40
+am__nobase_strip_setup = \
+ srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'`
+am__nobase_strip = \
+ for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||"
+am__nobase_list = $(am__nobase_strip_setup); \
+ for p in $$list; do echo "$$p $$p"; done | \
+ sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \
+ $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \
+ if (++n[$$2] == $(am__install_max)) \
+ { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \
+ END { for (dir in files) print dir, files[dir] }'
+am__base_list = \
+ sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \
+ sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g'
+am__uninstall_files_from_dir = { \
+ test -z "$$files" \
+ || { test ! -d "$$dir" && test ! -f "$$dir" && test ! -r "$$dir"; } \
+ || { echo " ( cd '$$dir' && rm -f" $$files ")"; \
+ $(am__cd) "$$dir" && rm -f $$files; }; \
+ }
+am__installdirs = "$(DESTDIR)$(libconfigdir)"
+DATA = $(dist_libconfig_DATA) $(dist_noinst_DATA)
+am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP)
+DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
+ACLOCAL = @ACLOCAL@
+AMTAR = @AMTAR@
+AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@
+AUTOCONF = @AUTOCONF@
+AUTOHEADER = @AUTOHEADER@
+AUTOMAKE = @AUTOMAKE@
+AWK = @AWK@
+CC = @CC@
+CCDEPMODE = @CCDEPMODE@
+CFLAGS = @CFLAGS@
+CPP = @CPP@
+CPPFLAGS = @CPPFLAGS@
+CYGPATH_W = @CYGPATH_W@
+DEFS = @DEFS@
+DEPDIR = @DEPDIR@
+ECHO_C = @ECHO_C@
+ECHO_N = @ECHO_N@
+ECHO_T = @ECHO_T@
+EGREP = @EGREP@
+EXEEXT = @EXEEXT@
+GREP = @GREP@
+INSTALL = @INSTALL@
+INSTALL_DATA = @INSTALL_DATA@
+INSTALL_PROGRAM = @INSTALL_PROGRAM@
+INSTALL_SCRIPT = @INSTALL_SCRIPT@
+INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@
+IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@
+LDFLAGS = @LDFLAGS@
+LIBCAP_CFLAGS = @LIBCAP_CFLAGS@
+LIBCAP_LIBS = @LIBCAP_LIBS@
+LIBMNL_CFLAGS = @LIBMNL_CFLAGS@
+LIBMNL_LIBS = @LIBMNL_LIBS@
+LIBOBJS = @LIBOBJS@
+LIBS = @LIBS@
+LTLIBOBJS = @LTLIBOBJS@
+MAINT = @MAINT@
+MAKEINFO = @MAKEINFO@
+MATH_CFLAGS = @MATH_CFLAGS@
+MATH_LIBS = @MATH_LIBS@
+MKDIR_P = @MKDIR_P@
+NFACCT_CFLAGS = @NFACCT_CFLAGS@
+NFACCT_LIBS = @NFACCT_LIBS@
+OBJEXT = @OBJEXT@
+OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@
+OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@
+OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@
+OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@
+OPTIONAL_MATH_CLFAGS = @OPTIONAL_MATH_CLFAGS@
+OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@
+OPTIONAL_NFACCT_CLFAGS = @OPTIONAL_NFACCT_CLFAGS@
+OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@
+OPTIONAL_UUID_CLFAGS = @OPTIONAL_UUID_CLFAGS@
+OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@
+OPTIONAL_ZLIB_CLFAGS = @OPTIONAL_ZLIB_CLFAGS@
+OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@
+PACKAGE = @PACKAGE@
+PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@
+PACKAGE_NAME = @PACKAGE_NAME@
+PACKAGE_RPM_RELEASE = @PACKAGE_RPM_RELEASE@
+PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@
+PACKAGE_STRING = @PACKAGE_STRING@
+PACKAGE_TARNAME = @PACKAGE_TARNAME@
+PACKAGE_URL = @PACKAGE_URL@
+PACKAGE_VERSION = @PACKAGE_VERSION@
+PATH_SEPARATOR = @PATH_SEPARATOR@
+PKG_CONFIG = @PKG_CONFIG@
+PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@
+PKG_CONFIG_PATH = @PKG_CONFIG_PATH@
+PTHREAD_CC = @PTHREAD_CC@
+PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
+PTHREAD_LIBS = @PTHREAD_LIBS@
+SET_MAKE = @SET_MAKE@
+SHELL = @SHELL@
+SSE_CANDIDATE = @SSE_CANDIDATE@
+STRIP = @STRIP@
+UUID_CFLAGS = @UUID_CFLAGS@
+UUID_LIBS = @UUID_LIBS@
+VERSION = @VERSION@
+ZLIB_CFLAGS = @ZLIB_CFLAGS@
+ZLIB_LIBS = @ZLIB_LIBS@
+abs_builddir = @abs_builddir@
+abs_srcdir = @abs_srcdir@
+abs_top_builddir = @abs_top_builddir@
+abs_top_srcdir = @abs_top_srcdir@
+ac_ct_CC = @ac_ct_CC@
+am__include = @am__include@
+am__leading_dot = @am__leading_dot@
+am__quote = @am__quote@
+am__tar = @am__tar@
+am__untar = @am__untar@
+ax_pthread_config = @ax_pthread_config@
+bindir = @bindir@
+build = @build@
+build_alias = @build_alias@
+build_cpu = @build_cpu@
+build_os = @build_os@
+build_target = @build_target@
+build_vendor = @build_vendor@
+builddir = @builddir@
+cachedir = @cachedir@
+chartsdir = @chartsdir@
+configdir = @configdir@
+datadir = @datadir@
+datarootdir = @datarootdir@
+docdir = @docdir@
+dvidir = @dvidir@
+exec_prefix = @exec_prefix@
+has_jemalloc = @has_jemalloc@
+has_tcmalloc = @has_tcmalloc@
+host = @host@
+host_alias = @host_alias@
+host_cpu = @host_cpu@
+host_os = @host_os@
+host_vendor = @host_vendor@
+htmldir = @htmldir@
+includedir = @includedir@
+infodir = @infodir@
+install_sh = @install_sh@
+libconfigdir = @libconfigdir@
+libdir = @libdir@
+libexecdir = @libexecdir@
+localedir = @localedir@
+localstatedir = @localstatedir@
+logdir = @logdir@
+mandir = @mandir@
+mkdir_p = @mkdir_p@
+nodedir = @nodedir@
+oldincludedir = @oldincludedir@
+pdfdir = @pdfdir@
+pluginsdir = @pluginsdir@
+prefix = @prefix@
+program_transform_name = @program_transform_name@
+psdir = @psdir@
+pythondir = @pythondir@
+registrydir = @registrydir@
+sbindir = @sbindir@
+sharedstatedir = @sharedstatedir@
+srcdir = @srcdir@
+sysconfdir = @sysconfdir@
+target_alias = @target_alias@
+top_build_prefix = @top_build_prefix@
+top_builddir = @top_builddir@
+top_srcdir = @top_srcdir@
+varlibdir = @varlibdir@
+webdir = @webdir@
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+dist_noinst_DATA = \
+ README.md \
+ $(NULL)
+
+dist_libconfig_DATA = \
+ apps_groups.conf \
+ $(NULL)
+
+all: all-am
+
+.SUFFIXES:
+$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps)
+ @for dep in $?; do \
+ case '$(am__configure_deps)' in \
+ *$$dep*) \
+ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \
+ && { if test -f $@; then exit 0; else break; fi; }; \
+ exit 1;; \
+ esac; \
+ done; \
+ echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu collectors/apps.plugin/Makefile'; \
+ $(am__cd) $(top_srcdir) && \
+ $(AUTOMAKE) --gnu collectors/apps.plugin/Makefile
+.PRECIOUS: Makefile
+Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
+ @case '$?' in \
+ *config.status*) \
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \
+ *) \
+ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \
+ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \
+ esac;
+
+$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+
+$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(am__aclocal_m4_deps):
+install-dist_libconfigDATA: $(dist_libconfig_DATA)
+ @$(NORMAL_INSTALL)
+ @list='$(dist_libconfig_DATA)'; test -n "$(libconfigdir)" || list=; \
+ if test -n "$$list"; then \
+ echo " $(MKDIR_P) '$(DESTDIR)$(libconfigdir)'"; \
+ $(MKDIR_P) "$(DESTDIR)$(libconfigdir)" || exit 1; \
+ fi; \
+ for p in $$list; do \
+ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
+ echo "$$d$$p"; \
+ done | $(am__base_list) | \
+ while read files; do \
+ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(libconfigdir)'"; \
+ $(INSTALL_DATA) $$files "$(DESTDIR)$(libconfigdir)" || exit $$?; \
+ done
+
+uninstall-dist_libconfigDATA:
+ @$(NORMAL_UNINSTALL)
+ @list='$(dist_libconfig_DATA)'; test -n "$(libconfigdir)" || list=; \
+ files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
+ dir='$(DESTDIR)$(libconfigdir)'; $(am__uninstall_files_from_dir)
+tags TAGS:
+
+ctags CTAGS:
+
+cscope cscopelist:
+
+
+distdir: $(DISTFILES)
+ @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+ list='$(DISTFILES)'; \
+ dist_files=`for file in $$list; do echo $$file; done | \
+ sed -e "s|^$$srcdirstrip/||;t" \
+ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \
+ case $$dist_files in \
+ */*) $(MKDIR_P) `echo "$$dist_files" | \
+ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \
+ sort -u` ;; \
+ esac; \
+ for file in $$dist_files; do \
+ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \
+ if test -d $$d/$$file; then \
+ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \
+ if test -d "$(distdir)/$$file"; then \
+ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+ fi; \
+ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \
+ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \
+ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+ fi; \
+ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \
+ else \
+ test -f "$(distdir)/$$file" \
+ || cp -p $$d/$$file "$(distdir)/$$file" \
+ || exit 1; \
+ fi; \
+ done
+check-am: all-am
+check: check-am
+all-am: Makefile $(DATA)
+installdirs:
+ for dir in "$(DESTDIR)$(libconfigdir)"; do \
+ test -z "$$dir" || $(MKDIR_P) "$$dir"; \
+ done
+install: install-am
+install-exec: install-exec-am
+install-data: install-data-am
+uninstall: uninstall-am
+
+install-am: all-am
+ @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
+
+installcheck: installcheck-am
+install-strip:
+ if test -z '$(STRIP)'; then \
+ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+ install; \
+ else \
+ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+ "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \
+ fi
+mostlyclean-generic:
+
+clean-generic:
+
+distclean-generic:
+ -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
+ -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES)
+
+maintainer-clean-generic:
+ @echo "This command is intended for maintainers to use"
+ @echo "it deletes files that may require special tools to rebuild."
+ -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES)
+clean: clean-am
+
+clean-am: clean-generic mostlyclean-am
+
+distclean: distclean-am
+ -rm -f Makefile
+distclean-am: clean-am distclean-generic
+
+dvi: dvi-am
+
+dvi-am:
+
+html: html-am
+
+html-am:
+
+info: info-am
+
+info-am:
+
+install-data-am: install-dist_libconfigDATA
+
+install-dvi: install-dvi-am
+
+install-dvi-am:
+
+install-exec-am:
+
+install-html: install-html-am
+
+install-html-am:
+
+install-info: install-info-am
+
+install-info-am:
+
+install-man:
+
+install-pdf: install-pdf-am
+
+install-pdf-am:
+
+install-ps: install-ps-am
+
+install-ps-am:
+
+installcheck-am:
+
+maintainer-clean: maintainer-clean-am
+ -rm -f Makefile
+maintainer-clean-am: distclean-am maintainer-clean-generic
+
+mostlyclean: mostlyclean-am
+
+mostlyclean-am: mostlyclean-generic
+
+pdf: pdf-am
+
+pdf-am:
+
+ps: ps-am
+
+ps-am:
+
+uninstall-am: uninstall-dist_libconfigDATA
+
+.MAKE: install-am install-strip
+
+.PHONY: all all-am check check-am clean clean-generic cscopelist-am \
+ ctags-am distclean distclean-generic distdir dvi dvi-am html \
+ html-am info info-am install install-am install-data \
+ install-data-am install-dist_libconfigDATA install-dvi \
+ install-dvi-am install-exec install-exec-am install-html \
+ install-html-am install-info install-info-am install-man \
+ install-pdf install-pdf-am install-ps install-ps-am \
+ install-strip installcheck installcheck-am installdirs \
+ maintainer-clean maintainer-clean-generic mostlyclean \
+ mostlyclean-generic pdf pdf-am ps ps-am tags-am uninstall \
+ uninstall-am uninstall-dist_libconfigDATA
+
+
+# Tell versions [3.59,3.63) of GNU make to not export all variables.
+# Otherwise a system limit (for SysV at least) may be exceeded.
+.NOEXPORT:
diff --git a/collectors/apps.plugin/README.md b/collectors/apps.plugin/README.md
new file mode 100644
index 000000000..05680efe8
--- /dev/null
+++ b/collectors/apps.plugin/README.md
@@ -0,0 +1,372 @@
+# apps.plugin
+
+`apps.plugin` breaks down system resource usage by **processes**, **users** and **user groups**.
+
+To achieve this task, it iterates through the whole process tree, collecting resource usage information
+for every process found running.
+
+Since netdata needs to present this information in charts and track them through time,
+instead of presenting a `top`-like list, `apps.plugin` uses a pre-defined list of **process groups**
+to which it assigns all running processes. This list is [customizable](apps_groups.conf) and netdata
+ships with a good default for most cases (to edit it on your system run `/etc/netdata/edit-config apps_groups.conf`).
+
+So, `apps.plugin` builds a process tree (much like `ps fax` does in Linux), and groups
+processes together (evaluating both child and parent processes) so that the result is always a list with
+a predefined set of members (of course, only process groups found running are reported).
+
+> If you find that `apps.plugin` categorizes standard applications as `other`, we would be
+> glad to accept pull requests improving the [defaults](apps_groups.conf) shipped with netdata.
+
+Unlike traditional process monitoring tools (like `top`), `apps.plugin` is able to account for the resource
+utilization of exited processes. Their utilization is attributed to their currently running parents.
+So, `apps.plugin` can accurately measure the resources used by shell scripts and other processes
+that fork/spawn short-lived child processes hundreds of times per second.
+
+## Charts
+
+`apps.plugin` provides charts for 3 sections:
+
+1. Per application charts as **Applications** at netdata dashboards
+2. Per user charts as **Users** at netdata dashboards
+3. Per user group charts as **User Groups** at netdata dashboards
+
+Each of these sections provides the same number of charts:
+
+- CPU Utilization
+ - Total CPU usage
+ - User / System CPU usage
+- Disk I/O
+ - Physical Reads / Writes
+ - Logical Reads / Writes
+ - Open Unique Files (if a file is found open multiple times, it is counted just once)
+- Memory
+ - Real Memory Used (non shared)
+ - Virtual Memory Allocated
+ - Minor Page Faults (i.e. memory activity)
+- Processes
+ - Threads Running
+ - Processes Running
+ - Pipes Open
+- Swap Memory
+ - Swap Memory Used
+ - Major Page Faults (i.e. swap activity)
+- Network
+ - Sockets Open
+
+The above are reported:
+
+- For **Applications** per [target configured](apps_groups.conf).
+- For **Users** per username or UID (when the username is not available).
+- For **User Groups** per groupname or GID (when groupname is not available).
+
+## Performance
+
+`apps.plugin` is a complex piece of software and has a lot of work to do.
+We are proud that `apps.plugin` is a lot faster than any other similar tool,
+while collecting a lot more information about the processes; the fact remains, however, that
+this plugin requires more CPU resources than the netdata daemon itself.
+
+Under Linux, for each process running, `apps.plugin` reads several `/proc` files
+per process. Doing this work per-second, especially on hosts with several thousands
+of processes, may increase the CPU resources consumed by the plugin.
+
+In such cases, you may need to lower its data collection frequency.
+
+To do this, edit `/etc/netdata/netdata.conf` and find this section:
+
+```
+[plugin:apps]
+ # update every = 1
+ # command options =
+```
+
+Uncomment the line `update every` and set it to a higher number. If you just set it to ` 2 `,
+its CPU resources will be cut in half, and data collection will happen once every 2 seconds.
+
+## Configuration
+
+The configuration file is `/etc/netdata/apps_groups.conf` (the default is [here](apps_groups.conf)).
+To edit it on your system run `/etc/netdata/edit-config apps_groups.conf`.
+
+The configuration file accepts multiple lines, each having this format:
+
+```txt
+group: process1 process2 ...
+```
+
+Each group can be given multiple times, to add more processes to it.
+
+For the **Applications** section, only groups configured in this file are reported.
+All other processes will be reported as `other`.
+
+For each process given, its whole process tree will be grouped, not just the process matched.
+The plugin will include both parents and children.
+
+The process names are the ones returned by:
+
+ - `ps -e` or `cat /proc/PID/stat`
+ - in case of substring mode (see below): `/proc/PID/cmdline`
+
+To add process names with spaces, enclose them in quotes (single or double),
+for example: ` 'Plex Media Serv' ` or ` "my other process" `.
+
+You can add an asterisk ` * ` at the beginning and/or the end of a process:
+
+ - `*name` *suffix* mode: will search for processes ending with `name` (at `/proc/PID/stat`)
+ - `name*` *prefix* mode: will search for processes beginning with `name` (at `/proc/PID/stat`)
+ - `*name*` *substring* mode: will search for `name` in the whole command line (at `/proc/PID/cmdline`)
+
+If you enter even just one *name* (substring), `apps.plugin` will process
+`/proc/PID/cmdline` for all processes (of course only once per process: when they are first seen).
+
+To add processes with single quotes, enclose them in double quotes: ` "process with this ' single quote" `
+
+To add processes with double quotes, enclose them in single quotes: ` 'process with this " double quote' `
+
+If a group or process name starts with a ` - `, the dimension will be hidden from the chart (cpu chart only).
+
+If a process starts with a ` + `, debugging will be enabled for it (debugging produces a lot of output - do not enable it in production systems).
+
+You can add any number of groups. Only the ones found running will affect the charts generated.
+However, producing charts with hundreds of dimensions may slow down your web browser.
+
+The order of the entries in this list is important: the first that matches a process is used, so put important
+ones at the top. Processes not matched by any row will inherit their group from their parents or children.
+
+The order also controls the order of the dimensions on the generated charts (although applications started
+after `apps.plugin` is started will be appended to the existing list of dimensions the netdata daemon maintains).
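+
+For example, a few hypothetical entries (the group and process names below are illustrative) could look like this:
+
+```txt
+# exact matches against the process name found in /proc/PID/stat
+myapp: myapp-server myapp-worker
+
+# prefix, suffix and substring (command line) matching
+backups: backup-* *-backup *backup-agent*
+
+# a quoted name with spaces, and a group hidden from the cpu chart
+media: 'Plex Media Serv'
+-scratch: mytestapp
+```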
+
+## Permissions
+
+`apps.plugin` requires additional privileges to collect all the information it needs.
+The problem is described in issue #157.
+
+When netdata is installed, `apps.plugin` is given the capabilities `cap_dac_read_search,cap_sys_ptrace+ep`.
+If this fails (i.e. `setcap` fails), `apps.plugin` is setuid to `root`.
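+
+To check and, if needed, re-apply these capabilities yourself (illustrative commands - adjust the path to match your installation):
+
+```sh
+# show the capabilities currently set on the plugin
+getcap /usr/libexec/netdata/plugins.d/apps.plugin
+
+# re-apply the capabilities netdata normally sets at installation
+sudo setcap "cap_dac_read_search,cap_sys_ptrace+ep" /usr/libexec/netdata/plugins.d/apps.plugin
+```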
+
+#### linux capabilities in containers
+
+There are a few cases, like `docker` and `virtuozzo` containers, where `setcap` succeeds, but the capabilities
+are silently ignored (in `lxc` containers `setcap` fails).
+
+In these cases (`setcap` succeeds but the capabilities do not work), you will have to make
+`apps.plugin` setuid to root by running these commands:
+
+```sh
+chown root:netdata /usr/libexec/netdata/plugins.d/apps.plugin
+chmod 4750 /usr/libexec/netdata/plugins.d/apps.plugin
+```
+
+You will have to run these commands every time you update netdata.
+
+## Security
+
+`apps.plugin` performs a hard-coded function of building the process tree in memory,
+iterating forever, collecting metrics for each running process and sending them to netdata.
+This is a one-way communication, from `apps.plugin` to netdata.
+
+So, since `apps.plugin` cannot be instructed by netdata about the actions it performs,
+we think it is pretty safe to allow it to have these increased privileges.
+
+Keep in mind that `apps.plugin` will still run without escalated permissions,
+but it will not be able to collect all the information.
+
+## Application Badges
+
+You can create badges that you can embed anywhere you like, with URLs like this:
+
+```
+https://your.netdata.ip:19999/api/v1/badge.svg?chart=apps.processes&dimensions=myapp&value_color=green%3E0%7Cred
+```
+
+The color expression unescaped is this: `value_color=green>0|red`.
+
+Here is an example for the process group `sql` at `https://registry.my-netdata.io`:
+
+![image](https://registry.my-netdata.io/api/v1/badge.svg?chart=apps.processes&dimensions=sql&value_color=green%3E0%7Cred)
+
+Netdata is able to give you a lot more badges for your app.
+Below are examples for the process group `sql`:
+
+- CPU usage: ![image](http://registry.my-netdata.io/api/v1/badge.svg?chart=apps.cpu&dimensions=sql&value_color=green=0%7Corange%3C50%7Cred)
+- Disk Physical Reads ![image](http://registry.my-netdata.io/api/v1/badge.svg?chart=apps.preads&dimensions=sql&value_color=green%3C100%7Corange%3C1000%7Cred)
+- Disk Physical Writes ![image](http://registry.my-netdata.io/api/v1/badge.svg?chart=apps.pwrites&dimensions=sql&value_color=green%3C100%7Corange%3C1000%7Cred)
+- Disk Logical Reads ![image](http://registry.my-netdata.io/api/v1/badge.svg?chart=apps.lreads&dimensions=sql&value_color=green%3C100%7Corange%3C1000%7Cred)
+- Disk Logical Writes ![image](http://registry.my-netdata.io/api/v1/badge.svg?chart=apps.lwrites&dimensions=sql&value_color=green%3C100%7Corange%3C1000%7Cred)
+- Open Files ![image](http://registry.my-netdata.io/api/v1/badge.svg?chart=apps.files&dimensions=sql&value_color=green%3E30%7Cred)
+- Real Memory ![image](http://registry.my-netdata.io/api/v1/badge.svg?chart=apps.mem&dimensions=sql&value_color=green%3C100%7Corange%3C200%7Cred)
+- Virtual Memory ![image](http://registry.my-netdata.io/api/v1/badge.svg?chart=apps.vmem&dimensions=sql&value_color=green%3C100%7Corange%3C1000%7Cred)
+- Swap Memory ![image](http://registry.my-netdata.io/api/v1/badge.svg?chart=apps.swap&dimensions=sql&value_color=green=0%7Cred)
+- Minor Page Faults ![image](http://registry.my-netdata.io/api/v1/badge.svg?chart=apps.minor_faults&dimensions=sql&value_color=green%3C100%7Corange%3C1000%7Cred)
+- Processes ![image](http://registry.my-netdata.io/api/v1/badge.svg?chart=apps.processes&dimensions=sql&value_color=green%3E0%7Cred)
+- Threads ![image](http://registry.my-netdata.io/api/v1/badge.svg?chart=apps.threads&dimensions=sql&value_color=green%3E=28%7Cred)
+- Major Faults (swap activity) ![image](http://registry.my-netdata.io/api/v1/badge.svg?chart=apps.major_faults&dimensions=sql&value_color=green=0%7Cred)
+- Open Pipes ![image](http://registry.my-netdata.io/api/v1/badge.svg?chart=apps.pipes&dimensions=sql&value_color=green=0%7Cred)
+- Open Sockets ![image](http://registry.my-netdata.io/api/v1/badge.svg?chart=apps.sockets&dimensions=sql&value_color=green%3E=3%7Cred)
+
+
+For more information about badges, check [Generating Badges](../../web/api/badges).
+
+## Comparison with console tools
+
+SSH to a server running netdata and execute this:
+
+```sh
+while true; do ls -l /var/run >/dev/null; done
+```
+
+In most systems `/var/run` is a `tmpfs` device, so there is nothing that can stop this command
+from entirely consuming one of the CPU cores of the machine.
+
+As we will see below, **none** of the console performance monitoring tools can report that this
+command is using 100% CPU. They do report of course that the CPU is busy, but **they fail to
+identify the process that consumes so much CPU**.
+
+Here is what common Linux console monitoring tools report:
+
+#### top
+
+`top` reports that `bash` is using just 14%.
+
+If you check the total system CPU utilization, it says there is no idle CPU at all, but `top`
+fails to provide a breakdown of the CPU consumption in the system. The sum of the CPU utilization
+of all processes reported by `top` is 15.6%.
+
+```
+top - 18:46:28 up 3 days, 20:14, 2 users, load average: 0.22, 0.05, 0.02
+Tasks: 76 total, 2 running, 74 sleeping, 0 stopped, 0 zombie
+%Cpu(s): 32.8 us, 65.6 sy, 0.0 ni, 0.0 id, 0.0 wa, 1.3 hi, 0.3 si, 0.0 st
+KiB Mem : 1016576 total, 244112 free, 52012 used, 720452 buff/cache
+KiB Swap: 0 total, 0 free, 0 used. 753712 avail Mem
+
+ PID USER PR NI VIRT RES SHR S %CPU %MEM TIME+ COMMAND
+12789 root 20 0 14980 4180 3020 S 14.0 0.4 0:02.82 bash
+ 9 root 20 0 0 0 0 S 1.0 0.0 0:22.36 rcuos/0
+ 642 netdata 20 0 132024 20112 2660 S 0.3 2.0 14:26.29 netdata
+12522 netdata 20 0 9508 2476 1828 S 0.3 0.2 0:02.26 apps.plugin
+ 1 root 20 0 67196 10216 7500 S 0.0 1.0 0:04.83 systemd
+ 2 root 20 0 0 0 0 S 0.0 0.0 0:00.00 kthreadd
+```
+
+#### htop
+
+Exactly like `top`, `htop` provides an incomplete breakdown of the system CPU utilization.
+
+```
+ CPU[||||||||||||||||||||||||100.0%] Tasks: 27, 11 thr; 2 running
+ Mem[||||||||||||||||||||85.4M/993M] Load average: 1.16 0.88 0.90
+ Swp[ 0K/0K] Uptime: 3 days, 21:37:03
+
+ PID USER PRI NI VIRT RES SHR S CPU% MEM% TIME+ Command
+12789 root 20 0 15104 4484 3208 S 14.0 0.4 10:57.15 -bash
+ 7024 netdata 20 0 9544 2480 1744 S 0.7 0.2 0:00.88 /usr/libexec/netd
+ 7009 netdata 20 0 138M 21016 2712 S 0.7 2.1 0:00.89 /usr/sbin/netdata
+ 7012 netdata 20 0 138M 21016 2712 S 0.0 2.1 0:00.31 /usr/sbin/netdata
+ 563 root 20 0 308M 202M 202M S 0.0 20.4 1:00.81 /usr/lib/systemd/
+ 7019 netdata 20 0 138M 21016 2712 S 0.0 2.1 0:00.14 /usr/sbin/netdata
+```
+
+#### atop
+
+`atop` also fails to break down CPU usage.
+
+```
+ATOP - localhost 2016/12/10 20:11:27 ----------- 10s elapsed
+PRC | sys 1.13s | user 0.43s | #proc 75 | #zombie 0 | #exit 5383 |
+CPU | sys 67% | user 31% | irq 2% | idle 0% | wait 0% |
+CPL | avg1 1.34 | avg5 1.05 | avg15 0.96 | csw 51346 | intr 10508 |
+MEM | tot 992.8M | free 211.5M | cache 470.0M | buff 87.2M | slab 164.7M |
+SWP | tot 0.0M | free 0.0M | | vmcom 207.6M | vmlim 496.4M |
+DSK | vda | busy 0% | read 0 | write 4 | avio 1.50 ms |
+NET | transport | tcpi 16 | tcpo 15 | udpi 0 | udpo 0 |
+NET | network | ipi 16 | ipo 15 | ipfrw 0 | deliv 16 |
+NET | eth0 ---- | pcki 16 | pcko 15 | si 1 Kbps | so 4 Kbps |
+
+ PID SYSCPU USRCPU VGROW RGROW RDDSK WRDSK ST EXC S CPU CMD 1/600
+12789 0.98s 0.40s 0K 0K 0K 336K -- - S 14% bash
+ 9 0.08s 0.00s 0K 0K 0K 0K -- - S 1% rcuos/0
+ 7024 0.03s 0.00s 0K 0K 0K 0K -- - S 0% apps.plugin
+ 7009 0.01s 0.01s 0K 0K 0K 4K -- - S 0% netdata
+```
+
+#### glances
+
+And the same is true for `glances`. The system runs at 100%, but `glances` reports only 17%
+per-process utilization.
+
+Note also that, being a `python` program, `glances` uses 1.6% CPU while it runs.
+
+
+```
+localhost Uptime: 3 days, 21:42:00
+
+CPU [100.0%] CPU 100.0% MEM 23.7% SWAP 0.0% LOAD 1-core
+MEM [ 23.7%] user: 30.9% total: 993M total: 0 1 min: 1.18
+SWAP [ 0.0%] system: 67.8% used: 236M used: 0 5 min: 1.08
+ idle: 0.0% free: 757M free: 0 15 min: 1.00
+
+NETWORK Rx/s Tx/s TASKS 75 (90 thr), 1 run, 74 slp, 0 oth
+eth0 168b 2Kb
+eth1 0b 0b CPU% MEM% PID USER NI S Command
+lo 0b 0b 13.5 0.4 12789 root 0 S -bash
+ 1.6 2.2 7025 root 0 R /usr/bin/python /u
+DISK I/O R/s W/s 1.0 0.0 9 root 0 S rcuos/0
+vda1 0 4K 0.3 0.2 7024 netdata 0 S /usr/libexec/netda
+ 0.3 0.0 7 root 0 S rcu_sched
+FILE SYS Used Total 0.3 2.1 7009 netdata 0 S /usr/sbin/netdata
+/ (vda1) 1.56G 29.5G 0.0 0.0 17 root 0 S oom_reaper
+```
+
+#### why does this happen?
+
+All the console tools report usage based on the processes found running *at the moment they
+examine the process tree*. So, they see just one `ls` command, which is actually very quick
+with minor CPU utilization. But the shell is spawning hundreds of them, one after another
+(much like shell scripts do).
+
+#### what does netdata report?
+
+The total CPU utilization of the system:
+
+![image](https://cloud.githubusercontent.com/assets/2662304/21076212/9198e5a6-bf2e-11e6-9bc0-6bdea25befb2.png)
+<br/>_**Figure 1**: The system overview section at netdata, just a few seconds after the command was run_
+
+And at the applications `apps.plugin` breaks down CPU usage per application:
+
+![image](https://cloud.githubusercontent.com/assets/2662304/21076220/c9687848-bf2e-11e6-8d81-348592c5aca2.png)
+<br/>_**Figure 2**: The Applications section at netdata, just a few seconds after the command was run_
+
+So, the `ssh` session is using 95% CPU time.
+
+Why `ssh`?
+
+`apps.plugin` groups all processes based on its configuration file
+[`/etc/netdata/apps_groups.conf`](apps_groups.conf)
+(to edit it on your system run `/etc/netdata/edit-config apps_groups.conf`).
+The default configuration has nothing for `bash`, but it has an entry for `sshd`, so netdata accumulates
+all ssh sessions into a dimension on the charts, called `ssh`. This includes all the processes in
+the process tree of `sshd`, **including the exited children**.
+
+> Distributions based on `systemd` provide another way to get cpu utilization per user session
+> or running service: control groups (cgroups), commonly used as part of containers.
+> `apps.plugin` does not use these mechanisms. The process grouping made by `apps.plugin` works
+> on any Linux, `systemd` based or not.
+
+#### a more technical description of how netdata works
+
+netdata reads `/proc/<pid>/stat` for all processes once per second, and extracts `utime` and
+`stime` (user and system cpu utilization), much like all the console tools do.
+
+But it [also extracts `cutime` and `cstime`](https://github.com/netdata/netdata/blob/62596cc6b906b1564657510ca9135c08f6d4cdda/src/apps_plugin.c#L636-L642)
+that account for the user and system time of the exited children of each process. By keeping a map in
+memory of the whole process tree, it is capable of assigning the right time to every process,
+taking into account all its exited children.
+
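+A rough illustration of the fields involved (a sketch only - `apps.plugin` does this in C, and it also copes with process names containing spaces or parentheses, which would break the naive field splitting below):
+
+```sh
+# utime, stime, cutime and cstime are fields 14-17 of /proc/PID/stat (in clock ticks)
+pid=$$
+awk '{ printf "utime=%s stime=%s cutime=%s cstime=%s\n", $14, $15, $16, $17 }' "/proc/${pid}/stat"
+```
+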
+It is tricky, since a process may be running for 1 hour and, once it exits, its parent should not
+receive the whole 1 hour of cpu time in just 1 second - you have to subtract the cpu time that has
+been reported for it prior to this iteration.
+
+It is even trickier, because walking through the entire process tree takes some time itself. So,
+if you sum the CPU utilization of all processes, you might have more CPU time than the reported
+total cpu time of the system. netdata solves this by adapting the per-process cpu utilization to
+the total of the system. [Netdata adds charts that document this normalization](https://london.my-netdata.io/default.html#menu_netdata_submenu_apps_plugin).
diff --git a/collectors/apps.plugin/apps_groups.conf b/collectors/apps.plugin/apps_groups.conf
new file mode 100644
index 000000000..c0d22fac9
--- /dev/null
+++ b/collectors/apps.plugin/apps_groups.conf
@@ -0,0 +1,286 @@
+#
+# apps.plugin process grouping
+#
+# The apps.plugin displays charts with information about the processes running.
+# This config allows grouping processes together, so that several processes
+# will be reported as one.
+#
+# Only groups in this file are reported. All other processes will be reported
+# as 'other'.
+#
+# For each process given, its whole process tree will be grouped, not just
+# the process matched. The plugin will include both parents and children.
+#
+# The format is:
+#
+# group: process1 process2 process3 ...
+#
+# Each group can be given multiple times, to add more processes to it.
+#
+# The process names are the ones returned by:
+#
+# - ps -e or /proc/PID/stat
+# - in case of substring mode (see below): /proc/PID/cmdline
+#
+# To add process names with spaces, enclose them in quotes (single or double)
+# example: 'Plex Media Serv' "my other process".
+#
+# Wildcard support:
+# You can add an asterisk (*) at the beginning and/or the end of a process:
+#
+# *name suffix mode: will search for processes ending with 'name'
+# (/proc/PID/stat)
+#
+# name* prefix mode: will search for processes beginning with 'name'
+# (/proc/PID/stat)
+#
+# *name* substring mode: will search for 'name' in the whole command line
+# (/proc/PID/cmdline)
+#
+# If you enter even just one *name* (substring), apps.plugin will process
+# /proc/PID/cmdline for all processes, just once (when they are first seen).
+#
+# To add processes with single quotes, enclose them in double quotes
+# example: "process with this ' single quote"
+#
+# To add processes with double quotes, enclose them in single quotes:
+# example: 'process with this " double quote'
+#
+# If a group or process name starts with a -, the dimension will be hidden
+# (cpu chart only).
+#
+# If a process starts with a +, debugging will be enabled for it
+# (debugging produces a lot of output - do not enable it in production systems)
+#
+# You can add any number of groups you like. Only the ones found running will
+# affect the charts generated. However, producing charts with hundreds of
+# dimensions may slow down your web browser.
+#
+# The order of the entries in this list is important: the first that matches
+# a process is used, so put important ones at the top. Processes not matched
+# by any row, will inherit it from their parents or children.
+#
+# The order also controls the order of the dimensions on the generated charts
+# (although applications started after apps.plugin is started, will be appended
+# to the existing list of dimensions the netdata daemon maintains).
+
+# -----------------------------------------------------------------------------
+# NETDATA processes accounting
+
+# netdata main process
+netdata: netdata
+
+# netdata known plugins
+# plugins not defined here will be accumulated in netdata, above
+apps.plugin: apps.plugin
+freeipmi.plugin: freeipmi.plugin
+charts.d.plugin: *charts.d.plugin*
+node.d.plugin: *node.d.plugin*
+python.d.plugin: *python.d.plugin*
+tc-qos-helper: *tc-qos-helper.sh*
+fping: fping
+
+# -----------------------------------------------------------------------------
+# authentication/authorization related servers
+
+auth: radius* openldap* ldap*
+fail2ban: fail2ban*
+
+# -----------------------------------------------------------------------------
+# web/ftp servers
+
+httpd: apache* httpd nginx* lighttpd
+proxy: squid* c-icap squidGuard varnish*
+php: php*
+ftpd: proftpd in.tftpd vsftpd
+uwsgi: uwsgi
+unicorn: *unicorn*
+puma: *puma*
+
+# -----------------------------------------------------------------------------
+# database servers
+
+sql: mysqld* mariad* postgres* postmaster* oracle_* ora_*
+nosql: mongod redis* memcached *couchdb*
+timedb: prometheus *carbon-cache.py* *carbon-aggregator.py* *graphite/manage.py* *net.opentsdb.tools.TSDMain*
+
+# -----------------------------------------------------------------------------
+# email servers
+
+email: dovecot imapd pop3d amavis* master zmstat* zmmailboxdmgr qmgr oqmgr saslauthd opendkim clamd freshclam unbound tlsmgr postfwd2 postscreen postfix smtp* lmtp* sendmail
+
+# -----------------------------------------------------------------------------
+# network, routing, VPN
+
+ppp: ppp*
+vpn: openvpn pptp* cjdroute gvpe tincd
+wifi: hostapd wpa_supplicant NetworkManager
+routing: ospfd* ospf6d* bgpd isisd ripd ripngd pimd ldpd zebra vtysh bird*
+modem: ModemManager
+
+# -----------------------------------------------------------------------------
+# high availability and balancers
+
+camo: *camo*
+balancer: ipvs_* haproxy
+ha: corosync hs_logd ha_logd stonithd pacemakerd lrmd crmd
+
+# -----------------------------------------------------------------------------
+# telephony
+
+pbx: asterisk safe_asterisk *vicidial*
+sip: opensips* stund
+
+# -----------------------------------------------------------------------------
+# chat
+
+chat: irssi *vines* *prosody* murmurd
+
+# -----------------------------------------------------------------------------
+# monitoring
+
+logs: ulogd* syslog* rsyslog* logrotate systemd-journald rotatelogs
+nms: snmpd vnstatd smokeping zabbix* monit munin* mon openhpid watchdog tailon nrpe
+splunk: splunkd
+azure: mdsd *waagent* *omiserver* *omiagent* hv_kvp_daemon hv_vss_daemon *auoms* *omsagent*
+
+# -----------------------------------------------------------------------------
+# storage, file systems and file servers
+
+ceph: ceph-mds ceph-mgr ceph-mon ceph-osd radosgw* rbd-*
+samba: smbd nmbd winbindd
+nfs: rpcbind rpc.* nfs*
+zfs: spl_* z_* txg_* zil_* arc_* l2arc*
+btrfs: btrfs*
+iscsi: iscsid iscsi_eh
+
+# -----------------------------------------------------------------------------
+# containers & virtual machines
+
+containers: lxc* docker*
+VMs: vbox* VBox* qemu*
+
+# -----------------------------------------------------------------------------
+# ssh servers and clients
+
+ssh: ssh* scp dropbear
+
+# -----------------------------------------------------------------------------
+# print servers and clients
+
+print: cups* lpd lpq
+
+# -----------------------------------------------------------------------------
+# time servers and clients
+
+time: ntp* systemd-timesyncd chronyd
+
+# -----------------------------------------------------------------------------
+# dhcp servers and clients
+
+dhcp: *dhcp*
+
+# -----------------------------------------------------------------------------
+# name servers and clients
+
+named: named rncd dig
+dnsdist: dnsdist
+
+# -----------------------------------------------------------------------------
+# installation / compilation / debugging
+
+build: cc1 cc1plus as gcc* cppcheck ld make cmake automake autoconf autoreconf
+build: git gdb valgrind*
+
+# -----------------------------------------------------------------------------
+# antivirus
+
+antivirus: clam* *clam
+
+# -----------------------------------------------------------------------------
+# torrent clients
+
+torrents: *deluge* transmission* *SickBeard* *CouchPotato* *rtorrent*
+
+# -----------------------------------------------------------------------------
+# backup servers and clients
+
+backup: rsync bacula*
+
+# -----------------------------------------------------------------------------
+# cron
+
+cron: cron* atd anacron systemd-cron*
+
+# -----------------------------------------------------------------------------
+# UPS
+
+ups: upsmon upsd */nut/*
+
+# -----------------------------------------------------------------------------
+# media players, servers, clients
+
+media: mplayer vlc xine mediatomb omxplayer* kodi* xbmc* mediacenter eventlircd
+media: mpd minidlnad mt-daapd avahi* Plex*
+
+# -----------------------------------------------------------------------------
+# java applications
+
+hdfsdatanode: *org.apache.hadoop.hdfs.server.datanode.DataNode*
+hdfsnamenode: *org.apache.hadoop.hdfs.server.namenode.NameNode*
+hdfsjournalnode: *org.apache.hadoop.hdfs.qjournal.server.JournalNode*
+hdfszkfc: *org.apache.hadoop.hdfs.tools.DFSZKFailoverController*
+
+yarnnode: *org.apache.hadoop.yarn.server.nodemanager.NodeManager*
+yarnmgr: *org.apache.hadoop.yarn.server.resourcemanager.ResourceManager*
+yarnproxy: *org.apache.hadoop.yarn.server.webproxy.WebAppProxyServer*
+
+sparkworker: *org.apache.spark.deploy.worker.Worker*
+sparkmaster: *org.apache.spark.deploy.master.Master*
+
+hbaseregion: *org.apache.hadoop.hbase.regionserver.HRegionServer*
+hbaserest: *org.apache.hadoop.hbase.rest.RESTServer*
+hbasethrift: *org.apache.hadoop.hbase.thrift.ThriftServer*
+hbasemaster: *org.apache.hadoop.hbase.master.HMaster*
+
+zookeeper: *org.apache.zookeeper.server.quorum.QuorumPeerMain*
+
+hive2: *org.apache.hive.service.server.HiveServer2*
+hivemetastore: *org.apache.hadoop.hive.metastore.HiveMetaStore*
+
+solr: *solr.install.dir*
+
+airflow: *airflow*
+
+# -----------------------------------------------------------------------------
+# X
+
+X: X Xorg xinit lightdm xdm pulseaudio gkrellm xfwm4 xfdesktop xfce* Thunar
+X: xfsettingsd xfconfd gnome-* gdm gconf* dconf* xfconf* *gvfs gvfs* kdm slim
+X: evolution-* firefox chromium opera vivaldi-bin epiphany WebKit*
+X: '*systemd --user*' chrome *chrome-sandbox* *google-chrome* *chromium* *firefox*
+
+# -----------------------------------------------------------------------------
+# Kernel / System
+
+ksmd: ksmd
+
+system: systemd-* udisks* udevd* *udevd connmand ipv6_addrconf dbus-* rtkit*
+system: inetd xinetd mdadm polkitd acpid uuidd packagekitd upowerd colord
+system: accounts-daemon rngd haveged
+
+kernel: kthreadd kauditd lockd khelper kdevtmpfs khungtaskd rpciod
+kernel: fsnotify_mark kthrotld deferwq scsi_*
+
+# -----------------------------------------------------------------------------
+# other application servers
+
+kafka: *kafka.Kafka*
+
+rabbitmq: *rabbitmq*
+
+sidekiq: *sidekiq*
+java: java
+ipfs: ipfs
+
+node: node
diff --git a/collectors/apps.plugin/apps_plugin.c b/collectors/apps.plugin/apps_plugin.c
new file mode 100644
index 000000000..f592e9fc8
--- /dev/null
+++ b/collectors/apps.plugin/apps_plugin.c
@@ -0,0 +1,3799 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+/*
+ * netdata apps.plugin
+ * (C) Copyright 2016-2017 Costa Tsaousis <costa@tsaousis.gr>
+ * Released under GPL v3+
+ */
+
+#include "../../libnetdata/libnetdata.h"
+
+// ----------------------------------------------------------------------------
+
+// callback required by fatal()
+void netdata_cleanup_and_exit(int ret) {
+ exit(ret);
+}
+
+// callbacks required by popen()
+void signals_block(void) {};
+void signals_unblock(void) {};
+void signals_reset(void) {};
+
+// callback required by eval()
+int health_variable_lookup(const char *variable, uint32_t hash, struct rrdcalc *rc, calculated_number *result) {
+ (void)variable;
+ (void)hash;
+ (void)rc;
+ (void)result;
+ return 0;
+};
+
+// required by get_system_cpus()
+char *netdata_configured_host_prefix = "";
+
+
+// ----------------------------------------------------------------------------
+// debugging
+
+static int debug_enabled = 0;
+static inline void debug_log_int(const char *fmt, ... ) {
+ va_list args;
+
+ fprintf( stderr, "apps.plugin: ");
+ va_start( args, fmt );
+ vfprintf( stderr, fmt, args );
+ va_end( args );
+
+ fputc('\n', stderr);
+}
+
+#ifdef NETDATA_INTERNAL_CHECKS
+
+#define debug_log(fmt, args...) do { if(unlikely(debug_enabled)) debug_log_int(fmt, ##args); } while(0)
+
+#else
+
+static inline void debug_log_dummy(void) {}
+#define debug_log(fmt, args...) debug_log_dummy()
+
+#endif
+
+
+// ----------------------------------------------------------------------------
+
+#ifdef __FreeBSD__
+#include <sys/user.h>
+#endif
+
+// ----------------------------------------------------------------------------
+// per O/S configuration
+
+// the minimum PID of the system
+// this is also the pid of the init process
+#define INIT_PID 1
+
+// if apps.plugin can read the entire process list, including the resource
+// utilization of each process, instantly, set this to 1.
+// when set to 0, apps.plugin builds a sorted list of processes, in order
+// to process child processes before parent processes
+#ifdef __FreeBSD__
+#define ALL_PIDS_ARE_READ_INSTANTLY 1
+#else
+#define ALL_PIDS_ARE_READ_INSTANTLY 0
+#endif
+
+// ----------------------------------------------------------------------------
+// string lengths
+
+#define MAX_COMPARE_NAME 100
+#define MAX_NAME 100
+#define MAX_CMDLINE 16384
+
+// ----------------------------------------------------------------------------
+// the rates we are going to send to netdata are scaled by this detail factor:
+// - 1 will send just the integer part to netdata
+// - 100 will send 2 decimal points
+// - 1000 will send 3 decimal points
+// - 10000 will send 4 decimal points (the value used below)
+// etc.
+#define RATES_DETAIL 10000ULL
+
+
+// ----------------------------------------------------------------------------
+// to avoid reallocating too frequently, we can increase the number of spare
+// file descriptors used by processes.
+// IMPORTANT:
+// having a lot of spares increases the CPU utilization of the plugin.
+#define MAX_SPARE_FDS 1
+
+
+// ----------------------------------------------------------------------------
+// command line options
+
+static int
+ update_every = 1,
+ enable_guest_charts = 0,
+#ifdef __FreeBSD__
+ enable_file_charts = 0,
+#else
+ enable_file_charts = 1,
+ max_fds_cache_seconds = 60,
+#endif
+ enable_users_charts = 1,
+ enable_groups_charts = 1,
+ include_exited_childs = 1;
+
+// will be changed to getenv(NETDATA_USER_CONFIG_DIR) if it exists
+static char *user_config_dir = CONFIG_DIR;
+static char *stock_config_dir = LIBCONFIG_DIR;
+
+// ----------------------------------------------------------------------------
+// internal flags
+// handled in code (automatically set)
+
+static int
+ show_guest_time = 0, // 1 when guest values are collected
+ show_guest_time_old = 0,
+    proc_pid_cmdline_is_needed = 0; // 1 when we need to read /proc/PID/cmdline
+
+
+// ----------------------------------------------------------------------------
+// internal counters
+
+static size_t
+ global_iterations_counter = 1,
+ calls_counter = 0,
+ file_counter = 0,
+ filenames_allocated_counter = 0,
+ inodes_changed_counter = 0,
+ links_changed_counter = 0,
+ targets_assignment_counter = 0;
+
+
+// ----------------------------------------------------------------------------
+// Normalization
+//
+// With normalization we lower the collected metrics by a factor to make them
+// match the total utilization of the system.
+// The discrepancy exists because apps.plugin needs some time to collect all
+// the metrics. This results in utilization that exceeds the total utilization
+// of the system.
+//
+// With normalization we align the per-process utilization to the total of
+// the system. We first consume the exited children utilization and, if the
+// collected values are still above the total, we proportionally scale each
+// reported metric.
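+//
+// Illustrative sketch of the idea (hypothetical numbers; the actual work is
+// done by normalize_utilization() further down): if the per-process utime
+// adds up to 1100 for an interval in which /proc/stat reports 1000, then
+// utime_fix_ratio becomes 1000/1100 (about 0.91) and every reported utime
+// is multiplied by it.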
+
+// the total system time, as reported by /proc/stat
+#if (ALL_PIDS_ARE_READ_INSTANTLY == 0)
+static kernel_uint_t
+ global_utime = 0,
+ global_stime = 0,
+ global_gtime = 0;
+#endif
+
+// the normalization ratios, as calculated by normalize_utilization()
+double utime_fix_ratio = 1.0,
+ stime_fix_ratio = 1.0,
+ gtime_fix_ratio = 1.0,
+ minflt_fix_ratio = 1.0,
+ majflt_fix_ratio = 1.0,
+ cutime_fix_ratio = 1.0,
+ cstime_fix_ratio = 1.0,
+ cgtime_fix_ratio = 1.0,
+ cminflt_fix_ratio = 1.0,
+ cmajflt_fix_ratio = 1.0;
+
+// ----------------------------------------------------------------------------
+// target
+//
+// target is the structure that processes are aggregated to be reported
+// to netdata.
+//
+// - Each entry in /etc/apps_groups.conf creates a target.
+// - Each user and group used by a process in the system creates a target.
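+//
+// For example, the line "sql: mysqld* ..." in the apps_groups.conf shipped
+// above creates a target with id 'sql', while a process owned by uid 1000
+// would create a user target named after that uid (or the resolved user
+// name, see get_users_target() below).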
+
+struct target {
+ char compare[MAX_COMPARE_NAME + 1];
+ uint32_t comparehash;
+ size_t comparelen;
+
+ char id[MAX_NAME + 1];
+ uint32_t idhash;
+
+ char name[MAX_NAME + 1];
+
+ uid_t uid;
+ gid_t gid;
+
+ kernel_uint_t minflt;
+ kernel_uint_t cminflt;
+ kernel_uint_t majflt;
+ kernel_uint_t cmajflt;
+ kernel_uint_t utime;
+ kernel_uint_t stime;
+ kernel_uint_t gtime;
+ kernel_uint_t cutime;
+ kernel_uint_t cstime;
+ kernel_uint_t cgtime;
+ kernel_uint_t num_threads;
+ // kernel_uint_t rss;
+
+ kernel_uint_t status_vmsize;
+ kernel_uint_t status_vmrss;
+ kernel_uint_t status_vmshared;
+ kernel_uint_t status_rssfile;
+ kernel_uint_t status_rssshmem;
+ kernel_uint_t status_vmswap;
+
+ kernel_uint_t io_logical_bytes_read;
+ kernel_uint_t io_logical_bytes_written;
+ // kernel_uint_t io_read_calls;
+ // kernel_uint_t io_write_calls;
+ kernel_uint_t io_storage_bytes_read;
+ kernel_uint_t io_storage_bytes_written;
+ // kernel_uint_t io_cancelled_write_bytes;
+
+ int *target_fds;
+ int target_fds_size;
+
+ kernel_uint_t openfiles;
+ kernel_uint_t openpipes;
+ kernel_uint_t opensockets;
+ kernel_uint_t openinotifies;
+ kernel_uint_t openeventfds;
+ kernel_uint_t opentimerfds;
+ kernel_uint_t opensignalfds;
+ kernel_uint_t openeventpolls;
+ kernel_uint_t openother;
+
+ unsigned int processes; // how many processes have been merged to this
+ int exposed; // if set, we have sent this to netdata
+ int hidden; // if set, we set the hidden flag on the dimension
+ int debug_enabled;
+ int ends_with;
+ int starts_with; // if set, the compare string matches only the
+ // beginning of the command
+
+ struct target *target; // the one that will be reported to netdata
+ struct target *next;
+};
+
+struct target
+ *apps_groups_default_target = NULL, // the default target
+ *apps_groups_root_target = NULL, // apps_groups.conf defined
+ *users_root_target = NULL, // users
+ *groups_root_target = NULL; // user groups
+
+size_t
+ apps_groups_targets_count = 0; // # of apps_groups.conf targets
+
+
+// ----------------------------------------------------------------------------
+// pid_stat
+//
+// structure to store data for each process running
+// see: man proc for the description of the fields
+
+struct pid_fd {
+ int fd;
+
+#ifndef __FreeBSD__
+ ino_t inode;
+ char *filename;
+ uint32_t link_hash;
+ size_t cache_iterations_counter;
+ size_t cache_iterations_reset;
+#endif
+};
+
+struct pid_stat {
+ int32_t pid;
+ char comm[MAX_COMPARE_NAME + 1];
+ char *cmdline;
+
+ uint32_t log_thrown;
+
+ // char state;
+ int32_t ppid;
+ // int32_t pgrp;
+ // int32_t session;
+ // int32_t tty_nr;
+ // int32_t tpgid;
+ // uint64_t flags;
+
+ // these are raw values collected
+ kernel_uint_t minflt_raw;
+ kernel_uint_t cminflt_raw;
+ kernel_uint_t majflt_raw;
+ kernel_uint_t cmajflt_raw;
+ kernel_uint_t utime_raw;
+ kernel_uint_t stime_raw;
+ kernel_uint_t gtime_raw; // guest_time
+ kernel_uint_t cutime_raw;
+ kernel_uint_t cstime_raw;
+ kernel_uint_t cgtime_raw; // cguest_time
+
+ // these are rates
+ kernel_uint_t minflt;
+ kernel_uint_t cminflt;
+ kernel_uint_t majflt;
+ kernel_uint_t cmajflt;
+ kernel_uint_t utime;
+ kernel_uint_t stime;
+ kernel_uint_t gtime;
+ kernel_uint_t cutime;
+ kernel_uint_t cstime;
+ kernel_uint_t cgtime;
+
+ // int64_t priority;
+ // int64_t nice;
+ int32_t num_threads;
+ // int64_t itrealvalue;
+ // kernel_uint_t starttime;
+ // kernel_uint_t vsize;
+ // kernel_uint_t rss;
+ // kernel_uint_t rsslim;
+ // kernel_uint_t starcode;
+ // kernel_uint_t endcode;
+ // kernel_uint_t startstack;
+ // kernel_uint_t kstkesp;
+ // kernel_uint_t kstkeip;
+ // uint64_t signal;
+ // uint64_t blocked;
+ // uint64_t sigignore;
+ // uint64_t sigcatch;
+ // uint64_t wchan;
+ // uint64_t nswap;
+ // uint64_t cnswap;
+ // int32_t exit_signal;
+ // int32_t processor;
+ // uint32_t rt_priority;
+ // uint32_t policy;
+ // kernel_uint_t delayacct_blkio_ticks;
+
+ uid_t uid;
+ gid_t gid;
+
+ kernel_uint_t status_vmsize;
+ kernel_uint_t status_vmrss;
+ kernel_uint_t status_vmshared;
+ kernel_uint_t status_rssfile;
+ kernel_uint_t status_rssshmem;
+ kernel_uint_t status_vmswap;
+#ifndef __FreeBSD__
+ ARL_BASE *status_arl;
+#endif
+
+ kernel_uint_t io_logical_bytes_read_raw;
+ kernel_uint_t io_logical_bytes_written_raw;
+ // kernel_uint_t io_read_calls_raw;
+ // kernel_uint_t io_write_calls_raw;
+ kernel_uint_t io_storage_bytes_read_raw;
+ kernel_uint_t io_storage_bytes_written_raw;
+ // kernel_uint_t io_cancelled_write_bytes_raw;
+
+ kernel_uint_t io_logical_bytes_read;
+ kernel_uint_t io_logical_bytes_written;
+ // kernel_uint_t io_read_calls;
+ // kernel_uint_t io_write_calls;
+ kernel_uint_t io_storage_bytes_read;
+ kernel_uint_t io_storage_bytes_written;
+ // kernel_uint_t io_cancelled_write_bytes;
+
+ struct pid_fd *fds; // array of fds it uses
+ size_t fds_size; // the size of the fds array
+
+ int children_count; // number of processes directly referencing this
+ unsigned char keep:1; // 1 when we need to keep this process in memory even after it exited
+    int keeploops; // increases by 1 every time keep is 1 and updated is 0
+ unsigned char updated:1; // 1 when the process is currently running
+ unsigned char merged:1; // 1 when it has been merged to its parent
+ unsigned char read:1; // 1 when we have already read this process for this iteration
+
+ int sortlist; // higher numbers = top on the process tree
+ // each process gets a unique number
+
+ struct target *target; // app_groups.conf targets
+ struct target *user_target; // uid based targets
+ struct target *group_target; // gid based targets
+
+ usec_t stat_collected_usec;
+ usec_t last_stat_collected_usec;
+
+ usec_t io_collected_usec;
+ usec_t last_io_collected_usec;
+
+ char *fds_dirname; // the full directory name in /proc/PID/fd
+
+ char *stat_filename;
+ char *status_filename;
+ char *io_filename;
+ char *cmdline_filename;
+
+ struct pid_stat *parent;
+ struct pid_stat *prev;
+ struct pid_stat *next;
+};
+
+size_t pagesize;
+
+// log each problem once per process
+// log flood protection flags (log_thrown)
+#define PID_LOG_IO 0x00000001
+#define PID_LOG_STATUS 0x00000002
+#define PID_LOG_CMDLINE 0x00000004
+#define PID_LOG_FDS 0x00000008
+#define PID_LOG_STAT 0x00000010
+
+static struct pid_stat
+ *root_of_pids = NULL, // global list of all processes running
+        **all_pids = NULL;      // to avoid allocations, we pre-allocate
+                                // the entire pid space.
+
+static size_t
+ all_pids_count = 0; // the number of processes running
+
+#if (ALL_PIDS_ARE_READ_INSTANTLY == 0)
+// Another pre-allocated list of all possible pids.
+// We need it to sort pids and assign them a unique sortlist id, so that we
+// read parents before children. This is needed to prevent a situation where
+// a child is found running, but until we read its parent, it has exited and
+// its parent has accumulated its resources.
+static pid_t
+ *all_pids_sortlist = NULL;
+#endif
+
+// ----------------------------------------------------------------------------
+// file descriptor
+//
+// this is used to keep a global list of all open files of the system.
+// it is needed in order to calculate the unique files processes have open.
+
+#define FILE_DESCRIPTORS_INCREASE_STEP 100
+
+// types for struct file_descriptor->type
+typedef enum fd_filetype {
+ FILETYPE_OTHER,
+ FILETYPE_FILE,
+ FILETYPE_PIPE,
+ FILETYPE_SOCKET,
+ FILETYPE_INOTIFY,
+ FILETYPE_EVENTFD,
+ FILETYPE_EVENTPOLL,
+ FILETYPE_TIMERFD,
+ FILETYPE_SIGNALFD
+} FD_FILETYPE;
+
+struct file_descriptor {
+ avl avl;
+
+#ifdef NETDATA_INTERNAL_CHECKS
+ uint32_t magic;
+#endif /* NETDATA_INTERNAL_CHECKS */
+
+ const char *name;
+ uint32_t hash;
+
+ FD_FILETYPE type;
+ int count;
+ int pos;
+} *all_files = NULL;
+
+static int
+ all_files_len = 0,
+ all_files_size = 0;
+
+// ----------------------------------------------------------------------------
+// apps_groups.conf
+// aggregate all processes in groups, to have a limited number of dimensions
+
+static struct target *get_users_target(uid_t uid) {
+ struct target *w;
+ for(w = users_root_target ; w ; w = w->next)
+ if(w->uid == uid) return w;
+
+ w = callocz(sizeof(struct target), 1);
+ snprintfz(w->compare, MAX_COMPARE_NAME, "%u", uid);
+ w->comparehash = simple_hash(w->compare);
+ w->comparelen = strlen(w->compare);
+
+ snprintfz(w->id, MAX_NAME, "%u", uid);
+ w->idhash = simple_hash(w->id);
+
+ struct passwd *pw = getpwuid(uid);
+ if(!pw || !pw->pw_name || !*pw->pw_name)
+ snprintfz(w->name, MAX_NAME, "%u", uid);
+ else
+ snprintfz(w->name, MAX_NAME, "%s", pw->pw_name);
+
+ netdata_fix_chart_name(w->name);
+
+ w->uid = uid;
+
+ w->next = users_root_target;
+ users_root_target = w;
+
+ debug_log("added uid %u ('%s') target", w->uid, w->name);
+
+ return w;
+}
+
+struct target *get_groups_target(gid_t gid)
+{
+ struct target *w;
+ for(w = groups_root_target ; w ; w = w->next)
+ if(w->gid == gid) return w;
+
+ w = callocz(sizeof(struct target), 1);
+ snprintfz(w->compare, MAX_COMPARE_NAME, "%u", gid);
+ w->comparehash = simple_hash(w->compare);
+ w->comparelen = strlen(w->compare);
+
+ snprintfz(w->id, MAX_NAME, "%u", gid);
+ w->idhash = simple_hash(w->id);
+
+ struct group *gr = getgrgid(gid);
+ if(!gr || !gr->gr_name || !*gr->gr_name)
+ snprintfz(w->name, MAX_NAME, "%u", gid);
+ else
+ snprintfz(w->name, MAX_NAME, "%s", gr->gr_name);
+
+ netdata_fix_chart_name(w->name);
+
+ w->gid = gid;
+
+ w->next = groups_root_target;
+ groups_root_target = w;
+
+ debug_log("added gid %u ('%s') target", w->gid, w->name);
+
+ return w;
+}
+
+// find or create a new target
+// some targets are just aggregated into another target (the second argument)
+static struct target *get_apps_groups_target(const char *id, struct target *target, const char *name) {
+ int tdebug = 0, thidden = target?target->hidden:0, ends_with = 0;
+ const char *nid = id;
+
+ // extract the options
+ while(nid[0] == '-' || nid[0] == '+' || nid[0] == '*') {
+ if(nid[0] == '-') thidden = 1;
+ if(nid[0] == '+') tdebug = 1;
+ if(nid[0] == '*') ends_with = 1;
+ nid++;
+ }
+ uint32_t hash = simple_hash(id);
+
+ // find if it already exists
+ struct target *w, *last = apps_groups_root_target;
+ for(w = apps_groups_root_target ; w ; w = w->next) {
+ if(w->idhash == hash && strncmp(nid, w->id, MAX_NAME) == 0)
+ return w;
+
+ last = w;
+ }
+
+ // find an existing target
+ if(unlikely(!target)) {
+ while(*name == '-') {
+ if(*name == '-') thidden = 1;
+ name++;
+ }
+
+ for(target = apps_groups_root_target ; target != NULL ; target = target->next) {
+ if(!target->target && strcmp(name, target->name) == 0)
+ break;
+ }
+
+ if(unlikely(debug_enabled)) {
+ if(unlikely(target))
+ debug_log("REUSING TARGET NAME '%s' on ID '%s'", target->name, target->id);
+ else
+ debug_log("NEW TARGET NAME '%s' on ID '%s'", name, id);
+ }
+ }
+
+ if(target && target->target)
+ fatal("Internal Error: request to link process '%s' to target '%s' which is linked to target '%s'", id, target->id, target->target->id);
+
+ w = callocz(sizeof(struct target), 1);
+ strncpyz(w->id, nid, MAX_NAME);
+ w->idhash = simple_hash(w->id);
+
+ if(unlikely(!target))
+ // copy the name
+ strncpyz(w->name, name, MAX_NAME);
+ else
+ // copy the id
+ strncpyz(w->name, nid, MAX_NAME);
+
+ strncpyz(w->compare, nid, MAX_COMPARE_NAME);
+ size_t len = strlen(w->compare);
+ if(w->compare[len - 1] == '*') {
+ w->compare[len - 1] = '\0';
+ w->starts_with = 1;
+ }
+ w->ends_with = ends_with;
+
+ if(w->starts_with && w->ends_with)
+ proc_pid_cmdline_is_needed = 1;
+
+ w->comparehash = simple_hash(w->compare);
+ w->comparelen = strlen(w->compare);
+
+ w->hidden = thidden;
+#ifdef NETDATA_INTERNAL_CHECKS
+ w->debug_enabled = tdebug;
+#else
+ if(tdebug)
+ fprintf(stderr, "apps.plugin has been compiled without debugging\n");
+#endif
+ w->target = target;
+
+ // append it, to maintain the order in apps_groups.conf
+ if(last) last->next = w;
+ else apps_groups_root_target = w;
+
+ debug_log("ADDING TARGET ID '%s', process name '%s' (%s), aggregated on target '%s', options: %s %s"
+ , w->id
+ , w->compare, (w->starts_with && w->ends_with)?"substring":((w->starts_with)?"prefix":((w->ends_with)?"suffix":"exact"))
+ , w->target?w->target->name:w->name
+ , (w->hidden)?"hidden":"-"
+ , (w->debug_enabled)?"debug":"-"
+ );
+
+ return w;
+}
+
+// read the apps_groups.conf file
+static int read_apps_groups_conf(const char *path, const char *file)
+{
+ char filename[FILENAME_MAX + 1];
+
+ snprintfz(filename, FILENAME_MAX, "%s/apps_%s.conf", path, file);
+
+ debug_log("process groups file: '%s'", filename);
+
+ // ----------------------------------------
+
+ procfile *ff = procfile_open(filename, " :\t", PROCFILE_FLAG_DEFAULT);
+ if(!ff) return 1;
+
+ procfile_set_quotes(ff, "'\"");
+
+ ff = procfile_readall(ff);
+ if(!ff)
+ return 1;
+
+ size_t line, lines = procfile_lines(ff);
+
+ for(line = 0; line < lines ;line++) {
+ size_t word, words = procfile_linewords(ff, line);
+ if(!words) continue;
+
+ char *name = procfile_lineword(ff, line, 0);
+ if(!name || !*name) continue;
+
+ // find a possibly existing target
+ struct target *w = NULL;
+
+ // loop through all words, skipping the first one (the name)
+ for(word = 0; word < words ;word++) {
+ char *s = procfile_lineword(ff, line, word);
+ if(!s || !*s) continue;
+ if(*s == '#') break;
+
+ // is this the first word? skip it
+ if(s == name) continue;
+
+ // add this target
+ struct target *n = get_apps_groups_target(s, w, name);
+ if(!n) {
+ error("Cannot create target '%s' (line %zu, word %zu)", s, line, word);
+ continue;
+ }
+
+ // just some optimization
+ // to avoid searching for a target for each process
+ if(!w) w = n->target?n->target:n;
+ }
+ }
+
+ procfile_close(ff);
+
+ apps_groups_default_target = get_apps_groups_target("p+!o@w#e$i^r&7*5(-i)l-o_", NULL, "other"); // match nothing
+ if(!apps_groups_default_target)
+ fatal("Cannot create default target");
+
+ // allow the user to override group 'other'
+ if(apps_groups_default_target->target)
+ apps_groups_default_target = apps_groups_default_target->target;
+
+ return 0;
+}
+
+
+// ----------------------------------------------------------------------------
+// struct pid_stat management
+static inline void init_pid_fds(struct pid_stat *p, size_t first, size_t size);
+
+static inline struct pid_stat *get_pid_entry(pid_t pid) {
+ if(unlikely(all_pids[pid]))
+ return all_pids[pid];
+
+ struct pid_stat *p = callocz(sizeof(struct pid_stat), 1);
+ p->fds = mallocz(sizeof(struct pid_fd) * MAX_SPARE_FDS);
+ p->fds_size = MAX_SPARE_FDS;
+ init_pid_fds(p, 0, p->fds_size);
+
+ if(likely(root_of_pids))
+ root_of_pids->prev = p;
+
+ p->next = root_of_pids;
+ root_of_pids = p;
+
+ p->pid = pid;
+
+ all_pids[pid] = p;
+ all_pids_count++;
+
+ return p;
+}
+
+static inline void del_pid_entry(pid_t pid) {
+ struct pid_stat *p = all_pids[pid];
+
+ if(unlikely(!p)) {
+ error("attempted to free pid %d that is not allocated.", pid);
+ return;
+ }
+
+ debug_log("process %d %s exited, deleting it.", pid, p->comm);
+
+ if(root_of_pids == p)
+ root_of_pids = p->next;
+
+ if(p->next) p->next->prev = p->prev;
+ if(p->prev) p->prev->next = p->next;
+
+ // free the filename
+#ifndef __FreeBSD__
+ {
+ size_t i;
+ for(i = 0; i < p->fds_size; i++)
+ if(p->fds[i].filename)
+ freez(p->fds[i].filename);
+ }
+#endif
+ freez(p->fds);
+
+ freez(p->fds_dirname);
+ freez(p->stat_filename);
+ freez(p->status_filename);
+#ifndef __FreeBSD__
+ arl_free(p->status_arl);
+#endif
+ freez(p->io_filename);
+ freez(p->cmdline_filename);
+ freez(p->cmdline);
+ freez(p);
+
+ all_pids[pid] = NULL;
+ all_pids_count--;
+}
+
+// ----------------------------------------------------------------------------
+
+static inline int managed_log(struct pid_stat *p, uint32_t log, int status) {
+ if(unlikely(!status)) {
+ // error("command failed log %u, errno %d", log, errno);
+
+ if(unlikely(debug_enabled || errno != ENOENT)) {
+ if(unlikely(debug_enabled || !(p->log_thrown & log))) {
+ p->log_thrown |= log;
+ switch(log) {
+ case PID_LOG_IO:
+ #ifdef __FreeBSD__
+ error("Cannot fetch process %d I/O info (command '%s')", p->pid, p->comm);
+ #else
+ error("Cannot process %s/proc/%d/io (command '%s')", netdata_configured_host_prefix, p->pid, p->comm);
+ #endif
+ break;
+
+ case PID_LOG_STATUS:
+ #ifdef __FreeBSD__
+ error("Cannot fetch process %d status info (command '%s')", p->pid, p->comm);
+ #else
+ error("Cannot process %s/proc/%d/status (command '%s')", netdata_configured_host_prefix, p->pid, p->comm);
+ #endif
+ break;
+
+ case PID_LOG_CMDLINE:
+ #ifdef __FreeBSD__
+ error("Cannot fetch process %d command line (command '%s')", p->pid, p->comm);
+ #else
+ error("Cannot process %s/proc/%d/cmdline (command '%s')", netdata_configured_host_prefix, p->pid, p->comm);
+ #endif
+ break;
+
+ case PID_LOG_FDS:
+ #ifdef __FreeBSD__
+ error("Cannot fetch process %d files (command '%s')", p->pid, p->comm);
+ #else
+ error("Cannot process entries in %s/proc/%d/fd (command '%s')", netdata_configured_host_prefix, p->pid, p->comm);
+ #endif
+ break;
+
+ case PID_LOG_STAT:
+ break;
+
+ default:
+ error("unhandled error for pid %d, command '%s'", p->pid, p->comm);
+ break;
+ }
+ }
+ }
+ errno = 0;
+ }
+ else if(unlikely(p->log_thrown & log)) {
+ // error("unsetting log %u on pid %d", log, p->pid);
+ p->log_thrown &= ~log;
+ }
+
+ return status;
+}
+
+static inline void assign_target_to_pid(struct pid_stat *p) {
+ targets_assignment_counter++;
+
+ uint32_t hash = simple_hash(p->comm);
+ size_t pclen = strlen(p->comm);
+
+ struct target *w;
+ for(w = apps_groups_root_target; w ; w = w->next) {
+ // if(debug_enabled || (p->target && p->target->debug_enabled)) debug_log_int("\t\tcomparing '%s' with '%s'", w->compare, p->comm);
+
+ // find it - 4 cases:
+ // 1. the target is not a pattern
+ // 2. the target has the prefix
+ // 3. the target has the suffix
+ // 4. the target is something inside cmdline
+
+ if(unlikely(( (!w->starts_with && !w->ends_with && w->comparehash == hash && !strcmp(w->compare, p->comm))
+ || (w->starts_with && !w->ends_with && !strncmp(w->compare, p->comm, w->comparelen))
+ || (!w->starts_with && w->ends_with && pclen >= w->comparelen && !strcmp(w->compare, &p->comm[pclen - w->comparelen]))
+ || (proc_pid_cmdline_is_needed && w->starts_with && w->ends_with && p->cmdline && strstr(p->cmdline, w->compare))
+ ))) {
+
+ if(w->target) p->target = w->target;
+ else p->target = w;
+
+ if(debug_enabled || (p->target && p->target->debug_enabled))
+ debug_log_int("%s linked to target %s", p->comm, p->target->name);
+
+ break;
+ }
+ }
+}
+
+
+// ----------------------------------------------------------------------------
+// update pids from proc
+
+static inline int read_proc_pid_cmdline(struct pid_stat *p) {
+ static char cmdline[MAX_CMDLINE + 1];
+
+#ifdef __FreeBSD__
+ size_t i, bytes = MAX_CMDLINE;
+ int mib[4];
+
+ mib[0] = CTL_KERN;
+ mib[1] = KERN_PROC;
+ mib[2] = KERN_PROC_ARGS;
+ mib[3] = p->pid;
+ if (unlikely(sysctl(mib, 4, cmdline, &bytes, NULL, 0)))
+ goto cleanup;
+#else
+ if(unlikely(!p->cmdline_filename)) {
+ char filename[FILENAME_MAX + 1];
+ snprintfz(filename, FILENAME_MAX, "%s/proc/%d/cmdline", netdata_configured_host_prefix, p->pid);
+ p->cmdline_filename = strdupz(filename);
+ }
+
+ int fd = open(p->cmdline_filename, procfile_open_flags, 0666);
+ if(unlikely(fd == -1)) goto cleanup;
+
+ ssize_t i, bytes = read(fd, cmdline, MAX_CMDLINE);
+ close(fd);
+
+ if(unlikely(bytes < 0)) goto cleanup;
+#endif
+
+ cmdline[bytes] = '\0';
+ for(i = 0; i < bytes ; i++) {
+ if(unlikely(!cmdline[i])) cmdline[i] = ' ';
+ }
+
+ if(p->cmdline) freez(p->cmdline);
+ p->cmdline = strdupz(cmdline);
+
+ debug_log("Read file '%s' contents: %s", p->cmdline_filename, p->cmdline);
+
+ return 1;
+
+cleanup:
+ // copy the command to the command line
+ if(p->cmdline) freez(p->cmdline);
+ p->cmdline = strdupz(p->comm);
+ return 0;
+}
+
+// ----------------------------------------------------------------------------
+// macro to calculate the incremental rate of a value
+// each parameter is accessed only ONCE - so it is safe to pass function calls
+// or other macros as parameters
+
+#define incremental_rate(rate_variable, last_kernel_variable, new_kernel_value, collected_usec, last_collected_usec) { \
+ kernel_uint_t _new_tmp = new_kernel_value; \
+ (rate_variable) = (_new_tmp - (last_kernel_variable)) * (USEC_PER_SEC * RATES_DETAIL) / ((collected_usec) - (last_collected_usec)); \
+ (last_kernel_variable) = _new_tmp; \
+ }
+
+// the same macro for struct pid members
+#define pid_incremental_rate(type, var, value) \
+ incremental_rate(var, var##_raw, value, p->type##_collected_usec, p->last_##type##_collected_usec)
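+
+// For illustration (hypothetical numbers): if a counter moved from 1000 to
+// 1050 ticks while exactly one second (1,000,000 usec) elapsed, the macro
+// yields (1050 - 1000) * (USEC_PER_SEC * RATES_DETAIL) / 1000000
+// = 50 * RATES_DETAIL, i.e. 50 ticks/s expressed at RATES_DETAIL precision.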
+
+
+// ----------------------------------------------------------------------------
+
+#ifndef __FreeBSD__
+struct arl_callback_ptr {
+ struct pid_stat *p;
+ procfile *ff;
+ size_t line;
+};
+
+void arl_callback_status_uid(const char *name, uint32_t hash, const char *value, void *dst) {
+ (void)name; (void)hash; (void)value;
+ struct arl_callback_ptr *aptr = (struct arl_callback_ptr *)dst;
+ if(unlikely(procfile_linewords(aptr->ff, aptr->line) < 5)) return;
+
+ //const char *real_uid = procfile_lineword(aptr->ff, aptr->line, 1);
+ const char *effective_uid = procfile_lineword(aptr->ff, aptr->line, 2);
+ //const char *saved_uid = procfile_lineword(aptr->ff, aptr->line, 3);
+ //const char *filesystem_uid = procfile_lineword(aptr->ff, aptr->line, 4);
+
+ if(likely(effective_uid && *effective_uid))
+ aptr->p->uid = (uid_t)str2l(effective_uid);
+}
+
+void arl_callback_status_gid(const char *name, uint32_t hash, const char *value, void *dst) {
+ (void)name; (void)hash; (void)value;
+ struct arl_callback_ptr *aptr = (struct arl_callback_ptr *)dst;
+ if(unlikely(procfile_linewords(aptr->ff, aptr->line) < 5)) return;
+
+ //const char *real_gid = procfile_lineword(aptr->ff, aptr->line, 1);
+ const char *effective_gid = procfile_lineword(aptr->ff, aptr->line, 2);
+ //const char *saved_gid = procfile_lineword(aptr->ff, aptr->line, 3);
+ //const char *filesystem_gid = procfile_lineword(aptr->ff, aptr->line, 4);
+
+ if(likely(effective_gid && *effective_gid))
+        aptr->p->gid = (gid_t)str2l(effective_gid);
+}
+
+void arl_callback_status_vmsize(const char *name, uint32_t hash, const char *value, void *dst) {
+ (void)name; (void)hash; (void)value;
+ struct arl_callback_ptr *aptr = (struct arl_callback_ptr *)dst;
+ if(unlikely(procfile_linewords(aptr->ff, aptr->line) < 3)) return;
+
+ aptr->p->status_vmsize = str2kernel_uint_t(procfile_lineword(aptr->ff, aptr->line, 1));
+}
+
+void arl_callback_status_vmswap(const char *name, uint32_t hash, const char *value, void *dst) {
+ (void)name; (void)hash; (void)value;
+ struct arl_callback_ptr *aptr = (struct arl_callback_ptr *)dst;
+ if(unlikely(procfile_linewords(aptr->ff, aptr->line) < 3)) return;
+
+ aptr->p->status_vmswap = str2kernel_uint_t(procfile_lineword(aptr->ff, aptr->line, 1));
+}
+
+void arl_callback_status_vmrss(const char *name, uint32_t hash, const char *value, void *dst) {
+ (void)name; (void)hash; (void)value;
+ struct arl_callback_ptr *aptr = (struct arl_callback_ptr *)dst;
+ if(unlikely(procfile_linewords(aptr->ff, aptr->line) < 3)) return;
+
+ aptr->p->status_vmrss = str2kernel_uint_t(procfile_lineword(aptr->ff, aptr->line, 1));
+}
+
+void arl_callback_status_rssfile(const char *name, uint32_t hash, const char *value, void *dst) {
+ (void)name; (void)hash; (void)value;
+ struct arl_callback_ptr *aptr = (struct arl_callback_ptr *)dst;
+ if(unlikely(procfile_linewords(aptr->ff, aptr->line) < 3)) return;
+
+ aptr->p->status_rssfile = str2kernel_uint_t(procfile_lineword(aptr->ff, aptr->line, 1));
+}
+
+void arl_callback_status_rssshmem(const char *name, uint32_t hash, const char *value, void *dst) {
+ (void)name; (void)hash; (void)value;
+ struct arl_callback_ptr *aptr = (struct arl_callback_ptr *)dst;
+ if(unlikely(procfile_linewords(aptr->ff, aptr->line) < 3)) return;
+
+ aptr->p->status_rssshmem = str2kernel_uint_t(procfile_lineword(aptr->ff, aptr->line, 1));
+}
+#endif // !__FreeBSD__
+
+static inline int read_proc_pid_status(struct pid_stat *p, void *ptr) {
+ p->status_vmsize = 0;
+ p->status_vmrss = 0;
+ p->status_vmshared = 0;
+ p->status_rssfile = 0;
+ p->status_rssshmem = 0;
+ p->status_vmswap = 0;
+
+#ifdef __FreeBSD__
+ struct kinfo_proc *proc_info = (struct kinfo_proc *)ptr;
+
+ p->uid = proc_info->ki_uid;
+ p->gid = proc_info->ki_groups[0];
+ p->status_vmsize = proc_info->ki_size / 1024; // in kB
+ p->status_vmrss = proc_info->ki_rssize * pagesize / 1024; // in kB
+ // TODO: what about shared and swap memory on FreeBSD?
+ return 1;
+#else
+ (void)ptr;
+
+ static struct arl_callback_ptr arl_ptr;
+ static procfile *ff = NULL;
+
+ if(unlikely(!p->status_arl)) {
+ p->status_arl = arl_create("/proc/pid/status", NULL, 60);
+ arl_expect_custom(p->status_arl, "Uid", arl_callback_status_uid, &arl_ptr);
+ arl_expect_custom(p->status_arl, "Gid", arl_callback_status_gid, &arl_ptr);
+ arl_expect_custom(p->status_arl, "VmSize", arl_callback_status_vmsize, &arl_ptr);
+ arl_expect_custom(p->status_arl, "VmRSS", arl_callback_status_vmrss, &arl_ptr);
+ arl_expect_custom(p->status_arl, "RssFile", arl_callback_status_rssfile, &arl_ptr);
+ arl_expect_custom(p->status_arl, "RssShmem", arl_callback_status_rssshmem, &arl_ptr);
+ arl_expect_custom(p->status_arl, "VmSwap", arl_callback_status_vmswap, &arl_ptr);
+ }
+
+ if(unlikely(!p->status_filename)) {
+ char filename[FILENAME_MAX + 1];
+ snprintfz(filename, FILENAME_MAX, "%s/proc/%d/status", netdata_configured_host_prefix, p->pid);
+ p->status_filename = strdupz(filename);
+ }
+
+ ff = procfile_reopen(ff, p->status_filename, (!ff)?" \t:,-()/":NULL, PROCFILE_FLAG_NO_ERROR_ON_FILE_IO);
+ if(unlikely(!ff)) return 0;
+
+ ff = procfile_readall(ff);
+ if(unlikely(!ff)) return 0;
+
+ calls_counter++;
+
+ // let ARL use this pid
+ arl_ptr.p = p;
+ arl_ptr.ff = ff;
+
+ size_t lines = procfile_lines(ff), l;
+ arl_begin(p->status_arl);
+
+ for(l = 0; l < lines ;l++) {
+ // debug_log("CHECK: line %zu of %zu, key '%s' = '%s'", l, lines, procfile_lineword(ff, l, 0), procfile_lineword(ff, l, 1));
+ arl_ptr.line = l;
+ if(unlikely(arl_check(p->status_arl,
+ procfile_lineword(ff, l, 0),
+ procfile_lineword(ff, l, 1)))) break;
+ }
+
+ p->status_vmshared = p->status_rssfile + p->status_rssshmem;
+
+ // debug_log("%s uid %d, gid %d, VmSize %zu, VmRSS %zu, RssFile %zu, RssShmem %zu, shared %zu", p->comm, (int)p->uid, (int)p->gid, p->status_vmsize, p->status_vmrss, p->status_rssfile, p->status_rssshmem, p->status_vmshared);
+
+ return 1;
+#endif
+}
+
+
+// ----------------------------------------------------------------------------
+
+static inline int read_proc_pid_stat(struct pid_stat *p, void *ptr) {
+ (void)ptr;
+
+#ifdef __FreeBSD__
+ struct kinfo_proc *proc_info = (struct kinfo_proc *)ptr;
+
+ if (unlikely(proc_info->ki_tdflags & TDF_IDLETD))
+ goto cleanup;
+#else
+ static procfile *ff = NULL;
+
+ if(unlikely(!p->stat_filename)) {
+ char filename[FILENAME_MAX + 1];
+ snprintfz(filename, FILENAME_MAX, "%s/proc/%d/stat", netdata_configured_host_prefix, p->pid);
+ p->stat_filename = strdupz(filename);
+ }
+
+ int set_quotes = (!ff)?1:0;
+
+ ff = procfile_reopen(ff, p->stat_filename, NULL, PROCFILE_FLAG_NO_ERROR_ON_FILE_IO);
+ if(unlikely(!ff)) goto cleanup;
+
+ // if(set_quotes) procfile_set_quotes(ff, "()");
+ if(unlikely(set_quotes))
+ procfile_set_open_close(ff, "(", ")");
+
+ ff = procfile_readall(ff);
+ if(unlikely(!ff)) goto cleanup;
+#endif
+
+ p->last_stat_collected_usec = p->stat_collected_usec;
+ p->stat_collected_usec = now_monotonic_usec();
+ calls_counter++;
+
+#ifdef __FreeBSD__
+ char *comm = proc_info->ki_comm;
+ p->ppid = proc_info->ki_ppid;
+#else
+ // p->pid = str2pid_t(procfile_lineword(ff, 0, 0));
+ char *comm = procfile_lineword(ff, 0, 1);
+ // p->state = *(procfile_lineword(ff, 0, 2));
+ p->ppid = (int32_t)str2pid_t(procfile_lineword(ff, 0, 3));
+ // p->pgrp = (int32_t)str2pid_t(procfile_lineword(ff, 0, 4));
+ // p->session = (int32_t)str2pid_t(procfile_lineword(ff, 0, 5));
+ // p->tty_nr = (int32_t)str2pid_t(procfile_lineword(ff, 0, 6));
+ // p->tpgid = (int32_t)str2pid_t(procfile_lineword(ff, 0, 7));
+ // p->flags = str2uint64_t(procfile_lineword(ff, 0, 8));
+#endif
+
+ if(strcmp(p->comm, comm) != 0) {
+ if(unlikely(debug_enabled)) {
+ if(p->comm[0])
+ debug_log("\tpid %d (%s) changed name to '%s'", p->pid, p->comm, comm);
+ else
+ debug_log("\tJust added %d (%s)", p->pid, comm);
+ }
+
+ strncpyz(p->comm, comm, MAX_COMPARE_NAME);
+
+ // /proc/<pid>/cmdline
+ if(likely(proc_pid_cmdline_is_needed))
+ managed_log(p, PID_LOG_CMDLINE, read_proc_pid_cmdline(p));
+
+ assign_target_to_pid(p);
+ }
+
+#ifdef __FreeBSD__
+ pid_incremental_rate(stat, p->minflt, (kernel_uint_t)proc_info->ki_rusage.ru_minflt);
+ pid_incremental_rate(stat, p->cminflt, (kernel_uint_t)proc_info->ki_rusage_ch.ru_minflt);
+ pid_incremental_rate(stat, p->majflt, (kernel_uint_t)proc_info->ki_rusage.ru_majflt);
+ pid_incremental_rate(stat, p->cmajflt, (kernel_uint_t)proc_info->ki_rusage_ch.ru_majflt);
+ pid_incremental_rate(stat, p->utime, (kernel_uint_t)proc_info->ki_rusage.ru_utime.tv_sec * 100 + proc_info->ki_rusage.ru_utime.tv_usec / 10000);
+ pid_incremental_rate(stat, p->stime, (kernel_uint_t)proc_info->ki_rusage.ru_stime.tv_sec * 100 + proc_info->ki_rusage.ru_stime.tv_usec / 10000);
+ pid_incremental_rate(stat, p->cutime, (kernel_uint_t)proc_info->ki_rusage_ch.ru_utime.tv_sec * 100 + proc_info->ki_rusage_ch.ru_utime.tv_usec / 10000);
+ pid_incremental_rate(stat, p->cstime, (kernel_uint_t)proc_info->ki_rusage_ch.ru_stime.tv_sec * 100 + proc_info->ki_rusage_ch.ru_stime.tv_usec / 10000);
+
+ p->num_threads = proc_info->ki_numthreads;
+
+ if(enable_guest_charts) {
+ enable_guest_charts = 0;
+ info("Guest charts aren't supported by FreeBSD");
+ }
+#else
+ pid_incremental_rate(stat, p->minflt, str2kernel_uint_t(procfile_lineword(ff, 0, 9)));
+ pid_incremental_rate(stat, p->cminflt, str2kernel_uint_t(procfile_lineword(ff, 0, 10)));
+ pid_incremental_rate(stat, p->majflt, str2kernel_uint_t(procfile_lineword(ff, 0, 11)));
+ pid_incremental_rate(stat, p->cmajflt, str2kernel_uint_t(procfile_lineword(ff, 0, 12)));
+ pid_incremental_rate(stat, p->utime, str2kernel_uint_t(procfile_lineword(ff, 0, 13)));
+ pid_incremental_rate(stat, p->stime, str2kernel_uint_t(procfile_lineword(ff, 0, 14)));
+ pid_incremental_rate(stat, p->cutime, str2kernel_uint_t(procfile_lineword(ff, 0, 15)));
+ pid_incremental_rate(stat, p->cstime, str2kernel_uint_t(procfile_lineword(ff, 0, 16)));
+ // p->priority = str2kernel_uint_t(procfile_lineword(ff, 0, 17));
+ // p->nice = str2kernel_uint_t(procfile_lineword(ff, 0, 18));
+ p->num_threads = (int32_t)str2uint32_t(procfile_lineword(ff, 0, 19));
+ // p->itrealvalue = str2kernel_uint_t(procfile_lineword(ff, 0, 20));
+ // p->starttime = str2kernel_uint_t(procfile_lineword(ff, 0, 21));
+ // p->vsize = str2kernel_uint_t(procfile_lineword(ff, 0, 22));
+ // p->rss = str2kernel_uint_t(procfile_lineword(ff, 0, 23));
+ // p->rsslim = str2kernel_uint_t(procfile_lineword(ff, 0, 24));
+ // p->starcode = str2kernel_uint_t(procfile_lineword(ff, 0, 25));
+ // p->endcode = str2kernel_uint_t(procfile_lineword(ff, 0, 26));
+ // p->startstack = str2kernel_uint_t(procfile_lineword(ff, 0, 27));
+ // p->kstkesp = str2kernel_uint_t(procfile_lineword(ff, 0, 28));
+ // p->kstkeip = str2kernel_uint_t(procfile_lineword(ff, 0, 29));
+ // p->signal = str2kernel_uint_t(procfile_lineword(ff, 0, 30));
+ // p->blocked = str2kernel_uint_t(procfile_lineword(ff, 0, 31));
+ // p->sigignore = str2kernel_uint_t(procfile_lineword(ff, 0, 32));
+ // p->sigcatch = str2kernel_uint_t(procfile_lineword(ff, 0, 33));
+ // p->wchan = str2kernel_uint_t(procfile_lineword(ff, 0, 34));
+ // p->nswap = str2kernel_uint_t(procfile_lineword(ff, 0, 35));
+ // p->cnswap = str2kernel_uint_t(procfile_lineword(ff, 0, 36));
+ // p->exit_signal = str2kernel_uint_t(procfile_lineword(ff, 0, 37));
+ // p->processor = str2kernel_uint_t(procfile_lineword(ff, 0, 38));
+ // p->rt_priority = str2kernel_uint_t(procfile_lineword(ff, 0, 39));
+ // p->policy = str2kernel_uint_t(procfile_lineword(ff, 0, 40));
+ // p->delayacct_blkio_ticks = str2kernel_uint_t(procfile_lineword(ff, 0, 41));
+
+ if(enable_guest_charts) {
+
+ pid_incremental_rate(stat, p->gtime, str2kernel_uint_t(procfile_lineword(ff, 0, 42)));
+ pid_incremental_rate(stat, p->cgtime, str2kernel_uint_t(procfile_lineword(ff, 0, 43)));
+
+ if (show_guest_time || p->gtime || p->cgtime) {
+ p->utime -= (p->utime >= p->gtime) ? p->gtime : p->utime;
+ p->cutime -= (p->cutime >= p->cgtime) ? p->cgtime : p->cutime;
+ show_guest_time = 1;
+ }
+ }
+#endif
+
+ if(unlikely(debug_enabled || (p->target && p->target->debug_enabled)))
+ debug_log_int("READ PROC/PID/STAT: %s/proc/%d/stat, process: '%s' on target '%s' (dt=%llu) VALUES: utime=" KERNEL_UINT_FORMAT ", stime=" KERNEL_UINT_FORMAT ", cutime=" KERNEL_UINT_FORMAT ", cstime=" KERNEL_UINT_FORMAT ", minflt=" KERNEL_UINT_FORMAT ", majflt=" KERNEL_UINT_FORMAT ", cminflt=" KERNEL_UINT_FORMAT ", cmajflt=" KERNEL_UINT_FORMAT ", threads=%d", netdata_configured_host_prefix, p->pid, p->comm, (p->target)?p->target->name:"UNSET", p->stat_collected_usec - p->last_stat_collected_usec, p->utime, p->stime, p->cutime, p->cstime, p->minflt, p->majflt, p->cminflt, p->cmajflt, p->num_threads);
+
+ if(unlikely(global_iterations_counter == 1)) {
+ p->minflt = 0;
+ p->cminflt = 0;
+ p->majflt = 0;
+ p->cmajflt = 0;
+ p->utime = 0;
+ p->stime = 0;
+ p->gtime = 0;
+ p->cutime = 0;
+ p->cstime = 0;
+ p->cgtime = 0;
+ }
+
+ return 1;
+
+cleanup:
+ p->minflt = 0;
+ p->cminflt = 0;
+ p->majflt = 0;
+ p->cmajflt = 0;
+ p->utime = 0;
+ p->stime = 0;
+ p->gtime = 0;
+ p->cutime = 0;
+ p->cstime = 0;
+ p->cgtime = 0;
+ p->num_threads = 0;
+ // p->rss = 0;
+ return 0;
+}
+
+static inline int read_proc_pid_io(struct pid_stat *p, void *ptr) {
+ (void)ptr;
+#ifdef __FreeBSD__
+ struct kinfo_proc *proc_info = (struct kinfo_proc *)ptr;
+#else
+ static procfile *ff = NULL;
+
+ if(unlikely(!p->io_filename)) {
+ char filename[FILENAME_MAX + 1];
+ snprintfz(filename, FILENAME_MAX, "%s/proc/%d/io", netdata_configured_host_prefix, p->pid);
+ p->io_filename = strdupz(filename);
+ }
+
+ // open the file
+ ff = procfile_reopen(ff, p->io_filename, NULL, PROCFILE_FLAG_NO_ERROR_ON_FILE_IO);
+ if(unlikely(!ff)) goto cleanup;
+
+ ff = procfile_readall(ff);
+ if(unlikely(!ff)) goto cleanup;
+#endif
+
+ calls_counter++;
+
+ p->last_io_collected_usec = p->io_collected_usec;
+ p->io_collected_usec = now_monotonic_usec();
+
+#ifdef __FreeBSD__
+ pid_incremental_rate(io, p->io_storage_bytes_read, proc_info->ki_rusage.ru_inblock);
+ pid_incremental_rate(io, p->io_storage_bytes_written, proc_info->ki_rusage.ru_oublock);
+#else
+ pid_incremental_rate(io, p->io_logical_bytes_read, str2kernel_uint_t(procfile_lineword(ff, 0, 1)));
+ pid_incremental_rate(io, p->io_logical_bytes_written, str2kernel_uint_t(procfile_lineword(ff, 1, 1)));
+ // pid_incremental_rate(io, p->io_read_calls, str2kernel_uint_t(procfile_lineword(ff, 2, 1)));
+ // pid_incremental_rate(io, p->io_write_calls, str2kernel_uint_t(procfile_lineword(ff, 3, 1)));
+ pid_incremental_rate(io, p->io_storage_bytes_read, str2kernel_uint_t(procfile_lineword(ff, 4, 1)));
+ pid_incremental_rate(io, p->io_storage_bytes_written, str2kernel_uint_t(procfile_lineword(ff, 5, 1)));
+ // pid_incremental_rate(io, p->io_cancelled_write_bytes, str2kernel_uint_t(procfile_lineword(ff, 6, 1)));
+#endif
+
+ if(unlikely(global_iterations_counter == 1)) {
+ p->io_logical_bytes_read = 0;
+ p->io_logical_bytes_written = 0;
+ // p->io_read_calls = 0;
+ // p->io_write_calls = 0;
+ p->io_storage_bytes_read = 0;
+ p->io_storage_bytes_written = 0;
+ // p->io_cancelled_write_bytes = 0;
+ }
+
+ return 1;
+
+#ifndef __FreeBSD__
+cleanup:
+ p->io_logical_bytes_read = 0;
+ p->io_logical_bytes_written = 0;
+ // p->io_read_calls = 0;
+ // p->io_write_calls = 0;
+ p->io_storage_bytes_read = 0;
+ p->io_storage_bytes_written = 0;
+ // p->io_cancelled_write_bytes = 0;
+ return 0;
+#endif
+}
+
+#if (ALL_PIDS_ARE_READ_INSTANTLY == 0)
+static inline int read_proc_stat() {
+ static char filename[FILENAME_MAX + 1] = "";
+ static procfile *ff = NULL;
+ static kernel_uint_t utime_raw = 0, stime_raw = 0, gtime_raw = 0, gntime_raw = 0, ntime_raw = 0;
+ static usec_t collected_usec = 0, last_collected_usec = 0;
+
+ if(unlikely(!ff)) {
+ snprintfz(filename, FILENAME_MAX, "%s/proc/stat", netdata_configured_host_prefix);
+ ff = procfile_open(filename, " \t:", PROCFILE_FLAG_DEFAULT);
+ if(unlikely(!ff)) goto cleanup;
+ }
+
+ ff = procfile_readall(ff);
+ if(unlikely(!ff)) goto cleanup;
+
+ last_collected_usec = collected_usec;
+ collected_usec = now_monotonic_usec();
+
+ calls_counter++;
+
+    // temporary - it is added to global_utime below
+ kernel_uint_t global_ntime = 0;
+
+ incremental_rate(global_utime, utime_raw, str2kernel_uint_t(procfile_lineword(ff, 0, 1)), collected_usec, last_collected_usec);
+ incremental_rate(global_ntime, ntime_raw, str2kernel_uint_t(procfile_lineword(ff, 0, 2)), collected_usec, last_collected_usec);
+ incremental_rate(global_stime, stime_raw, str2kernel_uint_t(procfile_lineword(ff, 0, 3)), collected_usec, last_collected_usec);
+ incremental_rate(global_gtime, gtime_raw, str2kernel_uint_t(procfile_lineword(ff, 0, 10)), collected_usec, last_collected_usec);
+
+ global_utime += global_ntime;
+
+ if(enable_guest_charts) {
+        // temporary - it is added to global_gtime below
+ kernel_uint_t global_gntime = 0;
+
+        // guest nice time, added to guest time
+ incremental_rate(global_gntime, gntime_raw, str2kernel_uint_t(procfile_lineword(ff, 0, 11)), collected_usec, last_collected_usec);
+
+ global_gtime += global_gntime;
+
+ // remove guest time from user time
+ global_utime -= (global_utime > global_gtime) ? global_gtime : global_utime;
+ }
+
+ if(unlikely(global_iterations_counter == 1)) {
+ global_utime = 0;
+ global_stime = 0;
+ global_gtime = 0;
+ }
+
+ return 1;
+
+cleanup:
+ global_utime = 0;
+ global_stime = 0;
+ global_gtime = 0;
+ return 0;
+}
+#else
+static inline int read_proc_stat() {
+ return 0;
+}
+#endif
+
+// ----------------------------------------------------------------------------
+
+int file_descriptor_compare(void* a, void* b) {
+#ifdef NETDATA_INTERNAL_CHECKS
+ if(((struct file_descriptor *)a)->magic != 0x0BADCAFE || ((struct file_descriptor *)b)->magic != 0x0BADCAFE)
+ error("Corrupted index data detected. Please report this.");
+#endif /* NETDATA_INTERNAL_CHECKS */
+
+ if(((struct file_descriptor *)a)->hash < ((struct file_descriptor *)b)->hash)
+ return -1;
+
+ else if(((struct file_descriptor *)a)->hash > ((struct file_descriptor *)b)->hash)
+ return 1;
+
+ else
+ return strcmp(((struct file_descriptor *)a)->name, ((struct file_descriptor *)b)->name);
+}
+
+// int file_descriptor_iterator(avl *a) { if(a) {}; return 0; }
+
+avl_tree all_files_index = {
+ NULL,
+ file_descriptor_compare
+};
+
+static struct file_descriptor *file_descriptor_find(const char *name, uint32_t hash) {
+ struct file_descriptor tmp;
+ tmp.hash = (hash)?hash:simple_hash(name);
+ tmp.name = name;
+ tmp.count = 0;
+ tmp.pos = 0;
+#ifdef NETDATA_INTERNAL_CHECKS
+ tmp.magic = 0x0BADCAFE;
+#endif /* NETDATA_INTERNAL_CHECKS */
+
+ return (struct file_descriptor *)avl_search(&all_files_index, (avl *) &tmp);
+}
+
+#define file_descriptor_add(fd) avl_insert(&all_files_index, (avl *)(fd))
+#define file_descriptor_remove(fd) avl_remove(&all_files_index, (avl *)(fd))
+
+// ----------------------------------------------------------------------------
+
+static inline void file_descriptor_not_used(int id)
+{
+ if(id > 0 && id < all_files_size) {
+
+#ifdef NETDATA_INTERNAL_CHECKS
+ if(all_files[id].magic != 0x0BADCAFE) {
+ error("Ignoring request to remove empty file id %d.", id);
+ return;
+ }
+#endif /* NETDATA_INTERNAL_CHECKS */
+
+ debug_log("decreasing slot %d (count = %d).", id, all_files[id].count);
+
+ if(all_files[id].count > 0) {
+ all_files[id].count--;
+
+ if(!all_files[id].count) {
+ debug_log(" >> slot %d is empty.", id);
+
+ if(unlikely(file_descriptor_remove(&all_files[id]) != (void *)&all_files[id]))
+ error("INTERNAL ERROR: removal of unused fd from index, removed a different fd");
+
+#ifdef NETDATA_INTERNAL_CHECKS
+ all_files[id].magic = 0x00000000;
+#endif /* NETDATA_INTERNAL_CHECKS */
+ all_files_len--;
+ }
+ }
+ else
+ error("Request to decrease counter of fd %d (%s), while the use counter is 0", id, all_files[id].name);
+ }
+ else error("Request to decrease counter of fd %d, which is outside the array size (1 to %d)", id, all_files_size);
+}
+
+static inline void all_files_grow() {
+ void *old = all_files;
+ int i;
+
+ // there is no empty slot
+ debug_log("extending fd array to %d entries", all_files_size + FILE_DESCRIPTORS_INCREASE_STEP);
+
+ all_files = reallocz(all_files, (all_files_size + FILE_DESCRIPTORS_INCREASE_STEP) * sizeof(struct file_descriptor));
+
+ // if the address changed, we have to rebuild the index
+ // since all pointers are now invalid
+
+ if(unlikely(old && old != (void *)all_files)) {
+ debug_log(" >> re-indexing.");
+
+ all_files_index.root = NULL;
+ for(i = 0; i < all_files_size; i++) {
+ if(!all_files[i].count) continue;
+ if(unlikely(file_descriptor_add(&all_files[i]) != (void *)&all_files[i]))
+ error("INTERNAL ERROR: duplicate indexing of fd during realloc.");
+ }
+
+ debug_log(" >> re-indexing done.");
+ }
+
+ // initialize the newly added entries
+
+ for(i = all_files_size; i < (all_files_size + FILE_DESCRIPTORS_INCREASE_STEP); i++) {
+ all_files[i].count = 0;
+ all_files[i].name = NULL;
+#ifdef NETDATA_INTERNAL_CHECKS
+ all_files[i].magic = 0x00000000;
+#endif /* NETDATA_INTERNAL_CHECKS */
+ all_files[i].pos = i;
+ }
+
+ if(unlikely(!all_files_size)) all_files_len = 1;
+ all_files_size += FILE_DESCRIPTORS_INCREASE_STEP;
+}
+
+static inline int file_descriptor_set_on_empty_slot(const char *name, uint32_t hash, FD_FILETYPE type) {
+ // check we have enough memory to add it
+ if(!all_files || all_files_len == all_files_size)
+ all_files_grow();
+
+ debug_log(" >> searching for empty slot.");
+
+ // search for an empty slot
+
+ static int last_pos = 0;
+ int i, c;
+ for(i = 0, c = last_pos ; i < all_files_size ; i++, c++) {
+ if(c >= all_files_size) c = 0;
+ if(c == 0) continue;
+
+ if(!all_files[c].count) {
+ debug_log(" >> Examining slot %d.", c);
+
+#ifdef NETDATA_INTERNAL_CHECKS
+ if(all_files[c].magic == 0x0BADCAFE && all_files[c].name && file_descriptor_find(all_files[c].name, all_files[c].hash))
+ error("fd on position %d is not cleared properly. It still has %s in it.", c, all_files[c].name);
+#endif /* NETDATA_INTERNAL_CHECKS */
+
+ debug_log(" >> %s fd position %d for %s (last name: %s)", all_files[c].name?"re-using":"using", c, name, all_files[c].name);
+
+ freez((void *)all_files[c].name);
+ all_files[c].name = NULL;
+ last_pos = c;
+ break;
+ }
+ }
+
+ all_files_len++;
+
+ if(i == all_files_size) {
+ fatal("We should find an empty slot, but there isn't any");
+ exit(1);
+ }
+ // else we have an empty slot in 'c'
+
+ debug_log(" >> updating slot %d.", c);
+
+ all_files[c].name = strdupz(name);
+ all_files[c].hash = hash;
+ all_files[c].type = type;
+ all_files[c].pos = c;
+ all_files[c].count = 1;
+#ifdef NETDATA_INTERNAL_CHECKS
+ all_files[c].magic = 0x0BADCAFE;
+#endif /* NETDATA_INTERNAL_CHECKS */
+ if(unlikely(file_descriptor_add(&all_files[c]) != (void *)&all_files[c]))
+ error("INTERNAL ERROR: duplicate indexing of fd.");
+
+ debug_log("using fd position %d (name: %s)", c, all_files[c].name);
+
+ return c;
+}
+
+static inline int file_descriptor_find_or_add(const char *name, uint32_t hash) {
+ if(unlikely(!hash))
+ hash = simple_hash(name);
+
+ debug_log("adding or finding name '%s' with hash %u", name, hash);
+
+ struct file_descriptor *fd = file_descriptor_find(name, hash);
+ if(fd) {
+ // found
+ debug_log(" >> found on slot %d", fd->pos);
+
+ fd->count++;
+ return fd->pos;
+ }
+ // not found
+
+ FD_FILETYPE type;
+ if(likely(name[0] == '/')) type = FILETYPE_FILE;
+ else if(likely(strncmp(name, "pipe:", 5) == 0)) type = FILETYPE_PIPE;
+ else if(likely(strncmp(name, "socket:", 7) == 0)) type = FILETYPE_SOCKET;
+ else if(likely(strncmp(name, "anon_inode:", 11) == 0)) {
+ const char *t = &name[11];
+
+ if(strcmp(t, "inotify") == 0) type = FILETYPE_INOTIFY;
+ else if(strcmp(t, "[eventfd]") == 0) type = FILETYPE_EVENTFD;
+ else if(strcmp(t, "[eventpoll]") == 0) type = FILETYPE_EVENTPOLL;
+ else if(strcmp(t, "[timerfd]") == 0) type = FILETYPE_TIMERFD;
+ else if(strcmp(t, "[signalfd]") == 0) type = FILETYPE_SIGNALFD;
+ else {
+ debug_log("UNKNOWN anonymous inode: %s", name);
+ type = FILETYPE_OTHER;
+ }
+ }
+ else if(likely(strcmp(name, "inotify") == 0)) type = FILETYPE_INOTIFY;
+ else {
+ debug_log("UNKNOWN linkname: %s", name);
+ type = FILETYPE_OTHER;
+ }
+
+ return file_descriptor_set_on_empty_slot(name, hash, type);
+}
+
+static inline void clear_pid_fd(struct pid_fd *pfd) {
+ pfd->fd = 0;
+
+ #ifndef __FreeBSD__
+ pfd->link_hash = 0;
+ pfd->inode = 0;
+ pfd->cache_iterations_counter = 0;
+ pfd->cache_iterations_reset = 0;
+#endif
+}
+
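+// mark-and-sweep pattern used while reading a pid's file descriptors:
+//   1. make_all_pid_fds_negative() negates every known fd before the scan
+//   2. while scanning, every fd that is still open is flipped back to positive
+//      (or assigned fresh via file_descriptor_find_or_add())
+//   3. cleanup_negative_pid_fds() releases whatever remained negative - those
+//      file descriptors have been closed since the previous iteration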
+static inline void make_all_pid_fds_negative(struct pid_stat *p) {
+ struct pid_fd *pfd = p->fds, *pfdend = &p->fds[p->fds_size];
+ while(pfd < pfdend) {
+ pfd->fd = -(pfd->fd);
+ pfd++;
+ }
+}
+
+static inline void cleanup_negative_pid_fds(struct pid_stat *p) {
+ struct pid_fd *pfd = p->fds, *pfdend = &p->fds[p->fds_size];
+
+ while(pfd < pfdend) {
+ int fd = pfd->fd;
+
+ if(unlikely(fd < 0)) {
+ file_descriptor_not_used(-(fd));
+ clear_pid_fd(pfd);
+ }
+
+ pfd++;
+ }
+}
+
+static inline void init_pid_fds(struct pid_stat *p, size_t first, size_t size) {
+ struct pid_fd *pfd = &p->fds[first], *pfdend = &p->fds[first + size];
+ size_t i = first;
+
+ while(pfd < pfdend) {
+#ifndef __FreeBSD__
+ pfd->filename = NULL;
+#endif
+ clear_pid_fd(pfd);
+ pfd++;
+ i++;
+ }
+}
+
+static inline int read_pid_file_descriptors(struct pid_stat *p, void *ptr) {
+ (void)ptr;
+#ifdef __FreeBSD__
+ int mib[4];
+ size_t size;
+ struct kinfo_file *fds;
+ static char *fdsbuf;
+ char *bfdsbuf, *efdsbuf;
+ char fdsname[FILENAME_MAX + 1];
+
+ // we make all pid fds negative, so that
+ // we can detect unused file descriptors
+ // at the end, to free them
+ make_all_pid_fds_negative(p);
+
+ mib[0] = CTL_KERN;
+ mib[1] = KERN_PROC;
+ mib[2] = KERN_PROC_FILEDESC;
+ mib[3] = p->pid;
+
+ if (unlikely(sysctl(mib, 4, NULL, &size, NULL, 0))) {
+ error("sysctl error: Can't get file descriptors data size for pid %d", p->pid);
+ return 0;
+ }
+ if (likely(size > 0))
+ fdsbuf = reallocz(fdsbuf, size);
+ if (unlikely(sysctl(mib, 4, fdsbuf, &size, NULL, 0))) {
+ error("sysctl error: Can't get file descriptors data for pid %d", p->pid);
+ return 0;
+ }
+
+ bfdsbuf = fdsbuf;
+ efdsbuf = fdsbuf + size;
+ while (bfdsbuf < efdsbuf) {
+ fds = (struct kinfo_file *)(uintptr_t)bfdsbuf;
+ if (unlikely(fds->kf_structsize == 0))
+ break;
+
+ // do not process file descriptors for current working directory, root directory,
+ // jail directory, ktrace vnode, text vnode and controlling terminal
+ if (unlikely(fds->kf_fd < 0)) {
+ bfdsbuf += fds->kf_structsize;
+ continue;
+ }
+
+ // get file descriptors array index
+ int fdid = fds->kf_fd;
+
+ // check if the fds array is small
+ if (unlikely(fdid >= p->fds_size)) {
+ // it is small, extend it
+
+ debug_log("extending fd memory slots for %s from %d to %d", p->comm, p->fds_size, fdid + MAX_SPARE_FDS);
+
+ p->fds = reallocz(p->fds, (fdid + MAX_SPARE_FDS) * sizeof(struct pid_fd));
+
+ // and initialize it
+ init_pid_fds(p, p->fds_size, (fdid + MAX_SPARE_FDS) - p->fds_size);
+ p->fds_size = fdid + MAX_SPARE_FDS;
+ }
+
+ if (unlikely(p->fds[fdid].fd == 0)) {
+ // we don't know this fd, get it
+
+ switch (fds->kf_type) {
+ case KF_TYPE_FIFO:
+ case KF_TYPE_VNODE:
+ if (unlikely(!fds->kf_path[0])) {
+ sprintf(fdsname, "other: inode: %lu", fds->kf_un.kf_file.kf_file_fileid);
+ break;
+ }
+ sprintf(fdsname, "%s", fds->kf_path);
+ break;
+ case KF_TYPE_SOCKET:
+ switch (fds->kf_sock_domain) {
+ case AF_INET:
+ case AF_INET6:
+ if (fds->kf_sock_protocol == IPPROTO_TCP)
+ sprintf(fdsname, "socket: %d %lx", fds->kf_sock_protocol, fds->kf_un.kf_sock.kf_sock_inpcb);
+ else
+ sprintf(fdsname, "socket: %d %lx", fds->kf_sock_protocol, fds->kf_un.kf_sock.kf_sock_pcb);
+ break;
+ case AF_UNIX:
+ /* print address of pcb and connected pcb */
+ sprintf(fdsname, "socket: %lx %lx", fds->kf_un.kf_sock.kf_sock_pcb, fds->kf_un.kf_sock.kf_sock_unpconn);
+ break;
+ default:
+ /* print protocol number and socket address */
+#if __FreeBSD_version < 1200031
+ sprintf(fdsname, "socket: other: %d %s %s", fds->kf_sock_protocol, fds->kf_sa_local.__ss_pad1, fds->kf_sa_local.__ss_pad2);
+#else
+ sprintf(fdsname, "socket: other: %d %s %s", fds->kf_sock_protocol, fds->kf_un.kf_sock.kf_sa_local.__ss_pad1, fds->kf_un.kf_sock.kf_sa_local.__ss_pad2);
+#endif
+ }
+ break;
+ case KF_TYPE_PIPE:
+ sprintf(fdsname, "pipe: %lu %lu", fds->kf_un.kf_pipe.kf_pipe_addr, fds->kf_un.kf_pipe.kf_pipe_peer);
+ break;
+ case KF_TYPE_PTS:
+#if __FreeBSD_version < 1200031
+ sprintf(fdsname, "other: pts: %u", fds->kf_un.kf_pts.kf_pts_dev);
+#else
+ sprintf(fdsname, "other: pts: %lu", fds->kf_un.kf_pts.kf_pts_dev);
+#endif
+ break;
+ case KF_TYPE_SHM:
+ sprintf(fdsname, "other: shm: %s size: %lu", fds->kf_path, fds->kf_un.kf_file.kf_file_size);
+ break;
+ case KF_TYPE_SEM:
+ sprintf(fdsname, "other: sem: %u", fds->kf_un.kf_sem.kf_sem_value);
+ break;
+ default:
+ sprintf(fdsname, "other: pid: %d fd: %d", fds->kf_un.kf_proc.kf_pid, fds->kf_fd);
+ }
+
+ // if another process already has this, we will get
+ // the same id
+ p->fds[fdid].fd = file_descriptor_find_or_add(fdsname, 0);
+ }
+
+ // else make it positive again, we need it
+ // of course, the actual file may have changed
+
+ else
+ p->fds[fdid].fd = -p->fds[fdid].fd;
+
+ bfdsbuf += fds->kf_structsize;
+ }
+#else
+ if(unlikely(!p->fds_dirname)) {
+ char dirname[FILENAME_MAX+1];
+ snprintfz(dirname, FILENAME_MAX, "%s/proc/%d/fd", netdata_configured_host_prefix, p->pid);
+ p->fds_dirname = strdupz(dirname);
+ }
+
+ DIR *fds = opendir(p->fds_dirname);
+ if(unlikely(!fds)) return 0;
+
+ struct dirent *de;
+ char linkname[FILENAME_MAX + 1];
+
+ // we make all pid fds negative, so that
+ // we can detect unused file descriptors
+ // at the end, to free them
+ make_all_pid_fds_negative(p);
+
+ while((de = readdir(fds))) {
+ // we need only files with numeric names
+
+ if(unlikely(de->d_name[0] < '0' || de->d_name[0] > '9'))
+ continue;
+
+ // get its number
+ int fdid = (int) str2l(de->d_name);
+ if(unlikely(fdid < 0)) continue;
+
+ // check if the fds array is small
+ if(unlikely((size_t)fdid >= p->fds_size)) {
+ // it is small, extend it
+
+ debug_log("extending fd memory slots for %s from %d to %d"
+ , p->comm
+ , p->fds_size
+ , fdid + MAX_SPARE_FDS
+ );
+
+ p->fds = reallocz(p->fds, (fdid + MAX_SPARE_FDS) * sizeof(struct pid_fd));
+
+ // and initialize it
+ init_pid_fds(p, p->fds_size, (fdid + MAX_SPARE_FDS) - p->fds_size);
+ p->fds_size = (size_t)fdid + MAX_SPARE_FDS;
+ }
+
+ if(unlikely(p->fds[fdid].fd < 0 && de->d_ino != p->fds[fdid].inode)) {
+ // inodes do not match, clear the previous entry
+ inodes_changed_counter++;
+ file_descriptor_not_used(-p->fds[fdid].fd);
+ clear_pid_fd(&p->fds[fdid]);
+ }
+
+ if(p->fds[fdid].fd < 0 && p->fds[fdid].cache_iterations_counter > 0) {
+ p->fds[fdid].fd = -p->fds[fdid].fd;
+ p->fds[fdid].cache_iterations_counter--;
+ continue;
+ }
+
+ if(unlikely(!p->fds[fdid].filename)) {
+ filenames_allocated_counter++;
+ char fdname[FILENAME_MAX + 1];
+ snprintfz(fdname, FILENAME_MAX, "%s/proc/%d/fd/%s", netdata_configured_host_prefix, p->pid, de->d_name);
+ p->fds[fdid].filename = strdupz(fdname);
+ }
+
+ file_counter++;
+ ssize_t l = readlink(p->fds[fdid].filename, linkname, FILENAME_MAX);
+ if(unlikely(l == -1)) {
+ // cannot read the link
+
+ if(debug_enabled || (p->target && p->target->debug_enabled))
+ error("Cannot read link %s", p->fds[fdid].filename);
+
+ if(unlikely(p->fds[fdid].fd < 0)) {
+ file_descriptor_not_used(-p->fds[fdid].fd);
+ clear_pid_fd(&p->fds[fdid]);
+ }
+
+ continue;
+ }
+ else
+ linkname[l] = '\0';
+
+ uint32_t link_hash = simple_hash(linkname);
+
+ if(unlikely(p->fds[fdid].fd < 0 && p->fds[fdid].link_hash != link_hash)) {
+ // the link changed
+ links_changed_counter++;
+ file_descriptor_not_used(-p->fds[fdid].fd);
+ clear_pid_fd(&p->fds[fdid]);
+ }
+
+ if(unlikely(p->fds[fdid].fd == 0)) {
+ // we don't know this fd, get it
+
+ // if another process already has this, we will get
+ // the same id
+ p->fds[fdid].fd = file_descriptor_find_or_add(linkname, link_hash);
+ p->fds[fdid].inode = de->d_ino;
+ p->fds[fdid].link_hash = link_hash;
+ }
+ else {
+ // else make it positive again, we need it
+ p->fds[fdid].fd = -p->fds[fdid].fd;
+ }
+
+ // caching control
+ // without this we read all the files on every iteration
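+ // a rough sketch of the arithmetic, assuming max_fds_cache_seconds = 10,
+ // update_every = 1 and fdid = 7: spread = 10 and max = (10 + 7 % 10) / 1 = 17,
+ // so this fd's link is re-read roughly every 17 iterations; the fdid % spread
+ // term staggers the re-reads so they do not all happen on the same iteration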
+ if(max_fds_cache_seconds > 0) {
+ size_t spread = ((size_t)max_fds_cache_seconds > 10) ? 10 : (size_t)max_fds_cache_seconds;
+
+ // cache it for a few iterations
+ size_t max = ((size_t) max_fds_cache_seconds + (fdid % spread)) / (size_t) update_every;
+ p->fds[fdid].cache_iterations_reset++;
+
+ if(unlikely(p->fds[fdid].cache_iterations_reset % spread == (size_t) fdid % spread))
+ p->fds[fdid].cache_iterations_reset++;
+
+ if(unlikely((fdid <= 2 && p->fds[fdid].cache_iterations_reset > 5) ||
+ p->fds[fdid].cache_iterations_reset > max)) {
+ // for stdin, stdout, stderr (fdid <= 2) we have already checked a few times, or the value went above the max - cap it at max
+ p->fds[fdid].cache_iterations_reset = max;
+ }
+
+ p->fds[fdid].cache_iterations_counter = p->fds[fdid].cache_iterations_reset;
+ }
+ }
+
+ closedir(fds);
+#endif
+ cleanup_negative_pid_fds(p);
+
+ return 1;
+}
+
+// ----------------------------------------------------------------------------
+
+static inline int debug_print_process_and_parents(struct pid_stat *p, usec_t time) {
+ char *prefix = "\\_ ";
+ int indent = 0;
+
+ if(p->parent)
+ indent = debug_print_process_and_parents(p->parent, p->stat_collected_usec);
+ else
+ prefix = " > ";
+
+ char buffer[indent + 1];
+ int i;
+
+ for(i = 0; i < indent ;i++) buffer[i] = ' ';
+ buffer[i] = '\0';
+
+ fprintf(stderr, " %s %s%s (%d %s %llu"
+ , buffer
+ , prefix
+ , p->comm
+ , p->pid
+ , p->updated?"running":"exited"
+ , p->stat_collected_usec - time
+ );
+
+ if(p->utime) fprintf(stderr, " utime=" KERNEL_UINT_FORMAT, p->utime);
+ if(p->stime) fprintf(stderr, " stime=" KERNEL_UINT_FORMAT, p->stime);
+ if(p->gtime) fprintf(stderr, " gtime=" KERNEL_UINT_FORMAT, p->gtime);
+ if(p->cutime) fprintf(stderr, " cutime=" KERNEL_UINT_FORMAT, p->cutime);
+ if(p->cstime) fprintf(stderr, " cstime=" KERNEL_UINT_FORMAT, p->cstime);
+ if(p->cgtime) fprintf(stderr, " cgtime=" KERNEL_UINT_FORMAT, p->cgtime);
+ if(p->minflt) fprintf(stderr, " minflt=" KERNEL_UINT_FORMAT, p->minflt);
+ if(p->cminflt) fprintf(stderr, " cminflt=" KERNEL_UINT_FORMAT, p->cminflt);
+ if(p->majflt) fprintf(stderr, " majflt=" KERNEL_UINT_FORMAT, p->majflt);
+ if(p->cmajflt) fprintf(stderr, " cmajflt=" KERNEL_UINT_FORMAT, p->cmajflt);
+ fprintf(stderr, ")\n");
+
+ return indent + 1;
+}
+
+static inline void debug_print_process_tree(struct pid_stat *p, char *msg) {
+ debug_log("%s: process %s (%d, %s) with parents:", msg, p->comm, p->pid, p->updated?"running":"exited");
+ debug_print_process_and_parents(p, p->stat_collected_usec);
+}
+
+static inline void debug_find_lost_child(struct pid_stat *pe, kernel_uint_t lost, int type) {
+ int found = 0;
+ struct pid_stat *p = NULL;
+
+ for(p = root_of_pids; p ; p = p->next) {
+ if(p == pe) continue;
+
+ switch(type) {
+ case 1:
+ if(p->cminflt > lost) {
+ fprintf(stderr, " > process %d (%s) could use the lost exited child minflt " KERNEL_UINT_FORMAT " of process %d (%s)\n", p->pid, p->comm, lost, pe->pid, pe->comm);
+ found++;
+ }
+ break;
+
+ case 2:
+ if(p->cmajflt > lost) {
+ fprintf(stderr, " > process %d (%s) could use the lost exited child majflt " KERNEL_UINT_FORMAT " of process %d (%s)\n", p->pid, p->comm, lost, pe->pid, pe->comm);
+ found++;
+ }
+ break;
+
+ case 3:
+ if(p->cutime > lost) {
+ fprintf(stderr, " > process %d (%s) could use the lost exited child utime " KERNEL_UINT_FORMAT " of process %d (%s)\n", p->pid, p->comm, lost, pe->pid, pe->comm);
+ found++;
+ }
+ break;
+
+ case 4:
+ if(p->cstime > lost) {
+ fprintf(stderr, " > process %d (%s) could use the lost exited child stime " KERNEL_UINT_FORMAT " of process %d (%s)\n", p->pid, p->comm, lost, pe->pid, pe->comm);
+ found++;
+ }
+ break;
+
+ case 5:
+ if(p->cgtime > lost) {
+ fprintf(stderr, " > process %d (%s) could use the lost exited child gtime " KERNEL_UINT_FORMAT " of process %d (%s)\n", p->pid, p->comm, lost, pe->pid, pe->comm);
+ found++;
+ }
+ break;
+ }
+ }
+
+ if(!found) {
+ switch(type) {
+ case 1:
+ fprintf(stderr, " > cannot find any process to use the lost exited child minflt " KERNEL_UINT_FORMAT " of process %d (%s)\n", lost, pe->pid, pe->comm);
+ break;
+
+ case 2:
+ fprintf(stderr, " > cannot find any process to use the lost exited child majflt " KERNEL_UINT_FORMAT " of process %d (%s)\n", lost, pe->pid, pe->comm);
+ break;
+
+ case 3:
+ fprintf(stderr, " > cannot find any process to use the lost exited child utime " KERNEL_UINT_FORMAT " of process %d (%s)\n", lost, pe->pid, pe->comm);
+ break;
+
+ case 4:
+ fprintf(stderr, " > cannot find any process to use the lost exited child stime " KERNEL_UINT_FORMAT " of process %d (%s)\n", lost, pe->pid, pe->comm);
+ break;
+
+ case 5:
+ fprintf(stderr, " > cannot find any process to use the lost exited child gtime " KERNEL_UINT_FORMAT " of process %d (%s)\n", lost, pe->pid, pe->comm);
+ break;
+ }
+ }
+}
+
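+// remove_exited_child_from_parent() subtracts the smaller of the two counters from
+// both and reports how much was absorbed; a worked example:
+//   *field = 100, *pfield = 30  ->  absorbed = 30, *field = 70, *pfield = 0
+//   *field = 20,  *pfield = 30  ->  absorbed = 20, *field = 0,  *pfield = 10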
+static inline kernel_uint_t remove_exited_child_from_parent(kernel_uint_t *field, kernel_uint_t *pfield) {
+ kernel_uint_t absorbed = 0;
+
+ if(*field > *pfield) {
+ absorbed += *pfield;
+ *field -= *pfield;
+ *pfield = 0;
+ }
+ else {
+ absorbed += *field;
+ *pfield -= *field;
+ *field = 0;
+ }
+
+ return absorbed;
+}
+
+static inline void process_exited_processes() {
+ struct pid_stat *p;
+
+ for(p = root_of_pids; p ; p = p->next) {
+ if(p->updated || !p->stat_collected_usec)
+ continue;
+
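+ // convert the exited process' remaining raw ticks (its own plus its children's)
+ // into a rate over the last collection interval, scaled by RATES_DETAIL, so it
+ // can be compared against the parents' cutime/cstime/... counters below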
+ kernel_uint_t utime = (p->utime_raw + p->cutime_raw) * (USEC_PER_SEC * RATES_DETAIL) / (p->stat_collected_usec - p->last_stat_collected_usec);
+ kernel_uint_t stime = (p->stime_raw + p->cstime_raw) * (USEC_PER_SEC * RATES_DETAIL) / (p->stat_collected_usec - p->last_stat_collected_usec);
+ kernel_uint_t gtime = (p->gtime_raw + p->cgtime_raw) * (USEC_PER_SEC * RATES_DETAIL) / (p->stat_collected_usec - p->last_stat_collected_usec);
+ kernel_uint_t minflt = (p->minflt_raw + p->cminflt_raw) * (USEC_PER_SEC * RATES_DETAIL) / (p->stat_collected_usec - p->last_stat_collected_usec);
+ kernel_uint_t majflt = (p->majflt_raw + p->cmajflt_raw) * (USEC_PER_SEC * RATES_DETAIL) / (p->stat_collected_usec - p->last_stat_collected_usec);
+
+ if(utime + stime + gtime + minflt + majflt == 0)
+ continue;
+
+ if(unlikely(debug_enabled)) {
+ debug_log("Absorb %s (%d %s total resources: utime=" KERNEL_UINT_FORMAT " stime=" KERNEL_UINT_FORMAT " gtime=" KERNEL_UINT_FORMAT " minflt=" KERNEL_UINT_FORMAT " majflt=" KERNEL_UINT_FORMAT ")"
+ , p->comm
+ , p->pid
+ , p->updated?"running":"exited"
+ , utime
+ , stime
+ , gtime
+ , minflt
+ , majflt
+ );
+ debug_print_process_tree(p, "Searching parents");
+ }
+
+ struct pid_stat *pp;
+ for(pp = p->parent; pp ; pp = pp->parent) {
+ if(!pp->updated) continue;
+
+ kernel_uint_t absorbed;
+ absorbed = remove_exited_child_from_parent(&utime, &pp->cutime);
+ if(unlikely(debug_enabled && absorbed))
+ debug_log(" > process %s (%d %s) absorbed " KERNEL_UINT_FORMAT " utime (remaining: " KERNEL_UINT_FORMAT ")", pp->comm, pp->pid, pp->updated?"running":"exited", absorbed, utime);
+
+ absorbed = remove_exited_child_from_parent(&stime, &pp->cstime);
+ if(unlikely(debug_enabled && absorbed))
+ debug_log(" > process %s (%d %s) absorbed " KERNEL_UINT_FORMAT " stime (remaining: " KERNEL_UINT_FORMAT ")", pp->comm, pp->pid, pp->updated?"running":"exited", absorbed, stime);
+
+ absorbed = remove_exited_child_from_parent(&gtime, &pp->cgtime);
+ if(unlikely(debug_enabled && absorbed))
+ debug_log(" > process %s (%d %s) absorbed " KERNEL_UINT_FORMAT " gtime (remaining: " KERNEL_UINT_FORMAT ")", pp->comm, pp->pid, pp->updated?"running":"exited", absorbed, gtime);
+
+ absorbed = remove_exited_child_from_parent(&minflt, &pp->cminflt);
+ if(unlikely(debug_enabled && absorbed))
+ debug_log(" > process %s (%d %s) absorbed " KERNEL_UINT_FORMAT " minflt (remaining: " KERNEL_UINT_FORMAT ")", pp->comm, pp->pid, pp->updated?"running":"exited", absorbed, minflt);
+
+ absorbed = remove_exited_child_from_parent(&majflt, &pp->cmajflt);
+ if(unlikely(debug_enabled && absorbed))
+ debug_log(" > process %s (%d %s) absorbed " KERNEL_UINT_FORMAT " majflt (remaining: " KERNEL_UINT_FORMAT ")", pp->comm, pp->pid, pp->updated?"running":"exited", absorbed, majflt);
+ }
+
+ if(unlikely(utime + stime + gtime + minflt + majflt > 0)) {
+ if(unlikely(debug_enabled)) {
+ if(utime) debug_find_lost_child(p, utime, 3);
+ if(stime) debug_find_lost_child(p, stime, 4);
+ if(gtime) debug_find_lost_child(p, gtime, 5);
+ if(minflt) debug_find_lost_child(p, minflt, 1);
+ if(majflt) debug_find_lost_child(p, majflt, 2);
+ }
+
+ p->keep = 1;
+
+ debug_log(" > remaining resources - KEEP - for another loop: %s (%d %s total resources: utime=" KERNEL_UINT_FORMAT " stime=" KERNEL_UINT_FORMAT " gtime=" KERNEL_UINT_FORMAT " minflt=" KERNEL_UINT_FORMAT " majflt=" KERNEL_UINT_FORMAT ")"
+ , p->comm
+ , p->pid
+ , p->updated?"running":"exited"
+ , utime
+ , stime
+ , gtime
+ , minflt
+ , majflt
+ );
+
+ for(pp = p->parent; pp ; pp = pp->parent) {
+ if(pp->updated) break;
+ pp->keep = 1;
+
+ debug_log(" > - KEEP - parent for another loop: %s (%d %s)"
+ , pp->comm
+ , pp->pid
+ , pp->updated?"running":"exited"
+ );
+ }
+
+ p->utime_raw = utime * (p->stat_collected_usec - p->last_stat_collected_usec) / (USEC_PER_SEC * RATES_DETAIL);
+ p->stime_raw = stime * (p->stat_collected_usec - p->last_stat_collected_usec) / (USEC_PER_SEC * RATES_DETAIL);
+ p->gtime_raw = gtime * (p->stat_collected_usec - p->last_stat_collected_usec) / (USEC_PER_SEC * RATES_DETAIL);
+ p->minflt_raw = minflt * (p->stat_collected_usec - p->last_stat_collected_usec) / (USEC_PER_SEC * RATES_DETAIL);
+ p->majflt_raw = majflt * (p->stat_collected_usec - p->last_stat_collected_usec) / (USEC_PER_SEC * RATES_DETAIL);
+ p->cutime_raw = p->cstime_raw = p->cgtime_raw = p->cminflt_raw = p->cmajflt_raw = 0;
+
+ debug_log(" ");
+ }
+ else
+ debug_log(" > totally absorbed - DONE - %s (%d %s)"
+ , p->comm
+ , p->pid
+ , p->updated?"running":"exited"
+ );
+ }
+}
+
+static inline void link_all_processes_to_their_parents(void) {
+ struct pid_stat *p, *pp;
+
+ // link all children to their parents
+ // and update children count on parents
+ for(p = root_of_pids; p ; p = p->next) {
+ // for each process found
+
+ p->sortlist = 0;
+ p->parent = NULL;
+
+ if(unlikely(!p->ppid)) {
+ p->parent = NULL;
+ continue;
+ }
+
+ pp = all_pids[p->ppid];
+ if(likely(pp)) {
+ p->parent = pp;
+ pp->children_count++;
+
+ if(unlikely(debug_enabled || (p->target && p->target->debug_enabled)))
+ debug_log_int("child %d (%s, %s) on target '%s' has parent %d (%s, %s). Parent: utime=" KERNEL_UINT_FORMAT ", stime=" KERNEL_UINT_FORMAT ", gtime=" KERNEL_UINT_FORMAT ", minflt=" KERNEL_UINT_FORMAT ", majflt=" KERNEL_UINT_FORMAT ", cutime=" KERNEL_UINT_FORMAT ", cstime=" KERNEL_UINT_FORMAT ", cgtime=" KERNEL_UINT_FORMAT ", cminflt=" KERNEL_UINT_FORMAT ", cmajflt=" KERNEL_UINT_FORMAT "", p->pid, p->comm, p->updated?"running":"exited", (p->target)?p->target->name:"UNSET", pp->pid, pp->comm, pp->updated?"running":"exited", pp->utime, pp->stime, pp->gtime, pp->minflt, pp->majflt, pp->cutime, pp->cstime, pp->cgtime, pp->cminflt, pp->cmajflt);
+ }
+ else {
+ p->parent = NULL;
+ error("pid %d %s states parent %d, but the latter does not exist.", p->pid, p->comm, p->ppid);
+ }
+ }
+}
+
+// ----------------------------------------------------------------------------
+
+// 1. read all files in /proc
+// 2. for each numeric directory:
+// i. read /proc/pid/stat
+// ii. read /proc/pid/status
+// iii. read /proc/pid/io (requires root access)
+// iv. read the entries in directory /proc/pid/fd (requires root access)
+// for each entry:
+// a. find or create a struct file_descriptor
+// b. cleanup any old/unused file_descriptors
+
+// after all these, some pids may be linked to targets, while others may not
+
+// in case of errors, only 1 in every 1000 errors is printed
+// to avoid filling up all disk space
+// if debug is enabled, all errors are printed
+
+#if (ALL_PIDS_ARE_READ_INSTANTLY == 0)
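+// compar_pid() sorts pids by sortlist in descending order; since leaf processes are
+// assigned the lowest sortlist values in apply_apps_groups_targets_inheritance(),
+// this effectively orders parents ahead of their children for the pre-read below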
+static int compar_pid(const void *pid1, const void *pid2) {
+
+ struct pid_stat *p1 = all_pids[*((pid_t *)pid1)];
+ struct pid_stat *p2 = all_pids[*((pid_t *)pid2)];
+
+ if(p1->sortlist > p2->sortlist)
+ return -1;
+ else
+ return 1;
+}
+#endif
+
+static inline int collect_data_for_pid(pid_t pid, void *ptr) {
+ if(unlikely(pid < 0 || pid > pid_max)) {
+ error("Invalid pid %d read (expected %d to %d). Ignoring process.", pid, 0, pid_max);
+ return 0;
+ }
+
+ struct pid_stat *p = get_pid_entry(pid);
+ if(unlikely(!p || p->read)) return 0;
+ p->read = 1;
+
+ // debug_log("Reading process %d (%s), sortlist %d", p->pid, p->comm, p->sortlist);
+
+ // --------------------------------------------------------------------
+ // /proc/<pid>/stat
+
+ if(unlikely(!managed_log(p, PID_LOG_STAT, read_proc_pid_stat(p, ptr))))
+ // there is no reason to proceed if we cannot get its status
+ return 0;
+
+ // check its parent pid
+ if(unlikely(p->ppid < 0 || p->ppid > pid_max)) {
+ error("Pid %d (command '%s') states invalid parent pid %d. Using 0.", pid, p->comm, p->ppid);
+ p->ppid = 0;
+ }
+
+ // --------------------------------------------------------------------
+ // /proc/<pid>/io
+
+ managed_log(p, PID_LOG_IO, read_proc_pid_io(p, ptr));
+
+ // --------------------------------------------------------------------
+ // /proc/<pid>/status
+
+ if(unlikely(!managed_log(p, PID_LOG_STATUS, read_proc_pid_status(p, ptr))))
+ // there is no reason to proceed if we cannot get its status
+ return 0;
+
+ // --------------------------------------------------------------------
+ // /proc/<pid>/fd
+
+ if(enable_file_charts)
+ managed_log(p, PID_LOG_FDS, read_pid_file_descriptors(p, ptr));
+
+ // --------------------------------------------------------------------
+ // done!
+
+ if(unlikely(debug_enabled && include_exited_childs && all_pids_count && p->ppid && all_pids[p->ppid] && !all_pids[p->ppid]->read))
+ debug_log("Read process %d (%s) sortlisted %d, but its parent %d (%s) sortlisted %d, is not read", p->pid, p->comm, p->sortlist, all_pids[p->ppid]->pid, all_pids[p->ppid]->comm, all_pids[p->ppid]->sortlist);
+
+ // mark it as updated
+ p->updated = 1;
+ p->keep = 0;
+ p->keeploops = 0;
+
+ return 1;
+}
+
+static int collect_data_for_all_processes(void) {
+ struct pid_stat *p = NULL;
+
+#ifdef __FreeBSD__
+ int i, procnum;
+
+ static size_t procbase_size = 0;
+ static struct kinfo_proc *procbase = NULL;
+
+ size_t new_procbase_size;
+
+ int mib[3] = { CTL_KERN, KERN_PROC, KERN_PROC_PROC };
+ if (unlikely(sysctl(mib, 3, NULL, &new_procbase_size, NULL, 0))) {
+ error("sysctl error: Can't get processes data size");
+ return 0;
+ }
+
+ // allow some headroom for processes that may be started
+ // while we are doing this.
+ new_procbase_size += 100 * sizeof(struct kinfo_proc);
+
+ // increase the buffer if needed
+ if(new_procbase_size > procbase_size) {
+ procbase_size = new_procbase_size;
+ procbase = reallocz(procbase, procbase_size);
+ }
+
+ // sysctl() reads the buffer size from new_procbase_size
+ // and writes back to it the amount of data actually filled in
+ new_procbase_size = procbase_size;
+
+ // get the processes from the system
+ if (unlikely(sysctl(mib, 3, procbase, &new_procbase_size, NULL, 0))) {
+ error("sysctl error: Can't get processes data");
+ return 0;
+ }
+
+ // based on the amount of data filled in
+ // calculate the number of processes we got
+ procnum = new_procbase_size / sizeof(struct kinfo_proc);
+
+#endif
+
+ if(all_pids_count) {
+#if (ALL_PIDS_ARE_READ_INSTANTLY == 0)
+ size_t slc = 0;
+#endif
+ for(p = root_of_pids; p ; p = p->next) {
+ p->read = 0; // mark it as not read, so that collect_data_for_pid() will read it
+ p->updated = 0;
+ p->merged = 0;
+ p->children_count = 0;
+ p->parent = NULL;
+
+#if (ALL_PIDS_ARE_READ_INSTANTLY == 0)
+ all_pids_sortlist[slc++] = p->pid;
+#endif
+ }
+
+#if (ALL_PIDS_ARE_READ_INSTANTLY == 0)
+ if(unlikely(slc != all_pids_count)) {
+ error("Internal error: expected %zu processes in the arrays, but found %zu.", all_pids_count, slc);
+ all_pids_count = slc;
+ }
+
+ if(include_exited_childs) {
+ // Read parents before children.
+ // This is needed to prevent a situation where
+ // a child is found running, but by the time we read
+ // its parent, the child has exited and its parent
+ // has already accumulated its resources.
+
+ qsort((void *)all_pids_sortlist, (size_t)all_pids_count, sizeof(pid_t), compar_pid);
+
+ // we forward read all running processes
+ // collect_data_for_pid() is smart enough,
+ // not to read the same pid twice per iteration
+ for(slc = 0; slc < all_pids_count; slc++)
+ collect_data_for_pid(all_pids_sortlist[slc], NULL);
+ }
+#endif
+ }
+
+#ifdef __FreeBSD__
+ for (i = 0 ; i < procnum ; ++i) {
+ pid_t pid = procbase[i].ki_pid;
+ collect_data_for_pid(pid, &procbase[i]);
+ }
+#else
+ char dirname[FILENAME_MAX + 1];
+
+ snprintfz(dirname, FILENAME_MAX, "%s/proc", netdata_configured_host_prefix);
+ DIR *dir = opendir(dirname);
+ if(!dir) return 0;
+
+ struct dirent *de = NULL;
+
+ while((de = readdir(dir))) {
+ char *endptr = de->d_name;
+
+ if(unlikely(de->d_type != DT_DIR || de->d_name[0] < '0' || de->d_name[0] > '9'))
+ continue;
+
+ pid_t pid = (pid_t) strtoul(de->d_name, &endptr, 10);
+
+ // make sure we read a valid number
+ if(unlikely(endptr == de->d_name || *endptr != '\0'))
+ continue;
+
+ collect_data_for_pid(pid, NULL);
+ }
+ closedir(dir);
+#endif
+
+ if(!all_pids_count)
+ return 0;
+
+ // we need /proc/stat to normalize the cpu consumption of the exited children
+ read_proc_stat();
+
+ // build the process tree
+ link_all_processes_to_their_parents();
+
+ // normally this would be enough
+ // however some processes may have exited while we were collecting values
+ // so let's find the exited ones
+ // we do this by checking the ownership of each process;
+ // if we manage to get the ownership, the process is still running
+ process_exited_processes();
+
+ return 1;
+}
+
+// ----------------------------------------------------------------------------
+// update statistics on the targets
+
+// 1. link all children to their parents
+// 2. go from bottom to top, marking as merged all children to their parents
+//    this step links all parents without a target to the child's target, if any
+// 3. link all top level processes (the ones not merged) to the default target
+// 4. go from top to bottom, linking all children without a target to their parent's target
+// after this step, all processes have a target
+// [5. for each killed pid (updated = 0), remove its usage from its target]
+// 6. zero all apps_groups_targets
+// 7. concentrate all values on the apps_groups_targets
+// 8. remove all killed processes
+// 9. find the unique file count for each target
+// check: update_apps_groups_statistics()
+
+static void cleanup_exited_pids(void) {
+ size_t c;
+ struct pid_stat *p = NULL;
+
+ for(p = root_of_pids; p ;) {
+ if(!p->updated && (!p->keep || p->keeploops > 0)) {
+ if(unlikely(debug_enabled && (p->keep || p->keeploops)))
+ debug_log(" > CLEANUP cannot keep exited process %d (%s) anymore - removing it.", p->pid, p->comm);
+
+ for(c = 0; c < p->fds_size; c++)
+ if(p->fds[c].fd > 0) {
+ file_descriptor_not_used(p->fds[c].fd);
+ clear_pid_fd(&p->fds[c]);
+ }
+
+ pid_t r = p->pid;
+ p = p->next;
+ del_pid_entry(r);
+ }
+ else {
+ if(unlikely(p->keep)) p->keeploops++;
+ p->keep = 0;
+ p = p->next;
+ }
+ }
+}
+
+static void apply_apps_groups_targets_inheritance(void) {
+ struct pid_stat *p = NULL;
+
+ // children that do not have a target
+ // inherit their target from their parent
+ int found = 1, loops = 0;
+ while(found) {
+ if(unlikely(debug_enabled)) loops++;
+ found = 0;
+ for(p = root_of_pids; p ; p = p->next) {
+ // if this process does not have a target
+ // and it has a parent
+ // and its parent has a target
+ // then, set the parent's target to this process
+ if(unlikely(!p->target && p->parent && p->parent->target)) {
+ p->target = p->parent->target;
+ found++;
+
+ if(debug_enabled || (p->target && p->target->debug_enabled))
+ debug_log_int("TARGET INHERITANCE: %s is inherited by %d (%s) from its parent %d (%s).", p->target->name, p->pid, p->comm, p->parent->pid, p->parent->comm);
+ }
+ }
+ }
+
+ // find all the procs with 0 children and merge them to their parents
+ // repeat, until nothing more can be done.
+ int sortlist = 1;
+ found = 1;
+ while(found) {
+ if(unlikely(debug_enabled)) loops++;
+ found = 0;
+
+ for(p = root_of_pids; p ; p = p->next) {
+ if(unlikely(!p->sortlist && !p->children_count))
+ p->sortlist = sortlist++;
+
+ if(unlikely(
+ !p->children_count // if this process does not have any children
+ && !p->merged // and is not already merged
+ && p->parent // and has a parent
+ && p->parent->children_count // and its parent has children
+ // and the target of this process and its parent is the same,
+ // or the parent does not have a target
+ && (p->target == p->parent->target || !p->parent->target)
+ && p->ppid != INIT_PID // and its parent is not init
+ )) {
+ // mark it as merged
+ p->parent->children_count--;
+ p->merged = 1;
+
+ // the parent inherits the child's target, if it does not have a target itself
+ if(unlikely(p->target && !p->parent->target)) {
+ p->parent->target = p->target;
+
+ if(debug_enabled || (p->target && p->target->debug_enabled))
+ debug_log_int("TARGET INHERITANCE: %s is inherited by %d (%s) from its child %d (%s).", p->target->name, p->parent->pid, p->parent->comm, p->pid, p->comm);
+ }
+
+ found++;
+ }
+ }
+
+ debug_log("TARGET INHERITANCE: merged %d processes", found);
+ }
+
+ // init goes always to default target
+ if(all_pids[INIT_PID])
+ all_pids[INIT_PID]->target = apps_groups_default_target;
+
+ // pid 0 goes always to default target
+ if(all_pids[0])
+ all_pids[0]->target = apps_groups_default_target;
+
+ // give a default target on all top level processes
+ if(unlikely(debug_enabled)) loops++;
+ for(p = root_of_pids; p ; p = p->next) {
+ // if the process is not merged itself
+ // then it is a top level process
+ if(unlikely(!p->merged && !p->target))
+ p->target = apps_groups_default_target;
+
+ // make sure all processes have a sortlist
+ if(unlikely(!p->sortlist))
+ p->sortlist = sortlist++;
+ }
+
+ if(all_pids[1])
+ all_pids[1]->sortlist = sortlist++;
+
+ // give a target to all merged child processes
+ found = 1;
+ while(found) {
+ if(unlikely(debug_enabled)) loops++;
+ found = 0;
+ for(p = root_of_pids; p ; p = p->next) {
+ if(unlikely(!p->target && p->merged && p->parent && p->parent->target)) {
+ p->target = p->parent->target;
+ found++;
+
+ if(debug_enabled || (p->target && p->target->debug_enabled))
+ debug_log_int("TARGET INHERITANCE: %s is inherited by %d (%s) from its parent %d (%s) at phase 2.", p->target->name, p->pid, p->comm, p->parent->pid, p->parent->comm);
+ }
+ }
+ }
+
+ debug_log("apply_apps_groups_targets_inheritance() made %d loops on the process tree", loops);
+}
+
+static size_t zero_all_targets(struct target *root) {
+ struct target *w;
+ size_t count = 0;
+
+ for (w = root; w ; w = w->next) {
+ count++;
+
+ w->minflt = 0;
+ w->majflt = 0;
+ w->utime = 0;
+ w->stime = 0;
+ w->gtime = 0;
+ w->cminflt = 0;
+ w->cmajflt = 0;
+ w->cutime = 0;
+ w->cstime = 0;
+ w->cgtime = 0;
+ w->num_threads = 0;
+ // w->rss = 0;
+ w->processes = 0;
+
+ w->status_vmsize = 0;
+ w->status_vmrss = 0;
+ w->status_vmshared = 0;
+ w->status_rssfile = 0;
+ w->status_rssshmem = 0;
+ w->status_vmswap = 0;
+
+ w->io_logical_bytes_read = 0;
+ w->io_logical_bytes_written = 0;
+ // w->io_read_calls = 0;
+ // w->io_write_calls = 0;
+ w->io_storage_bytes_read = 0;
+ w->io_storage_bytes_written = 0;
+ // w->io_cancelled_write_bytes = 0;
+
+ // zero file counters
+ if(w->target_fds) {
+ memset(w->target_fds, 0, sizeof(int) * w->target_fds_size);
+ w->openfiles = 0;
+ w->openpipes = 0;
+ w->opensockets = 0;
+ w->openinotifies = 0;
+ w->openeventfds = 0;
+ w->opentimerfds = 0;
+ w->opensignalfds = 0;
+ w->openeventpolls = 0;
+ w->openother = 0;
+ }
+ }
+
+ return count;
+}
+
+static inline void reallocate_target_fds(struct target *w) {
+ if(unlikely(!w))
+ return;
+
+ if(unlikely(!w->target_fds || w->target_fds_size < all_files_size)) {
+ w->target_fds = reallocz(w->target_fds, sizeof(int) * all_files_size);
+ memset(&w->target_fds[w->target_fds_size], 0, sizeof(int) * (all_files_size - w->target_fds_size));
+ w->target_fds_size = all_files_size;
+ }
+}
+
+static inline void aggregate_fd_on_target(int fd, struct target *w) {
+ if(unlikely(!w))
+ return;
+
+ if(unlikely(w->target_fds[fd])) {
+ // it is already aggregated
+ // just increase its usage counter
+ w->target_fds[fd]++;
+ return;
+ }
+
+ // increase its usage counter
+ // so that we will not add it again
+ w->target_fds[fd]++;
+
+ switch(all_files[fd].type) {
+ case FILETYPE_FILE:
+ w->openfiles++;
+ break;
+
+ case FILETYPE_PIPE:
+ w->openpipes++;
+ break;
+
+ case FILETYPE_SOCKET:
+ w->opensockets++;
+ break;
+
+ case FILETYPE_INOTIFY:
+ w->openinotifies++;
+ break;
+
+ case FILETYPE_EVENTFD:
+ w->openeventfds++;
+ break;
+
+ case FILETYPE_TIMERFD:
+ w->opentimerfds++;
+ break;
+
+ case FILETYPE_SIGNALFD:
+ w->opensignalfds++;
+ break;
+
+ case FILETYPE_EVENTPOLL:
+ w->openeventpolls++;
+ break;
+
+ case FILETYPE_OTHER:
+ w->openother++;
+ break;
+ }
+}
+
+static inline void aggregate_pid_fds_on_targets(struct pid_stat *p) {
+
+ if(unlikely(!p->updated)) {
+ // the process is not running
+ return;
+ }
+
+ struct target *w = p->target, *u = p->user_target, *g = p->group_target;
+
+ reallocate_target_fds(w);
+ reallocate_target_fds(u);
+ reallocate_target_fds(g);
+
+ size_t c, size = p->fds_size;
+ struct pid_fd *fds = p->fds;
+ for(c = 0; c < size ;c++) {
+ int fd = fds[c].fd;
+
+ if(likely(fd <= 0 || fd >= all_files_size))
+ continue;
+
+ aggregate_fd_on_target(fd, w);
+ aggregate_fd_on_target(fd, u);
+ aggregate_fd_on_target(fd, g);
+ }
+}
+
+static inline void aggregate_pid_on_target(struct target *w, struct pid_stat *p, struct target *o) {
+ (void)o;
+
+ if(unlikely(!p->updated)) {
+ // the process is not running
+ return;
+ }
+
+ if(unlikely(!w)) {
+ error("pid %d %s was left without a target!", p->pid, p->comm);
+ return;
+ }
+
+ w->cutime += p->cutime;
+ w->cstime += p->cstime;
+ w->cgtime += p->cgtime;
+ w->cminflt += p->cminflt;
+ w->cmajflt += p->cmajflt;
+
+ w->utime += p->utime;
+ w->stime += p->stime;
+ w->gtime += p->gtime;
+ w->minflt += p->minflt;
+ w->majflt += p->majflt;
+
+ // w->rss += p->rss;
+
+ w->status_vmsize += p->status_vmsize;
+ w->status_vmrss += p->status_vmrss;
+ w->status_vmshared += p->status_vmshared;
+ w->status_rssfile += p->status_rssfile;
+ w->status_rssshmem += p->status_rssshmem;
+ w->status_vmswap += p->status_vmswap;
+
+ w->io_logical_bytes_read += p->io_logical_bytes_read;
+ w->io_logical_bytes_written += p->io_logical_bytes_written;
+ // w->io_read_calls += p->io_read_calls;
+ // w->io_write_calls += p->io_write_calls;
+ w->io_storage_bytes_read += p->io_storage_bytes_read;
+ w->io_storage_bytes_written += p->io_storage_bytes_written;
+ // w->io_cancelled_write_bytes += p->io_cancelled_write_bytes;
+
+ w->processes++;
+ w->num_threads += p->num_threads;
+
+ if(unlikely(debug_enabled || w->debug_enabled))
+ debug_log_int("aggregating '%s' pid %d on target '%s' utime=" KERNEL_UINT_FORMAT ", stime=" KERNEL_UINT_FORMAT ", gtime=" KERNEL_UINT_FORMAT ", cutime=" KERNEL_UINT_FORMAT ", cstime=" KERNEL_UINT_FORMAT ", cgtime=" KERNEL_UINT_FORMAT ", minflt=" KERNEL_UINT_FORMAT ", majflt=" KERNEL_UINT_FORMAT ", cminflt=" KERNEL_UINT_FORMAT ", cmajflt=" KERNEL_UINT_FORMAT "", p->comm, p->pid, w->name, p->utime, p->stime, p->gtime, p->cutime, p->cstime, p->cgtime, p->minflt, p->majflt, p->cminflt, p->cmajflt);
+}
+
+static void calculate_netdata_statistics(void) {
+
+ apply_apps_groups_targets_inheritance();
+
+ zero_all_targets(users_root_target);
+ zero_all_targets(groups_root_target);
+ apps_groups_targets_count = zero_all_targets(apps_groups_root_target);
+
+ // this has to be done, before the cleanup
+ struct pid_stat *p = NULL;
+ struct target *w = NULL, *o = NULL;
+
+ // concentrate everything on the targets
+ for(p = root_of_pids; p ; p = p->next) {
+
+ // --------------------------------------------------------------------
+ // apps_groups target
+
+ aggregate_pid_on_target(p->target, p, NULL);
+
+
+ // --------------------------------------------------------------------
+ // user target
+
+ o = p->user_target;
+ if(likely(p->user_target && p->user_target->uid == p->uid))
+ w = p->user_target;
+ else {
+ if(unlikely(debug_enabled && p->user_target))
+ debug_log("pid %d (%s) switched user from %u (%s) to %u.", p->pid, p->comm, p->user_target->uid, p->user_target->name, p->uid);
+
+ w = p->user_target = get_users_target(p->uid);
+ }
+
+ aggregate_pid_on_target(w, p, o);
+
+
+ // --------------------------------------------------------------------
+ // user group target
+
+ o = p->group_target;
+ if(likely(p->group_target && p->group_target->gid == p->gid))
+ w = p->group_target;
+ else {
+ if(unlikely(debug_enabled && p->group_target))
+ debug_log("pid %d (%s) switched group from %u (%s) to %u.", p->pid, p->comm, p->group_target->gid, p->group_target->name, p->gid);
+
+ w = p->group_target = get_groups_target(p->gid);
+ }
+
+ aggregate_pid_on_target(w, p, o);
+
+
+ // --------------------------------------------------------------------
+ // aggregate all file descriptors
+
+ if(enable_file_charts)
+ aggregate_pid_fds_on_targets(p);
+ }
+
+ cleanup_exited_pids();
+}
+
+// ----------------------------------------------------------------------------
+// update chart dimensions
+
+static inline void send_BEGIN(const char *type, const char *id, usec_t usec) {
+ fprintf(stdout, "BEGIN %s.%s %llu\n", type, id, usec);
+}
+
+static inline void send_SET(const char *name, kernel_uint_t value) {
+ fprintf(stdout, "SET %s = " KERNEL_UINT_FORMAT "\n", name, value);
+}
+
+static inline void send_END(void) {
+ fprintf(stdout, "END\n");
+}
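+// as an illustration of the plugin protocol emitted by these helpers, a single
+// update for a hypothetical target 'myapp' on chart type 'apps' looks like:
+//   BEGIN apps.cpu 1000000
+//   SET myapp = 123
+//   END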
+
+void send_resource_usage_to_netdata(usec_t dt) {
+ static struct timeval last = { 0, 0 };
+ static struct rusage me_last;
+
+ struct timeval now;
+ struct rusage me;
+
+ usec_t cpuuser;
+ usec_t cpusyst;
+
+ if(!last.tv_sec) {
+ now_monotonic_timeval(&last);
+ getrusage(RUSAGE_SELF, &me_last);
+
+ cpuuser = 0;
+ cpusyst = 0;
+ }
+ else {
+ now_monotonic_timeval(&now);
+ getrusage(RUSAGE_SELF, &me);
+
+ cpuuser = me.ru_utime.tv_sec * USEC_PER_SEC + me.ru_utime.tv_usec;
+ cpusyst = me.ru_stime.tv_sec * USEC_PER_SEC + me.ru_stime.tv_usec;
+
+ memmove(&last, &now, sizeof(struct timeval));
+ memmove(&me_last, &me, sizeof(struct rusage));
+ }
+
+ static char created_charts = 0;
+ if(unlikely(!created_charts)) {
+ created_charts = 1;
+
+ fprintf(stdout,
+ "CHART netdata.apps_cpu '' 'Apps Plugin CPU' 'milliseconds/s' apps.plugin netdata.apps_cpu stacked 140000 %1$d\n"
+ "DIMENSION user '' incremental 1 1000\n"
+ "DIMENSION system '' incremental 1 1000\n"
+ "CHART netdata.apps_sizes '' 'Apps Plugin Files' 'files/s' apps.plugin netdata.apps_sizes line 140001 %1$d\n"
+ "DIMENSION calls '' incremental 1 1\n"
+ "DIMENSION files '' incremental 1 1\n"
+ "DIMENSION filenames '' incremental 1 1\n"
+ "DIMENSION inode_changes '' incremental 1 1\n"
+ "DIMENSION link_changes '' incremental 1 1\n"
+ "DIMENSION pids '' absolute 1 1\n"
+ "DIMENSION fds '' absolute 1 1\n"
+ "DIMENSION targets '' absolute 1 1\n"
+ "DIMENSION new_pids 'new pids' incremental 1 1\n"
+ , update_every
+ );
+
+#if (ALL_PIDS_ARE_READ_INSTANTLY == 0)
+ fprintf(stdout,
+ "CHART netdata.apps_fix '' 'Apps Plugin Normalization Ratios' 'percentage' apps.plugin netdata.apps_fix line 140002 %1$d\n"
+ "DIMENSION utime '' absolute 1 %2$llu\n"
+ "DIMENSION stime '' absolute 1 %2$llu\n"
+ "DIMENSION gtime '' absolute 1 %2$llu\n"
+ "DIMENSION minflt '' absolute 1 %2$llu\n"
+ "DIMENSION majflt '' absolute 1 %2$llu\n"
+ , update_every
+ , RATES_DETAIL
+ );
+
+ if(include_exited_childs)
+ fprintf(stdout,
+ "CHART netdata.apps_children_fix '' 'Apps Plugin Exited Children Normalization Ratios' 'percentage' apps.plugin netdata.apps_children_fix line 140003 %1$d\n"
+ "DIMENSION cutime '' absolute 1 %2$llu\n"
+ "DIMENSION cstime '' absolute 1 %2$llu\n"
+ "DIMENSION cgtime '' absolute 1 %2$llu\n"
+ "DIMENSION cminflt '' absolute 1 %2$llu\n"
+ "DIMENSION cmajflt '' absolute 1 %2$llu\n"
+ , update_every
+ , RATES_DETAIL
+ );
+#endif
+
+ }
+
+ fprintf(stdout,
+ "BEGIN netdata.apps_cpu %llu\n"
+ "SET user = %llu\n"
+ "SET system = %llu\n"
+ "END\n"
+ "BEGIN netdata.apps_sizes %llu\n"
+ "SET calls = %zu\n"
+ "SET files = %zu\n"
+ "SET filenames = %zu\n"
+ "SET inode_changes = %zu\n"
+ "SET link_changes = %zu\n"
+ "SET pids = %zu\n"
+ "SET fds = %d\n"
+ "SET targets = %zu\n"
+ "SET new_pids = %zu\n"
+ "END\n"
+ , dt
+ , cpuuser
+ , cpusyst
+ , dt
+ , calls_counter
+ , file_counter
+ , filenames_allocated_counter
+ , inodes_changed_counter
+ , links_changed_counter
+ , all_pids_count
+ , all_files_len
+ , apps_groups_targets_count
+ , targets_assignment_counter
+ );
+
+#if (ALL_PIDS_ARE_READ_INSTANTLY == 0)
+ fprintf(stdout,
+ "BEGIN netdata.apps_fix %llu\n"
+ "SET utime = %u\n"
+ "SET stime = %u\n"
+ "SET gtime = %u\n"
+ "SET minflt = %u\n"
+ "SET majflt = %u\n"
+ "END\n"
+ , dt
+ , (unsigned int)(utime_fix_ratio * 100 * RATES_DETAIL)
+ , (unsigned int)(stime_fix_ratio * 100 * RATES_DETAIL)
+ , (unsigned int)(gtime_fix_ratio * 100 * RATES_DETAIL)
+ , (unsigned int)(minflt_fix_ratio * 100 * RATES_DETAIL)
+ , (unsigned int)(majflt_fix_ratio * 100 * RATES_DETAIL)
+ );
+
+ if(include_exited_childs)
+ fprintf(stdout,
+ "BEGIN netdata.apps_children_fix %llu\n"
+ "SET cutime = %u\n"
+ "SET cstime = %u\n"
+ "SET cgtime = %u\n"
+ "SET cminflt = %u\n"
+ "SET cmajflt = %u\n"
+ "END\n"
+ , dt
+ , (unsigned int)(cutime_fix_ratio * 100 * RATES_DETAIL)
+ , (unsigned int)(cstime_fix_ratio * 100 * RATES_DETAIL)
+ , (unsigned int)(cgtime_fix_ratio * 100 * RATES_DETAIL)
+ , (unsigned int)(cminflt_fix_ratio * 100 * RATES_DETAIL)
+ , (unsigned int)(cmajflt_fix_ratio * 100 * RATES_DETAIL)
+ );
+#endif
+}
+
+#if (ALL_PIDS_ARE_READ_INSTANTLY == 0)
+static void normalize_utilization(struct target *root) {
+ struct target *w;
+
+ // children processing introduces spikes
+ // here we try to eliminate them by disabling children processing, either for specific dimensions
+ // or entirely. Either way, we disable it for just a single iteration.
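+ // a rough worked example of the middle case below: if the system reports
+ // global_utime + global_stime = 100, the running processes add up to
+ // utime + stime = 80 and the exited children to cutime + cstime = 40, then the
+ // children ratios become (100 - 80) / 40 = 0.5 while the running processes keep 1.0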
+
+ kernel_uint_t max_time = processors * system_hz * RATES_DETAIL;
+ kernel_uint_t utime = 0, cutime = 0, stime = 0, cstime = 0, gtime = 0, cgtime = 0, minflt = 0, cminflt = 0, majflt = 0, cmajflt = 0;
+
+ if(global_utime > max_time) global_utime = max_time;
+ if(global_stime > max_time) global_stime = max_time;
+ if(global_gtime > max_time) global_gtime = max_time;
+
+ for(w = root; w ; w = w->next) {
+ if(w->target || (!w->processes && !w->exposed)) continue;
+
+ utime += w->utime;
+ stime += w->stime;
+ gtime += w->gtime;
+ cutime += w->cutime;
+ cstime += w->cstime;
+ cgtime += w->cgtime;
+
+ minflt += w->minflt;
+ majflt += w->majflt;
+ cminflt += w->cminflt;
+ cmajflt += w->cmajflt;
+ }
+
+ if((global_utime || global_stime || global_gtime) && (utime || stime || gtime)) {
+ if(global_utime + global_stime + global_gtime > utime + cutime + stime + cstime + gtime + cgtime) {
+ // everything we collected fits
+ utime_fix_ratio =
+ stime_fix_ratio =
+ gtime_fix_ratio =
+ cutime_fix_ratio =
+ cstime_fix_ratio =
+ cgtime_fix_ratio = 1.0; //(double)(global_utime + global_stime) / (double)(utime + cutime + stime + cstime);
+ }
+ else if(global_utime + global_stime > utime + stime) {
+ // the children's resources are too high
+ // lower only the children's resources
+ utime_fix_ratio =
+ stime_fix_ratio =
+ gtime_fix_ratio = 1.0;
+ cutime_fix_ratio =
+ cstime_fix_ratio =
+ cgtime_fix_ratio = (double)((global_utime + global_stime) - (utime + stime)) / (double)(cutime + cstime);
+ }
+ else {
+ // even the running processes' resources are unrealistic
+ // zero the children's resources
+ // lower the running processes' resources
+ utime_fix_ratio =
+ stime_fix_ratio =
+ gtime_fix_ratio = (double)(global_utime + global_stime) / (double)(utime + stime);
+ cutime_fix_ratio =
+ cstime_fix_ratio =
+ cgtime_fix_ratio = 0.0;
+ }
+ }
+ else {
+ utime_fix_ratio =
+ stime_fix_ratio =
+ gtime_fix_ratio =
+ cutime_fix_ratio =
+ cstime_fix_ratio =
+ cgtime_fix_ratio = 0.0;
+ }
+
+ if(utime_fix_ratio > 1.0) utime_fix_ratio = 1.0;
+ if(cutime_fix_ratio > 1.0) cutime_fix_ratio = 1.0;
+ if(stime_fix_ratio > 1.0) stime_fix_ratio = 1.0;
+ if(cstime_fix_ratio > 1.0) cstime_fix_ratio = 1.0;
+ if(gtime_fix_ratio > 1.0) gtime_fix_ratio = 1.0;
+ if(cgtime_fix_ratio > 1.0) cgtime_fix_ratio = 1.0;
+
+ // if(utime_fix_ratio < 0.0) utime_fix_ratio = 0.0;
+ // if(cutime_fix_ratio < 0.0) cutime_fix_ratio = 0.0;
+ // if(stime_fix_ratio < 0.0) stime_fix_ratio = 0.0;
+ // if(cstime_fix_ratio < 0.0) cstime_fix_ratio = 0.0;
+ // if(gtime_fix_ratio < 0.0) gtime_fix_ratio = 0.0;
+ // if(cgtime_fix_ratio < 0.0) cgtime_fix_ratio = 0.0;
+
+ // TODO
+ // we use cpu time to normalize page faults
+ // the problem is that to find the proper max values
+ // for page faults we have to parse /proc/vmstat
+ // which is quite big to parse again (netdata already does it)
+ //
+ // a better solution could be to somehow have netdata
+ // do this normalization for us
+
+ if(utime || stime || gtime)
+ majflt_fix_ratio =
+ minflt_fix_ratio = (double)(utime * utime_fix_ratio + stime * stime_fix_ratio + gtime * gtime_fix_ratio) / (double)(utime + stime + gtime);
+ else
+ minflt_fix_ratio =
+ majflt_fix_ratio = 1.0;
+
+ if(cutime || cstime || cgtime)
+ cmajflt_fix_ratio =
+ cminflt_fix_ratio = (double)(cutime * cutime_fix_ratio + cstime * cstime_fix_ratio + cgtime * cgtime_fix_ratio) / (double)(cutime + cstime + cgtime);
+ else
+ cminflt_fix_ratio =
+ cmajflt_fix_ratio = 1.0;
+
+ // the report
+
+ debug_log(
+ "SYSTEM: u=" KERNEL_UINT_FORMAT " s=" KERNEL_UINT_FORMAT " g=" KERNEL_UINT_FORMAT " "
+ "COLLECTED: u=" KERNEL_UINT_FORMAT " s=" KERNEL_UINT_FORMAT " g=" KERNEL_UINT_FORMAT " cu=" KERNEL_UINT_FORMAT " cs=" KERNEL_UINT_FORMAT " cg=" KERNEL_UINT_FORMAT " "
+ "DELTA: u=" KERNEL_UINT_FORMAT " s=" KERNEL_UINT_FORMAT " g=" KERNEL_UINT_FORMAT " "
+ "FIX: u=%0.2f s=%0.2f g=%0.2f cu=%0.2f cs=%0.2f cg=%0.2f "
+ "FINALLY: u=" KERNEL_UINT_FORMAT " s=" KERNEL_UINT_FORMAT " g=" KERNEL_UINT_FORMAT " cu=" KERNEL_UINT_FORMAT " cs=" KERNEL_UINT_FORMAT " cg=" KERNEL_UINT_FORMAT " "
+ , global_utime
+ , global_stime
+ , global_gtime
+ , utime
+ , stime
+ , gtime
+ , cutime
+ , cstime
+ , cgtime
+ , utime + cutime - global_utime
+ , stime + cstime - global_stime
+ , gtime + cgtime - global_gtime
+ , utime_fix_ratio
+ , stime_fix_ratio
+ , gtime_fix_ratio
+ , cutime_fix_ratio
+ , cstime_fix_ratio
+ , cgtime_fix_ratio
+ , (kernel_uint_t)(utime * utime_fix_ratio)
+ , (kernel_uint_t)(stime * stime_fix_ratio)
+ , (kernel_uint_t)(gtime * gtime_fix_ratio)
+ , (kernel_uint_t)(cutime * cutime_fix_ratio)
+ , (kernel_uint_t)(cstime * cstime_fix_ratio)
+ , (kernel_uint_t)(cgtime * cgtime_fix_ratio)
+ );
+}
+#else // ALL_PIDS_ARE_READ_INSTANTLY == 1
+static void normalize_utilization(struct target *root) {
+ (void)root;
+}
+#endif // ALL_PIDS_ARE_READ_INSTANTLY
+
+static void send_collected_data_to_netdata(struct target *root, const char *type, usec_t dt) {
+ struct target *w;
+
+ send_BEGIN(type, "cpu", dt);
+ for (w = root; w ; w = w->next) {
+ if(unlikely(w->exposed))
+ send_SET(w->name, (kernel_uint_t)(w->utime * utime_fix_ratio) + (kernel_uint_t)(w->stime * stime_fix_ratio) + (kernel_uint_t)(w->gtime * gtime_fix_ratio) + (include_exited_childs?((kernel_uint_t)(w->cutime * cutime_fix_ratio) + (kernel_uint_t)(w->cstime * cstime_fix_ratio) + (kernel_uint_t)(w->cgtime * cgtime_fix_ratio)):0ULL));
+ }
+ send_END();
+
+ send_BEGIN(type, "cpu_user", dt);
+ for (w = root; w ; w = w->next) {
+ if(unlikely(w->exposed))
+ send_SET(w->name, (kernel_uint_t)(w->utime * utime_fix_ratio) + (include_exited_childs?((kernel_uint_t)(w->cutime * cutime_fix_ratio)):0ULL));
+ }
+ send_END();
+
+ send_BEGIN(type, "cpu_system", dt);
+ for (w = root; w ; w = w->next) {
+ if(unlikely(w->exposed))
+ send_SET(w->name, (kernel_uint_t)(w->stime * stime_fix_ratio) + (include_exited_childs?((kernel_uint_t)(w->cstime * cstime_fix_ratio)):0ULL));
+ }
+ send_END();
+
+ if(show_guest_time) {
+ send_BEGIN(type, "cpu_guest", dt);
+ for (w = root; w ; w = w->next) {
+ if(unlikely(w->exposed))
+ send_SET(w->name, (kernel_uint_t)(w->gtime * gtime_fix_ratio) + (include_exited_childs?((kernel_uint_t)(w->cgtime * cgtime_fix_ratio)):0ULL));
+ }
+ send_END();
+ }
+
+ send_BEGIN(type, "threads", dt);
+ for (w = root; w ; w = w->next) {
+ if(unlikely(w->exposed))
+ send_SET(w->name, w->num_threads);
+ }
+ send_END();
+
+ send_BEGIN(type, "processes", dt);
+ for (w = root; w ; w = w->next) {
+ if(unlikely(w->exposed))
+ send_SET(w->name, w->processes);
+ }
+ send_END();
+
+ send_BEGIN(type, "mem", dt);
+ for (w = root; w ; w = w->next) {
+ if(unlikely(w->exposed))
+ send_SET(w->name, (w->status_vmrss > w->status_vmshared)?(w->status_vmrss - w->status_vmshared):0ULL);
+ }
+ send_END();
+
+ send_BEGIN(type, "vmem", dt);
+ for (w = root; w ; w = w->next) {
+ if(unlikely(w->exposed))
+ send_SET(w->name, w->status_vmsize);
+ }
+ send_END();
+
+#ifndef __FreeBSD__
+ send_BEGIN(type, "swap", dt);
+ for (w = root; w ; w = w->next) {
+ if(unlikely(w->exposed))
+ send_SET(w->name, w->status_vmswap);
+ }
+ send_END();
+#endif
+
+ send_BEGIN(type, "minor_faults", dt);
+ for (w = root; w ; w = w->next) {
+ if(unlikely(w->exposed))
+ send_SET(w->name, (kernel_uint_t)(w->minflt * minflt_fix_ratio) + (include_exited_childs?((kernel_uint_t)(w->cminflt * cminflt_fix_ratio)):0ULL));
+ }
+ send_END();
+
+ send_BEGIN(type, "major_faults", dt);
+ for (w = root; w ; w = w->next) {
+ if(unlikely(w->exposed))
+ send_SET(w->name, (kernel_uint_t)(w->majflt * majflt_fix_ratio) + (include_exited_childs?((kernel_uint_t)(w->cmajflt * cmajflt_fix_ratio)):0ULL));
+ }
+ send_END();
+
+#ifndef __FreeBSD__
+ send_BEGIN(type, "lreads", dt);
+ for (w = root; w ; w = w->next) {
+ if(unlikely(w->exposed))
+ send_SET(w->name, w->io_logical_bytes_read);
+ }
+ send_END();
+
+ send_BEGIN(type, "lwrites", dt);
+ for (w = root; w ; w = w->next) {
+ if(unlikely(w->exposed))
+ send_SET(w->name, w->io_logical_bytes_written);
+ }
+ send_END();
+#endif
+
+ send_BEGIN(type, "preads", dt);
+ for (w = root; w ; w = w->next) {
+ if(unlikely(w->exposed))
+ send_SET(w->name, w->io_storage_bytes_read);
+ }
+ send_END();
+
+ send_BEGIN(type, "pwrites", dt);
+ for (w = root; w ; w = w->next) {
+ if(unlikely(w->exposed))
+ send_SET(w->name, w->io_storage_bytes_written);
+ }
+ send_END();
+
+ if(enable_file_charts) {
+ send_BEGIN(type, "files", dt);
+ for (w = root; w; w = w->next) {
+ if (unlikely(w->exposed))
+ send_SET(w->name, w->openfiles);
+ }
+ send_END();
+
+ send_BEGIN(type, "sockets", dt);
+ for (w = root; w; w = w->next) {
+ if (unlikely(w->exposed))
+ send_SET(w->name, w->opensockets);
+ }
+ send_END();
+
+ send_BEGIN(type, "pipes", dt);
+ for (w = root; w; w = w->next) {
+ if (unlikely(w->exposed))
+ send_SET(w->name, w->openpipes);
+ }
+ send_END();
+ }
+}
+
+
+// ----------------------------------------------------------------------------
+// generate the charts
+
+static void send_charts_updates_to_netdata(struct target *root, const char *type, const char *title)
+{
+ struct target *w;
+ int newly_added = 0;
+
+ for(w = root ; w ; w = w->next) {
+ if (w->target) continue;
+
+ if (!w->exposed && w->processes) {
+ newly_added++;
+ w->exposed = 1;
+ if (debug_enabled || w->debug_enabled)
+ debug_log_int("%s just added - regenerating charts.", w->name);
+ }
+ }
+
+ // nothing more to show
+ if(!newly_added && show_guest_time == show_guest_time_old) return;
+
+ // we have something new to show
+ // update the charts
+ fprintf(stdout, "CHART %s.cpu '' '%s CPU Time (%d%% = %d core%s)' 'cpu time %%' cpu %s.cpu stacked 20001 %d\n", type, title, (processors * 100), processors, (processors>1)?"s":"", type, update_every);
+ for (w = root; w ; w = w->next) {
+ if(unlikely(w->exposed))
+ fprintf(stdout, "DIMENSION %s '' absolute 1 %llu %s\n", w->name, system_hz * RATES_DETAIL / 100, w->hidden ? "hidden" : "");
+ }
+
+ fprintf(stdout, "CHART %s.mem '' '%s Real Memory (w/o shared)' 'MB' mem %s.mem stacked 20003 %d\n", type, title, type, update_every);
+ for (w = root; w ; w = w->next) {
+ if(unlikely(w->exposed))
+ fprintf(stdout, "DIMENSION %s '' absolute %ld %ld\n", w->name, 1L, 1024L);
+ }
+
+ fprintf(stdout, "CHART %s.vmem '' '%s Virtual Memory Size' 'MB' mem %s.vmem stacked 20005 %d\n", type, title, type, update_every);
+ for (w = root; w ; w = w->next) {
+ if(unlikely(w->exposed))
+ fprintf(stdout, "DIMENSION %s '' absolute %ld %ld\n", w->name, 1L, 1024L);
+ }
+
+ fprintf(stdout, "CHART %s.threads '' '%s Threads' 'threads' processes %s.threads stacked 20006 %d\n", type, title, type, update_every);
+ for (w = root; w ; w = w->next) {
+ if(unlikely(w->exposed))
+ fprintf(stdout, "DIMENSION %s '' absolute 1 1\n", w->name);
+ }
+
+ fprintf(stdout, "CHART %s.processes '' '%s Processes' 'processes' processes %s.processes stacked 20007 %d\n", type, title, type, update_every);
+ for (w = root; w ; w = w->next) {
+ if(unlikely(w->exposed))
+ fprintf(stdout, "DIMENSION %s '' absolute 1 1\n", w->name);
+ }
+
+ fprintf(stdout, "CHART %s.cpu_user '' '%s CPU User Time (%d%% = %d core%s)' 'cpu time %%' cpu %s.cpu_user stacked 20020 %d\n", type, title, (processors * 100), processors, (processors>1)?"s":"", type, update_every);
+ for (w = root; w ; w = w->next) {
+ if(unlikely(w->exposed))
+ fprintf(stdout, "DIMENSION %s '' absolute 1 %llu\n", w->name, system_hz * RATES_DETAIL / 100LLU);
+ }
+
+ fprintf(stdout, "CHART %s.cpu_system '' '%s CPU System Time (%d%% = %d core%s)' 'cpu time %%' cpu %s.cpu_system stacked 20021 %d\n", type, title, (processors * 100), processors, (processors>1)?"s":"", type, update_every);
+ for (w = root; w ; w = w->next) {
+ if(unlikely(w->exposed))
+ fprintf(stdout, "DIMENSION %s '' absolute 1 %llu\n", w->name, system_hz * RATES_DETAIL / 100LLU);
+ }
+
+ if(show_guest_time) {
+ fprintf(stdout, "CHART %s.cpu_guest '' '%s CPU Guest Time (%d%% = %d core%s)' 'cpu time %%' cpu %s.cpu_system stacked 20022 %d\n", type, title, (processors * 100), processors, (processors > 1) ? "s" : "", type, update_every);
+ for (w = root; w; w = w->next) {
+ if(unlikely(w->exposed))
+ fprintf(stdout, "DIMENSION %s '' absolute 1 %llu\n", w->name, system_hz * RATES_DETAIL / 100LLU);
+ }
+ }
+
+#ifndef __FreeBSD__
+ fprintf(stdout, "CHART %s.swap '' '%s Swap Memory' 'MB' swap %s.swap stacked 20011 %d\n", type, title, type, update_every);
+ for (w = root; w ; w = w->next) {
+ if(unlikely(w->exposed))
+ fprintf(stdout, "DIMENSION %s '' absolute %ld %ld\n", w->name, 1L, 1024L);
+ }
+#endif
+
+ fprintf(stdout, "CHART %s.major_faults '' '%s Major Page Faults (swap read)' 'page faults/s' swap %s.major_faults stacked 20012 %d\n", type, title, type, update_every);
+ for (w = root; w ; w = w->next) {
+ if(unlikely(w->exposed))
+ fprintf(stdout, "DIMENSION %s '' absolute 1 %llu\n", w->name, RATES_DETAIL);
+ }
+
+ fprintf(stdout, "CHART %s.minor_faults '' '%s Minor Page Faults' 'page faults/s' mem %s.minor_faults stacked 20011 %d\n", type, title, type, update_every);
+ for (w = root; w ; w = w->next) {
+ if(unlikely(w->exposed))
+ fprintf(stdout, "DIMENSION %s '' absolute 1 %llu\n", w->name, RATES_DETAIL);
+ }
+
+#ifdef __FreeBSD__
+ fprintf(stdout, "CHART %s.preads '' '%s Disk Reads' 'blocks/s' disk %s.preads stacked 20002 %d\n", type, title, type, update_every);
+ for (w = root; w ; w = w->next) {
+ if(unlikely(w->exposed))
+ fprintf(stdout, "DIMENSION %s '' absolute 1 %llu\n", w->name, RATES_DETAIL);
+ }
+
+ fprintf(stdout, "CHART %s.pwrites '' '%s Disk Writes' 'blocks/s' disk %s.pwrites stacked 20002 %d\n", type, title, type, update_every);
+ for (w = root; w ; w = w->next) {
+ if(unlikely(w->exposed))
+ fprintf(stdout, "DIMENSION %s '' absolute 1 %llu\n", w->name, RATES_DETAIL);
+ }
+#else
+ fprintf(stdout, "CHART %s.preads '' '%s Disk Reads' 'kilobytes/s' disk %s.preads stacked 20002 %d\n", type, title, type, update_every);
+ for (w = root; w ; w = w->next) {
+ if(unlikely(w->exposed))
+ fprintf(stdout, "DIMENSION %s '' absolute 1 %llu\n", w->name, 1024LLU * RATES_DETAIL);
+ }
+
+ fprintf(stdout, "CHART %s.pwrites '' '%s Disk Writes' 'kilobytes/s' disk %s.pwrites stacked 20002 %d\n", type, title, type, update_every);
+ for (w = root; w ; w = w->next) {
+ if(unlikely(w->exposed))
+ fprintf(stdout, "DIMENSION %s '' absolute 1 %llu\n", w->name, 1024LLU * RATES_DETAIL);
+ }
+
+ fprintf(stdout, "CHART %s.lreads '' '%s Disk Logical Reads' 'kilobytes/s' disk %s.lreads stacked 20042 %d\n", type, title, type, update_every);
+ for (w = root; w ; w = w->next) {
+ if(unlikely(w->exposed))
+ fprintf(stdout, "DIMENSION %s '' absolute 1 %llu\n", w->name, 1024LLU * RATES_DETAIL);
+ }
+
+ fprintf(stdout, "CHART %s.lwrites '' '%s I/O Logical Writes' 'kilobytes/s' disk %s.lwrites stacked 20042 %d\n", type, title, type, update_every);
+ for (w = root; w ; w = w->next) {
+ if(unlikely(w->exposed))
+ fprintf(stdout, "DIMENSION %s '' absolute 1 %llu\n", w->name, 1024LLU * RATES_DETAIL);
+ }
+#endif
+
+ if(enable_file_charts) {
+ fprintf(stdout, "CHART %s.files '' '%s Open Files' 'open files' disk %s.files stacked 20050 %d\n", type,
+ title, type, update_every);
+ for (w = root; w; w = w->next) {
+ if (unlikely(w->exposed))
+ fprintf(stdout, "DIMENSION %s '' absolute 1 1\n", w->name);
+ }
+
+ fprintf(stdout, "CHART %s.sockets '' '%s Open Sockets' 'open sockets' net %s.sockets stacked 20051 %d\n",
+ type, title, type, update_every);
+ for (w = root; w; w = w->next) {
+ if (unlikely(w->exposed))
+ fprintf(stdout, "DIMENSION %s '' absolute 1 1\n", w->name);
+ }
+
+ fprintf(stdout, "CHART %s.pipes '' '%s Pipes' 'open pipes' processes %s.pipes stacked 20053 %d\n", type,
+ title, type, update_every);
+ for (w = root; w; w = w->next) {
+ if (unlikely(w->exposed))
+ fprintf(stdout, "DIMENSION %s '' absolute 1 1\n", w->name);
+ }
+ }
+}
+
+
+// ----------------------------------------------------------------------------
+// parse command line arguments
+
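+// check whether /proc/1/io can be read - reading the I/O statistics of other
+// processes (pid 1 is owned by root) requires elevated privileges, so this is
+// used to verify what the plugin is allowed to access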
+int check_proc_1_io() {
+ int ret = 0;
+
+ procfile *ff = procfile_open("/proc/1/io", NULL, PROCFILE_FLAG_NO_ERROR_ON_FILE_IO);
+ if(!ff) goto cleanup;
+
+ ff = procfile_readall(ff);
+ if(!ff) goto cleanup;
+
+ ret = 1;
+
+cleanup:
+ procfile_close(ff);
+ return ret;
+}
+
+static void parse_args(int argc, char **argv)
+{
+ int i, freq = 0;
+
+ for(i = 1; i < argc; i++) {
+ if(!freq) {
+ int n = (int)str2l(argv[i]);
+ if(n > 0) {
+ freq = n;
+ continue;
+ }
+ }
+
+ if(strcmp("version", argv[i]) == 0 || strcmp("-version", argv[i]) == 0 || strcmp("--version", argv[i]) == 0 || strcmp("-v", argv[i]) == 0 || strcmp("-V", argv[i]) == 0) {
+ printf("apps.plugin %s\n", VERSION);
+ exit(0);
+ }
+
+ if(strcmp("test-permissions", argv[i]) == 0 || strcmp("-t", argv[i]) == 0) {
+ if(!check_proc_1_io()) {
+ perror("Tried to read /proc/1/io and it failed");
+ exit(1);
+ }
+ printf("OK\n");
+ exit(0);
+ }
+
+ if(strcmp("debug", argv[i]) == 0) {
+#ifdef NETDATA_INTERNAL_CHECKS
+ debug_enabled = 1;
+#else
+ fprintf(stderr, "apps.plugin has been compiled without debugging\n");
+#endif
+ continue;
+ }
+
+#ifndef __FreeBSD__
+ if(strcmp("fds-cache-secs", argv[i]) == 0) {
+ if(argc <= i + 1) {
+ fprintf(stderr, "Parameter 'fds-cache-secs' requires a number as argument.\n");
+ exit(1);
+ }
+ i++;
+ max_fds_cache_seconds = str2i(argv[i]);
+ if(max_fds_cache_seconds < 0) max_fds_cache_seconds = 0;
+ continue;
+ }
+#endif
+
+ if(strcmp("no-childs", argv[i]) == 0 || strcmp("without-childs", argv[i]) == 0) {
+ include_exited_childs = 0;
+ continue;
+ }
+
+ if(strcmp("with-childs", argv[i]) == 0) {
+ include_exited_childs = 1;
+ continue;
+ }
+
+ if(strcmp("with-guest", argv[i]) == 0) {
+ enable_guest_charts = 1;
+ continue;
+ }
+
+ if(strcmp("no-guest", argv[i]) == 0 || strcmp("without-guest", argv[i]) == 0) {
+ enable_guest_charts = 0;
+ continue;
+ }
+
+ if(strcmp("with-files", argv[i]) == 0) {
+ enable_file_charts = 1;
+ continue;
+ }
+
+ if(strcmp("no-files", argv[i]) == 0 || strcmp("without-files", argv[i]) == 0) {
+ enable_file_charts = 0;
+ continue;
+ }
+
+ if(strcmp("no-users", argv[i]) == 0 || strcmp("without-users", argv[i]) == 0) {
+ enable_users_charts = 0;
+ continue;
+ }
+
+ if(strcmp("no-groups", argv[i]) == 0 || strcmp("without-groups", argv[i]) == 0) {
+ enable_groups_charts = 0;
+ continue;
+ }
+
+ if(strcmp("-h", argv[i]) == 0 || strcmp("--help", argv[i]) == 0) {
+ fprintf(stderr,
+ "\n"
+ " netdata apps.plugin %s\n"
+ " Copyright (C) 2016-2017 Costa Tsaousis <costa@tsaousis.gr>\n"
+ " Released under GNU General Public License v3 or later.\n"
+ " All rights reserved.\n"
+ "\n"
+ " This program is a data collector plugin for netdata.\n"
+ "\n"
+ " Available command line options:\n"
+ "\n"
+ " SECONDS set the data collection frequency\n"
+ "\n"
+ " debug enable debugging (lot of output)\n"
+ "\n"
+ " with-childs\n"
+ " without-childs enable / disable aggregating exited\n"
+ " children resources into parents\n"
+ " (default is enabled)\n"
+ "\n"
+ " with-guest\n"
+ " without-guest enable / disable reporting guest charts\n"
+ " (default is disabled)\n"
+ "\n"
+ " with-files\n"
+ " without-files enable / disable reporting files, sockets, pipes\n"
+ " (default is enabled)\n"
+ "\n"
+#ifndef __FreeBSD__
+ " fds-cache-secs N cache the files of processed for N seconds\n"
+ " caching is adaptive per file (when a file\n"
+ " is found, it starts at 0 and while the file\n"
+ " remains open, it is incremented up to the\n"
+ " max given)\n"
+ " (default is %d seconds)\n"
+ "\n"
+#endif
+ " version or -v or -V print program version and exit\n"
+ "\n"
+ , VERSION
+#ifndef __FreeBSD__
+ , max_fds_cache_seconds
+#endif
+ );
+ exit(1);
+ }
+
+ error("Cannot understand option %s", argv[i]);
+ exit(1);
+ }
+
+ if(freq > 0) update_every = freq;
+
+ if(read_apps_groups_conf(user_config_dir, "groups")) {
+ info("Cannot read process groups configuration file '%s/apps_groups.conf'. Will try '%s/apps_groups.conf'", user_config_dir, stock_config_dir);
+
+ if(read_apps_groups_conf(stock_config_dir, "groups")) {
+ error("Cannot read process groups '%s/apps_groups.conf'. There are no internal defaults. Failing.", stock_config_dir);
+ exit(1);
+ }
+ else
+ info("Loaded config file '%s/apps_groups.conf'", stock_config_dir);
+ }
+ else
+ info("Loaded config file '%s/apps_groups.conf'", user_config_dir);
+}
+
+static int am_i_running_as_root() {
+ uid_t uid = getuid(), euid = geteuid();
+
+ if(uid == 0 || euid == 0) {
+ if(debug_enabled) info("I am running with escalated privileges, uid = %u, euid = %u.", uid, euid);
+ return 1;
+ }
+
+ if(debug_enabled) info("I am not running with escalated privileges, uid = %u, euid = %u.", uid, euid);
+ return 0;
+}
+
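+// verify that the effective capabilities of the process include
+// CAP_DAC_READ_SEARCH and CAP_SYS_PTRACE (only when compiled with libcap)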
+#ifdef HAVE_CAPABILITY
+static int check_capabilities() {
+ cap_t caps = cap_get_proc();
+ if(!caps) {
+ error("Cannot get current capabilities.");
+ return 0;
+ }
+ else if(debug_enabled)
+ info("Received my capabilities from the system.");
+
+ int ret = 1;
+
+ cap_flag_value_t cfv = CAP_CLEAR;
+ if(cap_get_flag(caps, CAP_DAC_READ_SEARCH, CAP_EFFECTIVE, &cfv) == -1) {
+ error("Cannot find if CAP_DAC_READ_SEARCH is effective.");
+ ret = 0;
+ }
+ else {
+ if(cfv != CAP_SET) {
+ error("apps.plugin should run with CAP_DAC_READ_SEARCH.");
+ ret = 0;
+ }
+ else if(debug_enabled)
+ info("apps.plugin runs with CAP_DAC_READ_SEARCH.");
+ }
+
+ cfv = CAP_CLEAR;
+ if(cap_get_flag(caps, CAP_SYS_PTRACE, CAP_EFFECTIVE, &cfv) == -1) {
+ error("Cannot find if CAP_SYS_PTRACE is effective.");
+ ret = 0;
+ }
+ else {
+ if(cfv != CAP_SET) {
+ error("apps.plugin should run with CAP_SYS_PTRACE.");
+ ret = 0;
+ }
+ else if(debug_enabled)
+ info("apps.plugin runs with CAP_SYS_PTRACE.");
+ }
+
+ cap_free(caps);
+
+ return ret;
+}
+#else
+static int check_capabilities() {
+ return 0;
+}
+#endif
+
+int main(int argc, char **argv) {
+ // debug_flags = D_PROCFILE;
+
+ pagesize = (size_t)sysconf(_SC_PAGESIZE);
+
+ // set the name for logging
+ program_name = "apps.plugin";
+
+ // disable syslog for apps.plugin
+ error_log_syslog = 0;
+
+ // set errors flood protection to 100 logs per hour
+ error_log_errors_per_period = 100;
+ error_log_throttle_period = 3600;
+
+ // since apps.plugin runs as root, prevent it from opening symbolic links
+ procfile_open_flags = O_RDONLY|O_NOFOLLOW;
+
+ netdata_configured_host_prefix = getenv("NETDATA_HOST_PREFIX");
+ if(verify_netdata_host_prefix() == -1) exit(1);
+
+ user_config_dir = getenv("NETDATA_USER_CONFIG_DIR");
+ if(user_config_dir == NULL) {
+ // info("NETDATA_CONFIG_DIR is not passed from netdata");
+ user_config_dir = CONFIG_DIR;
+ }
+ // else info("Found NETDATA_USER_CONFIG_DIR='%s'", user_config_dir);
+
+ stock_config_dir = getenv("NETDATA_STOCK_CONFIG_DIR");
+ if(stock_config_dir == NULL) {
+ // info("NETDATA_CONFIG_DIR is not passed from netdata");
+ stock_config_dir = LIBCONFIG_DIR;
+ }
+ // else info("Found NETDATA_USER_CONFIG_DIR='%s'", user_config_dir);
+
+#ifdef NETDATA_INTERNAL_CHECKS
+ if(debug_flags != 0) {
+ struct rlimit rl = { RLIM_INFINITY, RLIM_INFINITY };
+ if(setrlimit(RLIMIT_CORE, &rl) != 0)
+ info("Cannot request unlimited core dumps for debugging... Proceeding anyway...");
+#ifdef HAVE_SYS_PRCTL_H
+ prctl(PR_SET_DUMPABLE, 1, 0, 0, 0);
+#endif
+ }
+#endif /* NETDATA_INTERNAL_CHECKS */
+
+ procfile_adaptive_initial_allocation = 1;
+
+ time_t started_t = now_monotonic_sec();
+ get_system_HZ();
+ get_system_pid_max();
+ get_system_cpus();
+
+ parse_args(argc, argv);
+
+ if(!check_capabilities() && !am_i_running_as_root() && !check_proc_1_io()) {
+ uid_t uid = getuid(), euid = geteuid();
+#ifdef HAVE_CAPABILITY
+ error("apps.plugin should either run as root (now running with uid %u, euid %u) or have special capabilities. "
+ "Without these, apps.plugin cannot report disk I/O utilization of other processes. "
+ "To enable capabilities run: sudo setcap cap_dac_read_search,cap_sys_ptrace+ep %s; "
+ "To enable setuid to root run: sudo chown root:netdata %s; sudo chmod 4750 %s; "
+ , uid, euid, argv[0], argv[0], argv[0]
+ );
+#else
+ error("apps.plugin should either run as root (now running with uid %u, euid %u) or have special capabilities. "
+ "Without these, apps.plugin cannot report disk I/O utilization of other processes. "
+ "Your system does not support capabilities. "
+ "To enable setuid to root run: sudo chown root:netdata %s; sudo chmod 4750 %s; "
+ , uid, euid, argv[0], argv[0]
+ );
+#endif
+ }
+
+ info("started on pid %d", getpid());
+
+#if (ALL_PIDS_ARE_READ_INSTANTLY == 0)
+ all_pids_sortlist = callocz(sizeof(pid_t), (size_t)pid_max);
+#endif
+
+ all_pids = callocz(sizeof(struct pid_stat *), (size_t) pid_max);
+
+ usec_t step = update_every * USEC_PER_SEC;
+ global_iterations_counter = 1;
+ heartbeat_t hb;
+ heartbeat_init(&hb);
+ for(;1; global_iterations_counter++) {
+
+#ifdef NETDATA_PROFILING
+#warning "compiling for profiling"
+ static int profiling_count=0;
+ profiling_count++;
+ if(unlikely(profiling_count > 2000)) exit(0);
+ usec_t dt = update_every * USEC_PER_SEC;
+#else
+ usec_t dt = heartbeat_next(&hb, step);
+#endif
+
+ if(!collect_data_for_all_processes()) {
+ error("Cannot collect /proc data for running processes. Disabling apps.plugin...");
+ printf("DISABLE\n");
+ exit(1);
+ }
+
+ calculate_netdata_statistics();
+ normalize_utilization(apps_groups_root_target);
+
+ send_resource_usage_to_netdata(dt);
+
+ // this is smart enough to show only newly added apps, when needed
+ send_charts_updates_to_netdata(apps_groups_root_target, "apps", "Apps");
+
+ if(likely(enable_users_charts))
+ send_charts_updates_to_netdata(users_root_target, "users", "Users");
+
+ if(likely(enable_groups_charts))
+ send_charts_updates_to_netdata(groups_root_target, "groups", "User Groups");
+
+ send_collected_data_to_netdata(apps_groups_root_target, "apps", dt);
+
+ if(likely(enable_users_charts))
+ send_collected_data_to_netdata(users_root_target, "users", dt);
+
+ if(likely(enable_groups_charts))
+ send_collected_data_to_netdata(groups_root_target, "groups", dt);
+
+ fflush(stdout);
+
+ show_guest_time_old = show_guest_time;
+
+ debug_log("done Loop No %zu", global_iterations_counter);
+
+ // restart check (14400 seconds)
+ if(now_monotonic_sec() - started_t > 14400) exit(0);
+ }
+}
diff --git a/collectors/cgroups.plugin/Makefile.am b/collectors/cgroups.plugin/Makefile.am
new file mode 100644
index 000000000..eb3214ab2
--- /dev/null
+++ b/collectors/cgroups.plugin/Makefile.am
@@ -0,0 +1,21 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+
+CLEANFILES = \
+ cgroup-name.sh \
+ $(NULL)
+
+include $(top_srcdir)/build/subst.inc
+SUFFIXES = .in
+
+dist_plugins_SCRIPTS = \
+ cgroup-name.sh \
+ cgroup-network-helper.sh \
+ $(NULL)
+
+dist_noinst_DATA = \
+ cgroup-name.sh.in \
+ README.md \
+ $(NULL)
diff --git a/collectors/cgroups.plugin/Makefile.in b/collectors/cgroups.plugin/Makefile.in
new file mode 100644
index 000000000..49c3c9834
--- /dev/null
+++ b/collectors/cgroups.plugin/Makefile.in
@@ -0,0 +1,563 @@
+# Makefile.in generated by automake 1.14.1 from Makefile.am.
+# @configure_input@
+
+# Copyright (C) 1994-2013 Free Software Foundation, Inc.
+
+# This Makefile.in is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
+# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE.
+
+@SET_MAKE@
+
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+
+VPATH = @srcdir@
+am__is_gnu_make = test -n '$(MAKEFILE_LIST)' && test -n '$(MAKELEVEL)'
+am__make_running_with_option = \
+ case $${target_option-} in \
+ ?) ;; \
+ *) echo "am__make_running_with_option: internal error: invalid" \
+ "target option '$${target_option-}' specified" >&2; \
+ exit 1;; \
+ esac; \
+ has_opt=no; \
+ sane_makeflags=$$MAKEFLAGS; \
+ if $(am__is_gnu_make); then \
+ sane_makeflags=$$MFLAGS; \
+ else \
+ case $$MAKEFLAGS in \
+ *\\[\ \ ]*) \
+ bs=\\; \
+ sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \
+ | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \
+ esac; \
+ fi; \
+ skip_next=no; \
+ strip_trailopt () \
+ { \
+ flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \
+ }; \
+ for flg in $$sane_makeflags; do \
+ test $$skip_next = yes && { skip_next=no; continue; }; \
+ case $$flg in \
+ *=*|--*) continue;; \
+ -*I) strip_trailopt 'I'; skip_next=yes;; \
+ -*I?*) strip_trailopt 'I';; \
+ -*O) strip_trailopt 'O'; skip_next=yes;; \
+ -*O?*) strip_trailopt 'O';; \
+ -*l) strip_trailopt 'l'; skip_next=yes;; \
+ -*l?*) strip_trailopt 'l';; \
+ -[dEDm]) skip_next=yes;; \
+ -[JT]) skip_next=yes;; \
+ esac; \
+ case $$flg in \
+ *$$target_option*) has_opt=yes; break;; \
+ esac; \
+ done; \
+ test $$has_opt = yes
+am__make_dryrun = (target_option=n; $(am__make_running_with_option))
+am__make_keepgoing = (target_option=k; $(am__make_running_with_option))
+pkgdatadir = $(datadir)/@PACKAGE@
+pkgincludedir = $(includedir)/@PACKAGE@
+pkglibdir = $(libdir)/@PACKAGE@
+pkglibexecdir = $(libexecdir)/@PACKAGE@
+am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd
+install_sh_DATA = $(install_sh) -c -m 644
+install_sh_PROGRAM = $(install_sh) -c
+install_sh_SCRIPT = $(install_sh) -c
+INSTALL_HEADER = $(INSTALL_DATA)
+transform = $(program_transform_name)
+NORMAL_INSTALL = :
+PRE_INSTALL = :
+POST_INSTALL = :
+NORMAL_UNINSTALL = :
+PRE_UNINSTALL = :
+POST_UNINSTALL = :
+build_triplet = @build@
+host_triplet = @host@
+DIST_COMMON = $(top_srcdir)/build/subst.inc $(srcdir)/Makefile.in \
+ $(srcdir)/Makefile.am $(dist_plugins_SCRIPTS) \
+ $(dist_noinst_DATA)
+subdir = collectors/cgroups.plugin
+ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
+am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \
+ $(top_srcdir)/build/m4/ax_c__generic.m4 \
+ $(top_srcdir)/build/m4/ax_c_lto.m4 \
+ $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \
+ $(top_srcdir)/build/m4/ax_c_mallopt.m4 \
+ $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \
+ $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \
+ $(top_srcdir)/build/m4/ax_pthread.m4 \
+ $(top_srcdir)/build/m4/jemalloc.m4 \
+ $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac
+am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
+ $(ACLOCAL_M4)
+mkinstalldirs = $(install_sh) -d
+CONFIG_HEADER = $(top_builddir)/config.h
+CONFIG_CLEAN_FILES =
+CONFIG_CLEAN_VPATH_FILES =
+am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`;
+am__vpath_adj = case $$p in \
+ $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \
+ *) f=$$p;; \
+ esac;
+am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`;
+am__install_max = 40
+am__nobase_strip_setup = \
+ srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'`
+am__nobase_strip = \
+ for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||"
+am__nobase_list = $(am__nobase_strip_setup); \
+ for p in $$list; do echo "$$p $$p"; done | \
+ sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \
+ $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \
+ if (++n[$$2] == $(am__install_max)) \
+ { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \
+ END { for (dir in files) print dir, files[dir] }'
+am__base_list = \
+ sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \
+ sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g'
+am__uninstall_files_from_dir = { \
+ test -z "$$files" \
+ || { test ! -d "$$dir" && test ! -f "$$dir" && test ! -r "$$dir"; } \
+ || { echo " ( cd '$$dir' && rm -f" $$files ")"; \
+ $(am__cd) "$$dir" && rm -f $$files; }; \
+ }
+am__installdirs = "$(DESTDIR)$(pluginsdir)"
+SCRIPTS = $(dist_plugins_SCRIPTS)
+AM_V_P = $(am__v_P_@AM_V@)
+am__v_P_ = $(am__v_P_@AM_DEFAULT_V@)
+am__v_P_0 = false
+am__v_P_1 = :
+AM_V_GEN = $(am__v_GEN_@AM_V@)
+am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@)
+am__v_GEN_0 = @echo " GEN " $@;
+am__v_GEN_1 =
+AM_V_at = $(am__v_at_@AM_V@)
+am__v_at_ = $(am__v_at_@AM_DEFAULT_V@)
+am__v_at_0 = @
+am__v_at_1 =
+SOURCES =
+DIST_SOURCES =
+am__can_run_installinfo = \
+ case $$AM_UPDATE_INFO_DIR in \
+ n|no|NO) false;; \
+ *) (install-info --version) >/dev/null 2>&1;; \
+ esac
+DATA = $(dist_noinst_DATA)
+am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP)
+DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
+ACLOCAL = @ACLOCAL@
+AMTAR = @AMTAR@
+AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@
+AUTOCONF = @AUTOCONF@
+AUTOHEADER = @AUTOHEADER@
+AUTOMAKE = @AUTOMAKE@
+AWK = @AWK@
+CC = @CC@
+CCDEPMODE = @CCDEPMODE@
+CFLAGS = @CFLAGS@
+CPP = @CPP@
+CPPFLAGS = @CPPFLAGS@
+CYGPATH_W = @CYGPATH_W@
+DEFS = @DEFS@
+DEPDIR = @DEPDIR@
+ECHO_C = @ECHO_C@
+ECHO_N = @ECHO_N@
+ECHO_T = @ECHO_T@
+EGREP = @EGREP@
+EXEEXT = @EXEEXT@
+GREP = @GREP@
+INSTALL = @INSTALL@
+INSTALL_DATA = @INSTALL_DATA@
+INSTALL_PROGRAM = @INSTALL_PROGRAM@
+INSTALL_SCRIPT = @INSTALL_SCRIPT@
+INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@
+IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@
+LDFLAGS = @LDFLAGS@
+LIBCAP_CFLAGS = @LIBCAP_CFLAGS@
+LIBCAP_LIBS = @LIBCAP_LIBS@
+LIBMNL_CFLAGS = @LIBMNL_CFLAGS@
+LIBMNL_LIBS = @LIBMNL_LIBS@
+LIBOBJS = @LIBOBJS@
+LIBS = @LIBS@
+LTLIBOBJS = @LTLIBOBJS@
+MAINT = @MAINT@
+MAKEINFO = @MAKEINFO@
+MATH_CFLAGS = @MATH_CFLAGS@
+MATH_LIBS = @MATH_LIBS@
+MKDIR_P = @MKDIR_P@
+NFACCT_CFLAGS = @NFACCT_CFLAGS@
+NFACCT_LIBS = @NFACCT_LIBS@
+OBJEXT = @OBJEXT@
+OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@
+OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@
+OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@
+OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@
+OPTIONAL_MATH_CLFAGS = @OPTIONAL_MATH_CLFAGS@
+OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@
+OPTIONAL_NFACCT_CLFAGS = @OPTIONAL_NFACCT_CLFAGS@
+OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@
+OPTIONAL_UUID_CLFAGS = @OPTIONAL_UUID_CLFAGS@
+OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@
+OPTIONAL_ZLIB_CLFAGS = @OPTIONAL_ZLIB_CLFAGS@
+OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@
+PACKAGE = @PACKAGE@
+PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@
+PACKAGE_NAME = @PACKAGE_NAME@
+PACKAGE_RPM_RELEASE = @PACKAGE_RPM_RELEASE@
+PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@
+PACKAGE_STRING = @PACKAGE_STRING@
+PACKAGE_TARNAME = @PACKAGE_TARNAME@
+PACKAGE_URL = @PACKAGE_URL@
+PACKAGE_VERSION = @PACKAGE_VERSION@
+PATH_SEPARATOR = @PATH_SEPARATOR@
+PKG_CONFIG = @PKG_CONFIG@
+PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@
+PKG_CONFIG_PATH = @PKG_CONFIG_PATH@
+PTHREAD_CC = @PTHREAD_CC@
+PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
+PTHREAD_LIBS = @PTHREAD_LIBS@
+SET_MAKE = @SET_MAKE@
+SHELL = @SHELL@
+SSE_CANDIDATE = @SSE_CANDIDATE@
+STRIP = @STRIP@
+UUID_CFLAGS = @UUID_CFLAGS@
+UUID_LIBS = @UUID_LIBS@
+VERSION = @VERSION@
+ZLIB_CFLAGS = @ZLIB_CFLAGS@
+ZLIB_LIBS = @ZLIB_LIBS@
+abs_builddir = @abs_builddir@
+abs_srcdir = @abs_srcdir@
+abs_top_builddir = @abs_top_builddir@
+abs_top_srcdir = @abs_top_srcdir@
+ac_ct_CC = @ac_ct_CC@
+am__include = @am__include@
+am__leading_dot = @am__leading_dot@
+am__quote = @am__quote@
+am__tar = @am__tar@
+am__untar = @am__untar@
+ax_pthread_config = @ax_pthread_config@
+bindir = @bindir@
+build = @build@
+build_alias = @build_alias@
+build_cpu = @build_cpu@
+build_os = @build_os@
+build_target = @build_target@
+build_vendor = @build_vendor@
+builddir = @builddir@
+cachedir = @cachedir@
+chartsdir = @chartsdir@
+configdir = @configdir@
+datadir = @datadir@
+datarootdir = @datarootdir@
+docdir = @docdir@
+dvidir = @dvidir@
+exec_prefix = @exec_prefix@
+has_jemalloc = @has_jemalloc@
+has_tcmalloc = @has_tcmalloc@
+host = @host@
+host_alias = @host_alias@
+host_cpu = @host_cpu@
+host_os = @host_os@
+host_vendor = @host_vendor@
+htmldir = @htmldir@
+includedir = @includedir@
+infodir = @infodir@
+install_sh = @install_sh@
+libconfigdir = @libconfigdir@
+libdir = @libdir@
+libexecdir = @libexecdir@
+localedir = @localedir@
+localstatedir = @localstatedir@
+logdir = @logdir@
+mandir = @mandir@
+mkdir_p = @mkdir_p@
+nodedir = @nodedir@
+oldincludedir = @oldincludedir@
+pdfdir = @pdfdir@
+pluginsdir = @pluginsdir@
+prefix = @prefix@
+program_transform_name = @program_transform_name@
+psdir = @psdir@
+pythondir = @pythondir@
+registrydir = @registrydir@
+sbindir = @sbindir@
+sharedstatedir = @sharedstatedir@
+srcdir = @srcdir@
+sysconfdir = @sysconfdir@
+target_alias = @target_alias@
+top_build_prefix = @top_build_prefix@
+top_builddir = @top_builddir@
+top_srcdir = @top_srcdir@
+varlibdir = @varlibdir@
+webdir = @webdir@
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+CLEANFILES = \
+ cgroup-name.sh \
+ $(NULL)
+
+SUFFIXES = .in
+dist_plugins_SCRIPTS = \
+ cgroup-name.sh \
+ cgroup-network-helper.sh \
+ $(NULL)
+
+dist_noinst_DATA = \
+ cgroup-name.sh.in \
+ README.md \
+ $(NULL)
+
+all: all-am
+
+.SUFFIXES:
+.SUFFIXES: .in
+$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(top_srcdir)/build/subst.inc $(am__configure_deps)
+ @for dep in $?; do \
+ case '$(am__configure_deps)' in \
+ *$$dep*) \
+ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \
+ && { if test -f $@; then exit 0; else break; fi; }; \
+ exit 1;; \
+ esac; \
+ done; \
+ echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu collectors/cgroups.plugin/Makefile'; \
+ $(am__cd) $(top_srcdir) && \
+ $(AUTOMAKE) --gnu collectors/cgroups.plugin/Makefile
+.PRECIOUS: Makefile
+Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
+ @case '$?' in \
+ *config.status*) \
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \
+ *) \
+ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \
+ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \
+ esac;
+$(top_srcdir)/build/subst.inc:
+
+$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+
+$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(am__aclocal_m4_deps):
+install-dist_pluginsSCRIPTS: $(dist_plugins_SCRIPTS)
+ @$(NORMAL_INSTALL)
+ @list='$(dist_plugins_SCRIPTS)'; test -n "$(pluginsdir)" || list=; \
+ if test -n "$$list"; then \
+ echo " $(MKDIR_P) '$(DESTDIR)$(pluginsdir)'"; \
+ $(MKDIR_P) "$(DESTDIR)$(pluginsdir)" || exit 1; \
+ fi; \
+ for p in $$list; do \
+ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
+ if test -f "$$d$$p"; then echo "$$d$$p"; echo "$$p"; else :; fi; \
+ done | \
+ sed -e 'p;s,.*/,,;n' \
+ -e 'h;s|.*|.|' \
+ -e 'p;x;s,.*/,,;$(transform)' | sed 'N;N;N;s,\n, ,g' | \
+ $(AWK) 'BEGIN { files["."] = ""; dirs["."] = 1; } \
+ { d=$$3; if (dirs[d] != 1) { print "d", d; dirs[d] = 1 } \
+ if ($$2 == $$4) { files[d] = files[d] " " $$1; \
+ if (++n[d] == $(am__install_max)) { \
+ print "f", d, files[d]; n[d] = 0; files[d] = "" } } \
+ else { print "f", d "/" $$4, $$1 } } \
+ END { for (d in files) print "f", d, files[d] }' | \
+ while read type dir files; do \
+ if test "$$dir" = .; then dir=; else dir=/$$dir; fi; \
+ test -z "$$files" || { \
+ echo " $(INSTALL_SCRIPT) $$files '$(DESTDIR)$(pluginsdir)$$dir'"; \
+ $(INSTALL_SCRIPT) $$files "$(DESTDIR)$(pluginsdir)$$dir" || exit $$?; \
+ } \
+ ; done
+
+uninstall-dist_pluginsSCRIPTS:
+ @$(NORMAL_UNINSTALL)
+ @list='$(dist_plugins_SCRIPTS)'; test -n "$(pluginsdir)" || exit 0; \
+ files=`for p in $$list; do echo "$$p"; done | \
+ sed -e 's,.*/,,;$(transform)'`; \
+ dir='$(DESTDIR)$(pluginsdir)'; $(am__uninstall_files_from_dir)
+tags TAGS:
+
+ctags CTAGS:
+
+cscope cscopelist:
+
+
+distdir: $(DISTFILES)
+ @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+ list='$(DISTFILES)'; \
+ dist_files=`for file in $$list; do echo $$file; done | \
+ sed -e "s|^$$srcdirstrip/||;t" \
+ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \
+ case $$dist_files in \
+ */*) $(MKDIR_P) `echo "$$dist_files" | \
+ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \
+ sort -u` ;; \
+ esac; \
+ for file in $$dist_files; do \
+ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \
+ if test -d $$d/$$file; then \
+ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \
+ if test -d "$(distdir)/$$file"; then \
+ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+ fi; \
+ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \
+ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \
+ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+ fi; \
+ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \
+ else \
+ test -f "$(distdir)/$$file" \
+ || cp -p $$d/$$file "$(distdir)/$$file" \
+ || exit 1; \
+ fi; \
+ done
+check-am: all-am
+check: check-am
+all-am: Makefile $(SCRIPTS) $(DATA)
+installdirs:
+ for dir in "$(DESTDIR)$(pluginsdir)"; do \
+ test -z "$$dir" || $(MKDIR_P) "$$dir"; \
+ done
+install: install-am
+install-exec: install-exec-am
+install-data: install-data-am
+uninstall: uninstall-am
+
+install-am: all-am
+ @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
+
+installcheck: installcheck-am
+install-strip:
+ if test -z '$(STRIP)'; then \
+ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+ install; \
+ else \
+ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+ "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \
+ fi
+mostlyclean-generic:
+
+clean-generic:
+ -test -z "$(CLEANFILES)" || rm -f $(CLEANFILES)
+
+distclean-generic:
+ -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
+ -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES)
+
+maintainer-clean-generic:
+ @echo "This command is intended for maintainers to use"
+ @echo "it deletes files that may require special tools to rebuild."
+ -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES)
+clean: clean-am
+
+clean-am: clean-generic mostlyclean-am
+
+distclean: distclean-am
+ -rm -f Makefile
+distclean-am: clean-am distclean-generic
+
+dvi: dvi-am
+
+dvi-am:
+
+html: html-am
+
+html-am:
+
+info: info-am
+
+info-am:
+
+install-data-am: install-dist_pluginsSCRIPTS
+
+install-dvi: install-dvi-am
+
+install-dvi-am:
+
+install-exec-am:
+
+install-html: install-html-am
+
+install-html-am:
+
+install-info: install-info-am
+
+install-info-am:
+
+install-man:
+
+install-pdf: install-pdf-am
+
+install-pdf-am:
+
+install-ps: install-ps-am
+
+install-ps-am:
+
+installcheck-am:
+
+maintainer-clean: maintainer-clean-am
+ -rm -f Makefile
+maintainer-clean-am: distclean-am maintainer-clean-generic
+
+mostlyclean: mostlyclean-am
+
+mostlyclean-am: mostlyclean-generic
+
+pdf: pdf-am
+
+pdf-am:
+
+ps: ps-am
+
+ps-am:
+
+uninstall-am: uninstall-dist_pluginsSCRIPTS
+
+.MAKE: install-am install-strip
+
+.PHONY: all all-am check check-am clean clean-generic cscopelist-am \
+ ctags-am distclean distclean-generic distdir dvi dvi-am html \
+ html-am info info-am install install-am install-data \
+ install-data-am install-dist_pluginsSCRIPTS install-dvi \
+ install-dvi-am install-exec install-exec-am install-html \
+ install-html-am install-info install-info-am install-man \
+ install-pdf install-pdf-am install-ps install-ps-am \
+ install-strip installcheck installcheck-am installdirs \
+ maintainer-clean maintainer-clean-generic mostlyclean \
+ mostlyclean-generic pdf pdf-am ps ps-am tags-am uninstall \
+ uninstall-am uninstall-dist_pluginsSCRIPTS
+
+.in:
+ if sed \
+ -e 's#[@]localstatedir_POST@#$(localstatedir)#g' \
+ -e 's#[@]sbindir_POST@#$(sbindir)#g' \
+ -e 's#[@]sysconfdir_POST@#$(sysconfdir)#g' \
+ -e 's#[@]pythondir_POST@#$(pythondir)#g' \
+ -e 's#[@]configdir_POST@#$(configdir)#g' \
+ -e 's#[@]libconfigdir_POST@#$(libconfigdir)#g' \
+ -e 's#[@]cachedir_POST@#$(cachedir)#g' \
+ $< > $@.tmp; then \
+ mv "$@.tmp" "$@"; \
+ else \
+ rm -f "$@.tmp"; \
+ false; \
+ fi
+
+# Tell versions [3.59,3.63) of GNU make to not export all variables.
+# Otherwise a system limit (for SysV at least) may be exceeded.
+.NOEXPORT:
diff --git a/collectors/cgroups.plugin/README.md b/collectors/cgroups.plugin/README.md
new file mode 100644
index 000000000..e78aa0440
--- /dev/null
+++ b/collectors/cgroups.plugin/README.md
@@ -0,0 +1,187 @@
+# cgroups.plugin
+
+You can monitor containers and virtual machines using **cgroups**.
+
+cgroups (or control groups) are a Linux kernel feature that provides accounting and resource usage limiting for processes. When cgroups are bundled with namespaces (i.e. isolation), they form what we usually call **containers**.
+
+cgroups are hierarchical, meaning that a cgroup can contain child cgroups, which can contain more cgroups, etc. All accounting is reported (and resource usage limits are applied) hierarchically as well.
+
+To visualize cgroup metrics, netdata provides configuration for cherry-picking the cgroups of interest. By default (without any configuration) netdata picks **systemd services**, all kinds of **containers** (lxc, docker, etc) and **virtual machines** spawned by managers that register them with cgroups (qemu, libvirt, etc).
+
+## configuring netdata for cgroups
+
+For each cgroup available in the system, netdata provides this configuration:
+
+```
+[plugin:cgroups]
+ enable cgroup XXX = yes | no
+```
+
+It also provides a few pattern lists that produce a sane default (`yes` or `no`) for each of them.
+
+Below we see how this works.
+
+### how netdata finds the available cgroups
+
+Linux exposes resource usage reporting and provides dynamic configuration for cgroups, using virtual files (usually) under `/sys/fs/cgroup`. netdata reads `/proc/self/mountinfo` to detect the exact mount point of cgroups. netdata also allows manual configuration of this mount point, using these settings:
+
+```
+[plugin:cgroups]
+ check for new cgroups every = 10
+ path to /sys/fs/cgroup/cpuacct = /sys/fs/cgroup/cpuacct
+ path to /sys/fs/cgroup/blkio = /sys/fs/cgroup/blkio
+ path to /sys/fs/cgroup/memory = /sys/fs/cgroup/memory
+ path to /sys/fs/cgroup/devices = /sys/fs/cgroup/devices
+```
+
+netdata rescans these directories for added or removed cgroups every `check for new cgroups every` seconds.
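+
+To verify where cgroups are mounted on your system, you can inspect the same file netdata reads:
+
+```sh
+grep cgroup /proc/self/mountinfo
+```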
+
+### hierarchical search for cgroups
+
+Since cgroups are hierarchical, for each of the directories shown above, netdata walks through the subdirectories recursively searching for cgroups (each subdirectory is another cgroup).
+
+For each of the directories found, netdata provides a configuration variable:
+
+```
+[plugin:cgroups]
+ search for cgroups under PATH = yes | no
+```
+
+To provide a sane default for this setting, netdata uses the following pattern list (patterns starting with `!` give a negative match and their order is important: the first matching a path will be used):
+
+```
+[plugin:cgroups]
+ search for cgroups in subpaths matching = !*/init.scope !*-qemu !/init.scope !/system !/systemd !/user !/user.slice *
+```
+
+So, we disable checking for **child cgroups** in systemd internal cgroups ([systemd services are monitored by netdata](https://github.com/netdata/netdata/wiki/monitoring-systemd-services)), user cgroups (normally used for desktop and remote user sessions), qemu virtual machines (child cgroups of virtual machines) and `init.scope`. All others are enabled.
+
+
+### enabled cgroups
+
+To check if the cgroup is enabled, netdata uses this setting:
+
+```
+[plugin:cgroups]
+ enable cgroup NAME = yes | no
+```
+
+To provide a sane default, netdata uses the following pattern list (it checks the pattern against the path of the cgroup):
+
+```
+[plugin:cgroups]
+ enable by default cgroups matching = !*/init.scope *.scope !*/vcpu* !*/emulator !*.mount !*.partition !*.service !*.slice !*.swap !*.user !/ !/docker !/libvirt !/lxc !/lxc/*/ns !/lxc/*/ns/* !/machine !/qemu !/system !/systemd !/user *
+```
+
+The above provides the default `yes` or `no` setting for each cgroup. However, there is an additional step. In many cases the cgroups found in the `/sys/fs/cgroup` hierarchy are named with random numbers, and these numbers are often ephemeral: they change across reboots or sessions.
+
+So, we need to somehow map the paths of the cgroups to names, to provide consistent netdata configuration (i.e. there is no point in saying `enable cgroup 1234 = yes | no` if `1234` is a random number that changes over time - we need a name for the cgroup first, so that `enable cgroup NAME = yes | no` will be consistent).
+
+For this mapping netdata provides 2 configuration options:
+
+```
+[plugin:cgroups]
+ run script to rename cgroups matching = *.scope *docker* *lxc* *qemu* !/ !*.mount !*.partition !*.service !*.slice !*.swap !*.user *
+ script to get cgroup names = /usr/libexec/netdata/plugins.d/cgroup-name.sh
+```
+
+The whole point of the additional pattern list is to limit the number of times the script is called. Without it, the script might be called thousands of times, depending on the number of cgroups available in the system.
+
+The above pattern list is matched against the path of the cgroup. For matched cgroups, netdata calls the script [cgroup-name.sh](https://github.com/netdata/netdata/blob/master/collectors/cgroups.plugin/cgroup-name.sh.in) to get their name. This script queries `docker`, or applies heuristics, to find a name for the cgroup.
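+
+To see what name a particular cgroup would get, you can run the script manually, using the default path shown in the configuration above (the cgroup identifier below is a made-up example):
+
+```sh
+# pass the cgroup path, as netdata sees it, as the only argument
+/usr/libexec/netdata/plugins.d/cgroup-name.sh "docker_0123456789ab"
+# the resolved name is printed on stdout
+```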
+
+## Monitoring systemd services
+
+netdata monitors **systemd services**. Example:
+
+![image](https://cloud.githubusercontent.com/assets/2662304/21964372/20cd7b84-db53-11e6-98a2-b9c986b082c0.png)
+
+Support per distribution:
+
+system|systemd services<br/>charts shown|`tree`<br/>`/sys/fs/cgroup`|comments
+:-------:|:-------:|:-------:|:------------
+Arch Linux|YES| |
+Gentoo|NO| |can be enabled, see below
+Ubuntu 16.04 LTS|YES| |
+Ubuntu 16.10|YES|[here](http://pastebin.com/PiWbQEXy)|
+Fedora 25|YES|[here](http://pastebin.com/ax0373wF)|
+Debian 8|NO| |can be enabled, see below
+AMI|NO|[here](http://pastebin.com/FrxmptjL)|not a systemd system
+Centos 7.3.1611|NO|[here](http://pastebin.com/SpzgezAg)|can be enabled, see below
+
+#### how to enable cgroup accounting on systemd systems where it is disabled by default
+
+You can verify that accounting is not enabled by running `systemd-cgtop`. The program will show resources only for the root cgroup ` / `, while all services will show nothing.
+
+To enable cgroup accounting, execute this:
+
+```sh
+sed -e 's|^#Default\(.*\)Accounting=.*$|Default\1Accounting=yes|g' /etc/systemd/system.conf >/tmp/system.conf
+```
+
+To see the changes it made, run this:
+
+```
+# diff /etc/systemd/system.conf /tmp/system.conf
+40,44c40,44
+< #DefaultCPUAccounting=no
+< #DefaultIOAccounting=no
+< #DefaultBlockIOAccounting=no
+< #DefaultMemoryAccounting=no
+< #DefaultTasksAccounting=yes
+---
+> DefaultCPUAccounting=yes
+> DefaultIOAccounting=yes
+> DefaultBlockIOAccounting=yes
+> DefaultMemoryAccounting=yes
+> DefaultTasksAccounting=yes
+```
+
+If you are happy with the changes, run:
+
+```sh
+# copy the file to the right location
+sudo cp /tmp/system.conf /etc/systemd/system.conf
+
+# restart systemd to take it into account
+sudo systemctl daemon-reexec
+```
+
+(`systemctl daemon-reload` does not reload this part of the systemd configuration, so you have to execute `systemctl daemon-reexec`).
+
+Now, when you run `systemd-cgtop`, services will start reporting usage (if they do not, restart a service - any service - to wake them up). Refresh your netdata dashboard, and you will have the charts too.
+
+In case memory accounting is missing, you will need to enable it in your kernel by appending the following kernel boot options and rebooting:
+
+```
+cgroup_enable=memory swapaccount=1
+```
+
+You can add the above directly to the `linux` line in your `/boot/grub/grub.cfg`, or append them to `GRUB_CMDLINE_LINUX` in `/etc/default/grub` (in which case you will have to run `update-grub` before rebooting), as sketched below. On DigitalOcean Debian images you may have to set it in `/etc/default/grub.d/50-cloudimg-settings.cfg`.
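+
+For example (a sketch only - keep any options your distribution already sets in this variable):
+
+```sh
+# in /etc/default/grub
+GRUB_CMDLINE_LINUX="cgroup_enable=memory swapaccount=1"
+
+# then regenerate the grub configuration and reboot
+sudo update-grub
+```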
+
+---
+
+## Monitoring ephemeral containers
+
+netdata monitors containers automatically when it is installed on the host, or when it is installed in a container that has access to the `/proc` and `/sys` filesystems of the host.
+
+netdata prior to v1.6 had 2 issues when such containers were monitored:
+
+1. network interface alarms were triggering when containers were stopped
+
+2. charts were never cleaned up, so after some time dozens of containers were showing up on the dashboard, and they were occupying memory.
+
+
+### the current netdata
+
+network interfaces and cgroups (containers) are now cleaned up automatically.
+
+So, when a network interface or container stops, netdata might log a few errors in error.log complaining about files it cannot find, but immediately:
+
+1. it will detect this is a removed container or network interface
+2. it will freeze/pause all alarms for them
+3. it will mark their charts as obsolete
+4. obsolete charts are not offered on new dashboard sessions (so hit F5 and the charts are gone)
+5. existing dashboard sessions will continue to see them, but of course they will not refresh
+6. obsolete charts will be removed from memory 1 hour after the last user viewed them (configurable with `[global].cleanup obsolete charts after seconds = 3600` in netdata.conf; see the snippet after this list)
+7. when obsolete charts are removed from memory they are also deleted from disk (configurable with `[global].delete obsolete charts files = yes`)
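+
+Both settings mentioned in points 6 and 7 live in the `[global]` section of `netdata.conf`, for example:
+
+```
+[global]
+    cleanup obsolete charts after seconds = 3600
+    delete obsolete charts files = yes
+```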
+
diff --git a/collectors/cgroups.plugin/cgroup-name.sh b/collectors/cgroups.plugin/cgroup-name.sh
new file mode 100644
index 000000000..6bf8b8b03
--- /dev/null
+++ b/collectors/cgroups.plugin/cgroup-name.sh
@@ -0,0 +1,196 @@
+#!/usr/bin/env bash
+
+# netdata
+# real-time performance and health monitoring, done right!
+# (C) 2016 Costa Tsaousis <costa@tsaousis.gr>
+# SPDX-License-Identifier: GPL-3.0-or-later
+#
+# Script to find a better name for cgroups
+#
+
+export PATH="${PATH}:/sbin:/usr/sbin:/usr/local/sbin"
+export LC_ALL=C
+
+# -----------------------------------------------------------------------------
+
+PROGRAM_NAME="$(basename "${0}")"
+
+logdate() {
+ date "+%Y-%m-%d %H:%M:%S"
+}
+
+log() {
+ local status="${1}"
+ shift
+
+ echo >&2 "$(logdate): ${PROGRAM_NAME}: ${status}: ${*}"
+
+}
+
+warning() {
+ log WARNING "${@}"
+}
+
+error() {
+ log ERROR "${@}"
+}
+
+info() {
+ log INFO "${@}"
+}
+
+fatal() {
+ log FATAL "${@}"
+ exit 1
+}
+
+debug=0
+debug() {
+ [ $debug -eq 1 ] && log DEBUG "${@}"
+}
+
+# -----------------------------------------------------------------------------
+
+[ -z "${NETDATA_USER_CONFIG_DIR}" ] && NETDATA_USER_CONFIG_DIR="/usr/local/etc/netdata"
+[ -z "${NETDATA_STOCK_CONFIG_DIR}" ] && NETDATA_STOCK_CONFIG_DIR="/usr/local/lib/netdata/conf.d"
+
+DOCKER_HOST="${DOCKER_HOST:=/var/run/docker.sock}"
+CGROUP="${1}"
+NAME=
+
+# -----------------------------------------------------------------------------
+
+if [ -z "${CGROUP}" ]
+ then
+ fatal "called without a cgroup name. Nothing to do."
+fi
+
+for CONFIG in "${NETDATA_USER_CONFIG_DIR}/cgroups-names.conf" "${NETDATA_STOCK_CONFIG_DIR}/cgroups-names.conf"
+do
+ if [ -f "${CONFIG}" ]
+ then
+ NAME="$(grep "^${CGROUP} " "${CONFIG}" | sed "s/[[:space:]]\+/ /g" | cut -d ' ' -f 2)"
+ if [ -z "${NAME}" ]
+ then
+ info "cannot find cgroup '${CGROUP}' in '${CONFIG}'."
+ else
+ break
+ fi
+ #else
+ # info "configuration file '${CONFIG}' is not available."
+ fi
+done
+
+function docker_get_name_classic {
+ local id="${1}"
+ info "Running command: docker ps --filter=id=\"${id}\" --format=\"{{.Names}}\""
+ NAME="$( docker ps --filter=id="${id}" --format="{{.Names}}" )"
+ return 0
+}
+
+function docker_get_name_api {
+ local id="${1}"
+ if [ ! -S "${DOCKER_HOST}" ]
+ then
+ warning "Can't find ${DOCKER_HOST}"
+ return 1
+ fi
+ info "Running API command: /containers/${id}/json"
+ JSON=$(echo -e "GET /containers/${id}/json HTTP/1.0\r\n" | nc -U ${DOCKER_HOST} | grep '^{.*')
+ NAME=$(echo $JSON | jq -r .Name,.Config.Hostname | grep -v null | head -n1 | sed 's|^/||')
+ return 0
+}
+
+function docker_get_name {
+ local id="${1}"
+ if hash docker 2>/dev/null
+ then
+ docker_get_name_classic "${id}"
+ else
+ docker_get_name_api "${id}" || docker_get_name_classic "${id}"
+ fi
+ if [ -z "${NAME}" ]
+ then
+ warning "cannot find the name of docker container '${id}'"
+ NAME="${id:0:12}"
+ else
+ info "docker container '${id}' is named '${NAME}'"
+ fi
+}
+
+if [ -z "${NAME}" ]
+ then
+ if [[ "${CGROUP}" =~ ^.*docker[-_/\.][a-fA-F0-9]+[-_\.]?.*$ ]]
+ then
+ # docker containers
+
+ DOCKERID="$( echo "${CGROUP}" | sed "s|^.*docker[-_/]\([a-fA-F0-9]\+\)[-_\.]\?.*$|\1|" )"
+ # echo "DOCKERID=${DOCKERID}"
+
+ if [ ! -z "${DOCKERID}" -a \( ${#DOCKERID} -eq 64 -o ${#DOCKERID} -eq 12 \) ]
+ then
+ docker_get_name "${DOCKERID}"
+ else
+ error "a docker id cannot be extracted from docker cgroup '${CGROUP}'."
+ fi
+ elif [[ "${CGROUP}" =~ ^.*kubepods[_/].*[_/]pod[a-fA-F0-9-]+[_/][a-fA-F0-9]+$ ]]
+ then
+ # kubernetes
+
+ DOCKERID="$( echo "${CGROUP}" | sed "s|^.*kubepods[_/].*[_/]pod[a-fA-F0-9-]\+[_/]\([a-fA-F0-9]\+\)$|\1|" )"
+ # echo "DOCKERID=${DOCKERID}"
+
+ if [ ! -z "${DOCKERID}" -a \( ${#DOCKERID} -eq 64 -o ${#DOCKERID} -eq 12 \) ]
+ then
+ docker_get_name "${DOCKERID}"
+ else
+ error "a docker id cannot be extracted from kubernetes cgroup '${CGROUP}'."
+ fi
+ elif [[ "${CGROUP}" =~ machine.slice[_/].*\.service ]]
+ then
+ # systemd-nspawn
+
+ NAME="$(echo ${CGROUP} | sed 's/.*machine.slice[_\/]\(.*\)\.service/\1/g')"
+
+ elif [[ "${CGROUP}" =~ machine.slice_machine.*-qemu ]]
+ then
+ # libvirtd / qemu virtual machines
+
+ # NAME="$(echo ${CGROUP} | sed 's/machine.slice_machine.*-qemu//; s/\/x2d//; s/\/x2d/\-/g; s/\.scope//g')"
+ NAME="qemu_$(echo ${CGROUP} | sed 's/machine.slice_machine.*-qemu//; s/\/x2d[[:digit:]]*//; s/\/x2d//g; s/\.scope//g')"
+
+ elif [[ "${CGROUP}" =~ machine_.*\.libvirt-qemu ]]
+ then
+ # libvirtd / qemu virtual machines
+ NAME="qemu_$(echo ${CGROUP} | sed 's/^machine_//; s/\.libvirt-qemu$//; s/-/_/;')"
+
+ elif [[ "${CGROUP}" =~ qemu.slice_([0-9]+).scope && -d /etc/pve ]]
+ then
+ # Proxmox VMs
+
+ FILENAME="/etc/pve/qemu-server/${BASH_REMATCH[1]}.conf"
+ if [[ -f $FILENAME && -r $FILENAME ]]
+ then
+ NAME="qemu_$(grep -e '^name: ' "/etc/pve/qemu-server/${BASH_REMATCH[1]}.conf" | head -1 | sed -rn 's|\s*name\s*:\s*(.*)?$|\1|p')"
+ else
+ error "proxmox config file missing ${FILENAME} or netdata does not have read access. Please ensure netdata is a member of www-data group."
+ fi
+ elif [[ "${CGROUP}" =~ lxc_([0-9]+) && -d /etc/pve ]]
+ then
+ # Proxmox Containers (LXC)
+
+ FILENAME="/etc/pve/lxc/${BASH_REMATCH[1]}.conf"
+ if [[ -f ${FILENAME} && -r ${FILENAME} ]]
+ then
+ NAME=$(grep -e '^hostname: ' /etc/pve/lxc/${BASH_REMATCH[1]}.conf | head -1 | sed -rn 's|\s*hostname\s*:\s*(.*)?$|\1|p')
+ else
+ error "proxmox config file missing ${FILENAME} or netdata does not have read access. Please ensure netdata is a member of www-data group."
+ fi
+ fi
+
+ [ -z "${NAME}" ] && NAME="${CGROUP}"
+ [ ${#NAME} -gt 100 ] && NAME="${NAME:0:100}"
+fi
+
+info "cgroup '${CGROUP}' is called '${NAME}'"
+echo "${NAME}"
diff --git a/collectors/cgroups.plugin/cgroup-name.sh.in b/collectors/cgroups.plugin/cgroup-name.sh.in
new file mode 100755
index 000000000..53696a4bf
--- /dev/null
+++ b/collectors/cgroups.plugin/cgroup-name.sh.in
@@ -0,0 +1,196 @@
+#!/usr/bin/env bash
+
+# netdata
+# real-time performance and health monitoring, done right!
+# (C) 2016 Costa Tsaousis <costa@tsaousis.gr>
+# SPDX-License-Identifier: GPL-3.0-or-later
+#
+# Script to find a better name for cgroups
+#
+
+export PATH="${PATH}:/sbin:/usr/sbin:/usr/local/sbin"
+export LC_ALL=C
+
+# -----------------------------------------------------------------------------
+
+PROGRAM_NAME="$(basename "${0}")"
+
+logdate() {
+ date "+%Y-%m-%d %H:%M:%S"
+}
+
+log() {
+ local status="${1}"
+ shift
+
+ echo >&2 "$(logdate): ${PROGRAM_NAME}: ${status}: ${*}"
+
+}
+
+warning() {
+ log WARNING "${@}"
+}
+
+error() {
+ log ERROR "${@}"
+}
+
+info() {
+ log INFO "${@}"
+}
+
+fatal() {
+ log FATAL "${@}"
+ exit 1
+}
+
+debug=0
+debug() {
+ [ $debug -eq 1 ] && log DEBUG "${@}"
+}
+
+# -----------------------------------------------------------------------------
+
+[ -z "${NETDATA_USER_CONFIG_DIR}" ] && NETDATA_USER_CONFIG_DIR="@configdir_POST@"
+[ -z "${NETDATA_STOCK_CONFIG_DIR}" ] && NETDATA_STOCK_CONFIG_DIR="@libconfigdir_POST@"
+
+DOCKER_HOST="${DOCKER_HOST:=/var/run/docker.sock}"
+CGROUP="${1}"
+NAME=
+
+# -----------------------------------------------------------------------------
+
+if [ -z "${CGROUP}" ]
+ then
+ fatal "called without a cgroup name. Nothing to do."
+fi
+
+for CONFIG in "${NETDATA_USER_CONFIG_DIR}/cgroups-names.conf" "${NETDATA_STOCK_CONFIG_DIR}/cgroups-names.conf"
+do
+ if [ -f "${CONFIG}" ]
+ then
+ NAME="$(grep "^${CGROUP} " "${CONFIG}" | sed "s/[[:space:]]\+/ /g" | cut -d ' ' -f 2)"
+ if [ -z "${NAME}" ]
+ then
+ info "cannot find cgroup '${CGROUP}' in '${CONFIG}'."
+ else
+ break
+ fi
+ #else
+ # info "configuration file '${CONFIG}' is not available."
+ fi
+done
+
+function docker_get_name_classic {
+ local id="${1}"
+ info "Running command: docker ps --filter=id=\"${id}\" --format=\"{{.Names}}\""
+ NAME="$( docker ps --filter=id="${id}" --format="{{.Names}}" )"
+ return 0
+}
+
+function docker_get_name_api {
+ local id="${1}"
+ if [ ! -S "${DOCKER_HOST}" ]
+ then
+ warning "Can't find ${DOCKER_HOST}"
+ return 1
+ fi
+ info "Running API command: /containers/${id}/json"
+ JSON=$(echo -e "GET /containers/${id}/json HTTP/1.0\r\n" | nc -U ${DOCKER_HOST} | grep '^{.*')
+ NAME=$(echo $JSON | jq -r .Name,.Config.Hostname | grep -v null | head -n1 | sed 's|^/||')
+ return 0
+}
+
+function docker_get_name {
+ local id="${1}"
+ if hash docker 2>/dev/null
+ then
+ docker_get_name_classic "${id}"
+ else
+ docker_get_name_api "${id}" || docker_get_name_classic "${id}"
+ fi
+ if [ -z "${NAME}" ]
+ then
+ warning "cannot find the name of docker container '${id}'"
+ NAME="${id:0:12}"
+ else
+ info "docker container '${id}' is named '${NAME}'"
+ fi
+}
+
+if [ -z "${NAME}" ]
+ then
+ if [[ "${CGROUP}" =~ ^.*docker[-_/\.][a-fA-F0-9]+[-_\.]?.*$ ]]
+ then
+ # docker containers
+
+ DOCKERID="$( echo "${CGROUP}" | sed "s|^.*docker[-_/]\([a-fA-F0-9]\+\)[-_\.]\?.*$|\1|" )"
+ # echo "DOCKERID=${DOCKERID}"
+
+ if [ ! -z "${DOCKERID}" -a \( ${#DOCKERID} -eq 64 -o ${#DOCKERID} -eq 12 \) ]
+ then
+ docker_get_name "${DOCKERID}"
+ else
+ error "a docker id cannot be extracted from docker cgroup '${CGROUP}'."
+ fi
+ elif [[ "${CGROUP}" =~ ^.*kubepods[_/].*[_/]pod[a-fA-F0-9-]+[_/][a-fA-F0-9]+$ ]]
+ then
+ # kubernetes
+
+ DOCKERID="$( echo "${CGROUP}" | sed "s|^.*kubepods[_/].*[_/]pod[a-fA-F0-9-]\+[_/]\([a-fA-F0-9]\+\)$|\1|" )"
+ # echo "DOCKERID=${DOCKERID}"
+
+ if [ ! -z "${DOCKERID}" -a \( ${#DOCKERID} -eq 64 -o ${#DOCKERID} -eq 12 \) ]
+ then
+ docker_get_name "${DOCKERID}"
+ else
+ error "a docker id cannot be extracted from kubernetes cgroup '${CGROUP}'."
+ fi
+ elif [[ "${CGROUP}" =~ machine.slice[_/].*\.service ]]
+ then
+ # systemd-nspawn
+
+ NAME="$(echo ${CGROUP} | sed 's/.*machine.slice[_\/]\(.*\)\.service/\1/g')"
+
+ elif [[ "${CGROUP}" =~ machine.slice_machine.*-qemu ]]
+ then
+ # libvirtd / qemu virtual machines
+
+ # NAME="$(echo ${CGROUP} | sed 's/machine.slice_machine.*-qemu//; s/\/x2d//; s/\/x2d/\-/g; s/\.scope//g')"
+ NAME="qemu_$(echo ${CGROUP} | sed 's/machine.slice_machine.*-qemu//; s/\/x2d[[:digit:]]*//; s/\/x2d//g; s/\.scope//g')"
+
+ elif [[ "${CGROUP}" =~ machine_.*\.libvirt-qemu ]]
+ then
+ # libvirtd / qemu virtual machines
+ NAME="qemu_$(echo ${CGROUP} | sed 's/^machine_//; s/\.libvirt-qemu$//; s/-/_/;')"
+
+ elif [[ "${CGROUP}" =~ qemu.slice_([0-9]+).scope && -d /etc/pve ]]
+ then
+ # Proxmox VMs
+
+ FILENAME="/etc/pve/qemu-server/${BASH_REMATCH[1]}.conf"
+ if [[ -f $FILENAME && -r $FILENAME ]]
+ then
+ NAME="qemu_$(grep -e '^name: ' "/etc/pve/qemu-server/${BASH_REMATCH[1]}.conf" | head -1 | sed -rn 's|\s*name\s*:\s*(.*)?$|\1|p')"
+ else
+ error "proxmox config file missing ${FILENAME} or netdata does not have read access. Please ensure netdata is a member of www-data group."
+ fi
+ elif [[ "${CGROUP}" =~ lxc_([0-9]+) && -d /etc/pve ]]
+ then
+ # Proxmox Containers (LXC)
+
+ FILENAME="/etc/pve/lxc/${BASH_REMATCH[1]}.conf"
+ if [[ -f ${FILENAME} && -r ${FILENAME} ]]
+ then
+ NAME=$(grep -e '^hostname: ' /etc/pve/lxc/${BASH_REMATCH[1]}.conf | head -1 | sed -rn 's|\s*hostname\s*:\s*(.*)?$|\1|p')
+ else
+ error "proxmox config file missing ${FILENAME} or netdata does not have read access. Please ensure netdata is a member of www-data group."
+ fi
+ fi
+
+ [ -z "${NAME}" ] && NAME="${CGROUP}"
+ [ ${#NAME} -gt 100 ] && NAME="${NAME:0:100}"
+fi
+
+info "cgroup '${CGROUP}' is called '${NAME}'"
+echo "${NAME}"
diff --git a/collectors/cgroups.plugin/cgroup-network-helper.sh b/collectors/cgroups.plugin/cgroup-network-helper.sh
new file mode 100755
index 000000000..666f02fc8
--- /dev/null
+++ b/collectors/cgroups.plugin/cgroup-network-helper.sh
@@ -0,0 +1,258 @@
+#!/usr/bin/env bash
+# shellcheck disable=SC1117
+
+# cgroup-network-helper.sh
+# detect container and virtual machine interfaces
+#
+# (C) 2017 Costa Tsaousis
+# SPDX-License-Identifier: GPL-3.0-or-later
+#
+# This script is called as root (by cgroup-network), with either a pid, or a cgroup path.
+# It tries to find all the network interfaces that belong to the same cgroup.
+#
+# It supports several methods for this detection:
+#
+# 1. cgroup-network (the binary parent of this script) detects veth network interfaces,
+# by examining iflink and ifindex IDs and switching namespaces
+# (it also detects the interface name as it is used by the container).
+#
+# 2. this script, uses /proc/PID/fdinfo to find tun/tap network interfaces.
+#
+# 3. this script, calls virsh to find libvirt network interfaces.
+#
+
+# -----------------------------------------------------------------------------
+
+# the system path is cleared by cgroup-network
+# shellcheck source=/dev/null
+[ -f /etc/profile ] && source /etc/profile
+
+export LC_ALL=C
+
+PROGRAM_NAME="$(basename "${0}")"
+
+logdate() {
+ date "+%Y-%m-%d %H:%M:%S"
+}
+
+log() {
+ local status="${1}"
+ shift
+
+ echo >&2 "$(logdate): ${PROGRAM_NAME}: ${status}: ${*}"
+
+}
+
+warning() {
+ log WARNING "${@}"
+}
+
+error() {
+ log ERROR "${@}"
+}
+
+info() {
+ log INFO "${@}"
+}
+
+fatal() {
+ log FATAL "${@}"
+ exit 1
+}
+
+debug=0
+debug() {
+ [ "${debug}" = "1" ] && log DEBUG "${@}"
+}
+
+# -----------------------------------------------------------------------------
+# check for BASH v4+ (required for associative arrays)
+
+[ $(( BASH_VERSINFO[0] )) -lt 4 ] && \
+ fatal "BASH version 4 or later is required (this is ${BASH_VERSION})."
+
+# -----------------------------------------------------------------------------
+# parse the arguments
+
+pid=
+cgroup=
+while [ ! -z "${1}" ]
+do
+ case "${1}" in
+ --cgroup) cgroup="${2}"; shift 1;;
+ --pid|-p) pid="${2}"; shift 1;;
+ --debug|debug) debug=1;;
+ *) fatal "Cannot understand argument '${1}'";;
+ esac
+
+ shift
+done
+
+if [ -z "${pid}" ] && [ -z "${cgroup}" ]
+then
+ fatal "Either --pid or --cgroup is required"
+fi
+
+# -----------------------------------------------------------------------------
+
+set_source() {
+ [ ${debug} -eq 1 ] && echo "SRC ${*}"
+}
+
+
+# -----------------------------------------------------------------------------
+# veth interfaces via cgroup
+
+# cgroup-network can detect veth interfaces by itself (written in C).
+# If you are looking for a shell version of what it does, check this:
+# https://github.com/netdata/netdata/issues/474#issuecomment-317866709
+
+
+# -----------------------------------------------------------------------------
+# tun/tap interfaces via /proc/PID/fdinfo
+
+# find any tun/tap devices linked to a pid
+proc_pid_fdinfo_iff() {
+ local p="${1}" # the pid
+
+ debug "Searching for tun/tap interfaces for pid ${p}..."
+ set_source "fdinfo"
+ grep "^iff:.*" "${NETDATA_HOST_PREFIX}/proc/${p}/fdinfo"/* 2>/dev/null | cut -f 2
+}
+
+find_tun_tap_interfaces_for_cgroup() {
+ local c="${1}" # the cgroup path
+
+ # for each pid of the cgroup
+ # find any tun/tap devices linked to the pid
+ if [ -f "${c}/emulator/cgroup.procs" ]
+ then
+ local p
+ for p in $(< "${c}/emulator/cgroup.procs" )
+ do
+ proc_pid_fdinfo_iff "${p}"
+ done
+ fi
+}
+
+
+# -----------------------------------------------------------------------------
+# virsh domain network interfaces
+
+virsh_cgroup_to_domain_name() {
+ local c="${1}" # the cgroup path
+
+ debug "extracting a possible virsh domain from cgroup ${c}..."
+
+ # extract for the cgroup path
+ sed -n -e "s|.*/machine-qemu\\\\x2d[0-9]\+\\\\x2d\(.*\)\.scope$|\1|p" \
+ -e "s|.*/machine/\(.*\)\.libvirt-qemu$|\1|p" \
+ <<EOF
+${c}
+EOF
+}
+
+virsh_find_all_interfaces_for_cgroup() {
+ local c="${1}" # the cgroup path
+
+ # the virsh command
+ local virsh
+ # shellcheck disable=SC2230
+ virsh="$(which virsh 2>/dev/null || command -v virsh 2>/dev/null)"
+
+ if [ ! -z "${virsh}" ]
+ then
+ local d
+ d="$(virsh_cgroup_to_domain_name "${c}")"
+
+ if [ ! -z "${d}" ]
+ then
+ debug "running: virsh domiflist ${d}; to find the network interfaces"
+
+ # match only 'network' interfaces from virsh output
+
+ set_source "virsh"
+ "${virsh}" -r domiflist "${d}" |\
+ sed -n \
+ -e "s|^\([^[:space:]]\+\)[[:space:]]\+network[[:space:]]\+\([^[:space:]]\+\)[[:space:]]\+[^[:space:]]\+[[:space:]]\+[^[:space:]]\+$|\1 \1_\2|p" \
+ -e "s|^\([^[:space:]]\+\)[[:space:]]\+bridge[[:space:]]\+\([^[:space:]]\+\)[[:space:]]\+[^[:space:]]\+[[:space:]]\+[^[:space:]]\+$|\1 \1_\2|p"
+ else
+ debug "no virsh domain extracted from cgroup ${c}"
+ fi
+ else
+ debug "virsh command is not available"
+ fi
+}
+
+# -----------------------------------------------------------------------------
+
+find_all_interfaces_of_pid_or_cgroup() {
+ local p="${1}" c="${2}" # the pid and the cgroup path
+
+ if [ ! -z "${pid}" ]
+ then
+ # we have been called with a pid
+
+ proc_pid_fdinfo_iff "${p}"
+
+ elif [ ! -z "${c}" ]
+ then
+ # we have been called with a cgroup
+
+ info "searching for network interfaces of cgroup '${c}'"
+
+ find_tun_tap_interfaces_for_cgroup "${c}"
+ virsh_find_all_interfaces_for_cgroup "${c}"
+
+ else
+
+ error "Either a pid or a cgroup path is needed"
+ return 1
+
+ fi
+
+ return 0
+}
+
+# -----------------------------------------------------------------------------
+
+# an associative array to store the interfaces
+# the index is the interface name as seen by the host
+# the value is the interface name as seen by the guest / container
+declare -A devs=()
+
+# store all interfaces found in the associative array
+# this will also give the unique devices, as seen by the host
+last_src=
+# shellcheck disable=SC2162
+while read host_device guest_device
+do
+ [ -z "${host_device}" ] && continue
+
+ [ "${host_device}" = "SRC" ] && last_src="${guest_device}" && continue
+
+ # the default guest_device is the host_device
+ [ -z "${guest_device}" ] && guest_device="${host_device}"
+
+ # when we run in debug, show the source
+ debug "Found host device '${host_device}', guest device '${guest_device}', detected via '${last_src}'"
+
+ if [ -z "${devs[${host_device}]}" ] || [ "${devs[${host_device}]}" = "${host_device}" ]; then
+ devs[${host_device}]="${guest_device}"
+ fi
+
+done < <( find_all_interfaces_of_pid_or_cgroup "${pid}" "${cgroup}" )
+
+# print the interfaces found, in the format netdata expects them
+found=0
+for x in "${!devs[@]}"
+do
+ found=$((found + 1))
+ echo "${x} ${devs[${x}]}"
+done
+
+debug "found ${found} network interfaces for pid '${pid}', cgroup '${cgroup}', run as ${USER}, ${UID}"
+
+# let netdata know if we found any
+[ ${found} -eq 0 ] && exit 1
+exit 0
diff --git a/collectors/cgroups.plugin/cgroup-network.c b/collectors/cgroups.plugin/cgroup-network.c
new file mode 100644
index 000000000..0cf2a2633
--- /dev/null
+++ b/collectors/cgroups.plugin/cgroup-network.c
@@ -0,0 +1,682 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "libnetdata/libnetdata.h"
+
+#ifdef HAVE_SETNS
+#ifndef _GNU_SOURCE
+#define _GNU_SOURCE /* See feature_test_macros(7) */
+#endif
+#include <sched.h>
+#endif
+
+char environment_variable2[FILENAME_MAX + 50] = "";
+char *environment[] = {
+ "PATH=/bin:/sbin:/usr/bin:/usr/sbin:/usr/local/bin:/usr/local/sbin",
+ environment_variable2,
+ NULL
+};
+
+
+// ----------------------------------------------------------------------------
+
+// callback required by fatal()
+void netdata_cleanup_and_exit(int ret) {
+ exit(ret);
+}
+
+// callbacks required by popen()
+void signals_block(void) {};
+void signals_unblock(void) {};
+void signals_reset(void) {};
+
+// callback required by eval()
+int health_variable_lookup(const char *variable, uint32_t hash, struct rrdcalc *rc, calculated_number *result) {
+ (void)variable;
+ (void)hash;
+ (void)rc;
+ (void)result;
+ return 0;
+};
+
+// required by get_system_cpus()
+char *netdata_configured_host_prefix = "";
+
+// ----------------------------------------------------------------------------
+
+struct iface {
+ const char *device;
+ uint32_t hash;
+
+ unsigned int ifindex;
+ unsigned int iflink;
+
+ struct iface *next;
+};
+
+unsigned int read_iface_iflink(const char *prefix, const char *iface) {
+ if(!prefix) prefix = "";
+
+ char filename[FILENAME_MAX + 1];
+ snprintfz(filename, FILENAME_MAX, "%s/sys/class/net/%s/iflink", prefix, iface);
+
+ unsigned long long iflink = 0;
+ int ret = read_single_number_file(filename, &iflink);
+ if(ret) error("Cannot read '%s'.", filename);
+
+ return (unsigned int)iflink;
+}
+
+unsigned int read_iface_ifindex(const char *prefix, const char *iface) {
+ if(!prefix) prefix = "";
+
+ char filename[FILENAME_MAX + 1];
+ snprintfz(filename, FILENAME_MAX, "%s/sys/class/net/%s/ifindex", prefix, iface);
+
+ unsigned long long ifindex = 0;
+ int ret = read_single_number_file(filename, &ifindex);
+ if(ret) error("Cannot read '%s'.", filename);
+
+ return (unsigned int)ifindex;
+}
+
+struct iface *read_proc_net_dev(const char *prefix) {
+ if(!prefix) prefix = "";
+
+ procfile *ff = NULL;
+ char filename[FILENAME_MAX + 1];
+
+ snprintfz(filename, FILENAME_MAX, "%s%s", prefix, (*prefix)?"/proc/1/net/dev":"/proc/net/dev");
+ ff = procfile_open(filename, " \t,:|", PROCFILE_FLAG_DEFAULT);
+ if(unlikely(!ff)) {
+ error("Cannot open file '%s'", filename);
+ return NULL;
+ }
+
+ ff = procfile_readall(ff);
+ if(unlikely(!ff)) {
+ error("Cannot read file '%s'", filename);
+ return NULL;
+ }
+
+ size_t lines = procfile_lines(ff), l;
+ struct iface *root = NULL;
+ for(l = 2; l < lines ;l++) {
+ if (unlikely(procfile_linewords(ff, l) < 1)) continue;
+
+ struct iface *t = callocz(1, sizeof(struct iface));
+ t->device = strdupz(procfile_lineword(ff, l, 0));
+ t->hash = simple_hash(t->device);
+ t->ifindex = read_iface_ifindex(prefix, t->device);
+ t->iflink = read_iface_iflink(prefix, t->device);
+ t->next = root;
+ root = t;
+ }
+
+ procfile_close(ff);
+
+ return root;
+}
+
+void free_iface(struct iface *iface) {
+ freez((void *)iface->device);
+ freez(iface);
+}
+
+void free_host_ifaces(struct iface *iface) {
+ while(iface) {
+ struct iface *t = iface->next;
+ free_iface(iface);
+ iface = t;
+ }
+}
+
+int iface_is_eligible(struct iface *iface) {
+ if(iface->iflink != iface->ifindex)
+ return 1;
+
+ return 0;
+}
+
+int eligible_ifaces(struct iface *root) {
+ int eligible = 0;
+
+ struct iface *t;
+ for(t = root; t ; t = t->next)
+ if(iface_is_eligible(t))
+ eligible++;
+
+ return eligible;
+}
+
+static void continue_as_child(void) {
+ pid_t child = fork();
+ int status;
+ pid_t ret;
+
+ if (child < 0)
+ error("fork() failed");
+
+ /* Only the child returns */
+ if (child == 0)
+ return;
+
+ for (;;) {
+ ret = waitpid(child, &status, WUNTRACED);
+ if ((ret == child) && (WIFSTOPPED(status))) {
+ /* The child suspended so suspend us as well */
+ kill(getpid(), SIGSTOP);
+ kill(child, SIGCONT);
+ } else {
+ break;
+ }
+ }
+
+ /* Return the child's exit code if possible */
+ if (WIFEXITED(status)) {
+ exit(WEXITSTATUS(status));
+ } else if (WIFSIGNALED(status)) {
+ kill(getpid(), WTERMSIG(status));
+ }
+
+ exit(EXIT_FAILURE);
+}
+
+int proc_pid_fd(const char *prefix, const char *ns, pid_t pid) {
+ if(!prefix) prefix = "";
+
+ char filename[FILENAME_MAX + 1];
+ snprintfz(filename, FILENAME_MAX, "%s/proc/%d/%s", prefix, (int)pid, ns);
+ int fd = open(filename, O_RDONLY);
+
+ if(fd == -1)
+ error("Cannot open proc_pid_fd() file '%s'", filename);
+
+ return fd;
+}
+
+static struct ns {
+ int nstype;
+ int fd;
+ int status;
+ const char *name;
+ const char *path;
+} all_ns[] = {
+ // { .nstype = CLONE_NEWUSER, .fd = -1, .status = -1, .name = "user", .path = "ns/user" },
+ // { .nstype = CLONE_NEWCGROUP, .fd = -1, .status = -1, .name = "cgroup", .path = "ns/cgroup" },
+ // { .nstype = CLONE_NEWIPC, .fd = -1, .status = -1, .name = "ipc", .path = "ns/ipc" },
+ // { .nstype = CLONE_NEWUTS, .fd = -1, .status = -1, .name = "uts", .path = "ns/uts" },
+ { .nstype = CLONE_NEWNET, .fd = -1, .status = -1, .name = "network", .path = "ns/net" },
+ { .nstype = CLONE_NEWPID, .fd = -1, .status = -1, .name = "pid", .path = "ns/pid" },
+ { .nstype = CLONE_NEWNS, .fd = -1, .status = -1, .name = "mount", .path = "ns/mnt" },
+
+ // terminator
+ { .nstype = 0, .fd = -1, .status = -1, .name = NULL, .path = NULL }
+};
+
+int switch_namespace(const char *prefix, pid_t pid) {
+ if(!prefix) prefix = "";
+
+#ifdef HAVE_SETNS
+
+ int i;
+ for(i = 0; all_ns[i].name ; i++)
+ all_ns[i].fd = proc_pid_fd(prefix, all_ns[i].path, pid);
+
+ int root_fd = proc_pid_fd(prefix, "root", pid);
+ int cwd_fd = proc_pid_fd(prefix, "cwd", pid);
+
+ setgroups(0, NULL);
+
+    // 2 passes - as done in the nsenter source code;
+    // this is related to the CLONE_NEWUSER functionality
+
+    // This code cannot switch the user namespace (it can switch all the other namespaces).
+    // Fortunately, we don't need to switch user namespaces.
+
+ int pass, errors = 0;
+ for(pass = 0; pass < 2 ;pass++) {
+ for(i = 0; all_ns[i].name ; i++) {
+ if (all_ns[i].fd != -1 && all_ns[i].status == -1) {
+ if(setns(all_ns[i].fd, all_ns[i].nstype) == -1) {
+ if(pass == 1) {
+ all_ns[i].status = 0;
+ error("Cannot switch to %s namespace of pid %d", all_ns[i].name, (int) pid);
+ errors++;
+ }
+ }
+ else
+ all_ns[i].status = 1;
+ }
+ }
+ }
+
+ setgroups(0, NULL);
+
+ if(root_fd != -1) {
+ if(fchdir(root_fd) < 0)
+ error("Cannot fchdir() to pid %d root directory", (int)pid);
+
+ if(chroot(".") < 0)
+ error("Cannot chroot() to pid %d root directory", (int)pid);
+
+ close(root_fd);
+ }
+
+ if(cwd_fd != -1) {
+ if(fchdir(cwd_fd) < 0)
+ error("Cannot fchdir() to pid %d current working directory", (int)pid);
+
+ close(cwd_fd);
+ }
+
+ int do_fork = 0;
+ for(i = 0; all_ns[i].name ; i++)
+ if(all_ns[i].fd != -1) {
+
+ // CLONE_NEWPID requires a fork() to become effective
+ if(all_ns[i].nstype == CLONE_NEWPID && all_ns[i].status)
+ do_fork = 1;
+
+ close(all_ns[i].fd);
+ }
+
+ if(do_fork)
+ continue_as_child();
+
+ return 0;
+
+#else
+
+ errno = ENOSYS;
+ error("setns() is missing on this system.");
+ return 1;
+
+#endif
+}
+
+pid_t read_pid_from_cgroup_file(const char *filename) {
+ int fd = open(filename, procfile_open_flags);
+ if(fd == -1) {
+ error("Cannot open pid_from_cgroup() file '%s'.", filename);
+ return 0;
+ }
+
+ FILE *fp = fdopen(fd, "r");
+ if(!fp) {
+ error("Cannot upgrade fd to fp for file '%s'.", filename);
+ return 0;
+ }
+
+ char buffer[100 + 1];
+ pid_t pid = 0;
+ char *s;
+ while((s = fgets(buffer, 100, fp))) {
+ buffer[100] = '\0';
+ pid = atoi(s);
+ if(pid > 0) break;
+ }
+
+ fclose(fp);
+ return pid;
+}
+
+pid_t read_pid_from_cgroup_files(const char *path) {
+ char filename[FILENAME_MAX + 1];
+
+ snprintfz(filename, FILENAME_MAX, "%s/cgroup.procs", path);
+ pid_t pid = read_pid_from_cgroup_file(filename);
+ if(pid > 0) return pid;
+
+ snprintfz(filename, FILENAME_MAX, "%s/tasks", path);
+ return read_pid_from_cgroup_file(filename);
+}
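+
+// illustrative content (assumed PIDs): both cgroup.procs and tasks list one
+// PID per line, e.g. "1234\n5678" - the first positive number found is used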
+
+pid_t read_pid_from_cgroup(const char *path) {
+ pid_t pid = read_pid_from_cgroup_files(path);
+ if (pid > 0) return pid;
+
+ DIR *dir = opendir(path);
+ if (!dir) {
+ error("cannot read directory '%s'", path);
+ return 0;
+ }
+
+ struct dirent *de = NULL;
+ while ((de = readdir(dir))) {
+ if (de->d_type == DT_DIR
+ && (
+ (de->d_name[0] == '.' && de->d_name[1] == '\0')
+ || (de->d_name[0] == '.' && de->d_name[1] == '.' && de->d_name[2] == '\0')
+ ))
+ continue;
+
+ if (de->d_type == DT_DIR) {
+ char filename[FILENAME_MAX + 1];
+ snprintfz(filename, FILENAME_MAX, "%s/%s", path, de->d_name);
+ pid = read_pid_from_cgroup(filename);
+ if(pid > 0) break;
+ }
+ }
+ closedir(dir);
+ return pid;
+}
+
+// ----------------------------------------------------------------------------
+// send the result to netdata
+
+struct found_device {
+ const char *host_device;
+ const char *guest_device;
+
+ uint32_t host_device_hash;
+
+ struct found_device *next;
+} *detected_devices = NULL;
+
+void add_device(const char *host, const char *guest) {
+ uint32_t hash = simple_hash(host);
+
+ if(guest && (!*guest || strcmp(host, guest) == 0))
+ guest = NULL;
+
+ struct found_device *f;
+ for(f = detected_devices; f ; f = f->next) {
+ if(f->host_device_hash == hash && strcmp(host, f->host_device) == 0) {
+
+ if(guest && !f->guest_device)
+ f->guest_device = strdupz(guest);
+
+ return;
+ }
+ }
+
+ f = mallocz(sizeof(struct found_device));
+ f->host_device = strdupz(host);
+ f->host_device_hash = hash;
+ f->guest_device = (guest)?strdupz(guest):NULL;
+ f->next = detected_devices;
+ detected_devices = f;
+}
+
+int send_devices(void) {
+ int found = 0;
+
+ struct found_device *f;
+ for(f = detected_devices; f ; f = f->next) {
+ found++;
+ printf("%s %s\n", f->host_device, (f->guest_device)?f->guest_device:f->host_device);
+ }
+
+ return found;
+}
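+
+// illustrative output (hypothetical names): one "host guest" pair per line,
+// e.g. "veth1234ab eth0" - when no guest name is known, the host name is repeated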
+
+// ----------------------------------------------------------------------------
+// this function should be called only **ONCE**
+// and it has to be the **LAST** one called,
+// since it switches namespaces - after this call, everything is different!
+
+void detect_veth_interfaces(pid_t pid) {
+ struct iface *host = NULL, *cgroup = NULL, *h, *c;
+
+ host = read_proc_net_dev(netdata_configured_host_prefix);
+ if(!host) {
+ errno = 0;
+ error("cannot read host interface list.");
+ goto cleanup;
+ }
+
+ if(!eligible_ifaces(host)) {
+ errno = 0;
+ error("there are no double-linked host interfaces available.");
+ goto cleanup;
+ }
+
+ if(switch_namespace(netdata_configured_host_prefix, pid)) {
+ errno = 0;
+ error("cannot switch to the namespace of pid %u", (unsigned int) pid);
+ goto cleanup;
+ }
+
+ cgroup = read_proc_net_dev(NULL);
+ if(!cgroup) {
+ errno = 0;
+ error("cannot read cgroup interface list.");
+ goto cleanup;
+ }
+
+ if(!eligible_ifaces(cgroup)) {
+ errno = 0;
+ error("there are not double-linked cgroup interfaces available.");
+ goto cleanup;
+ }
+
+ for(h = host; h ; h = h->next) {
+ if(iface_is_eligible(h)) {
+ for (c = cgroup; c; c = c->next) {
+ if(iface_is_eligible(c) && h->ifindex == c->iflink && h->iflink == c->ifindex) {
+ add_device(h->device, c->device);
+ }
+ }
+ }
+ }
+
+cleanup:
+ free_host_ifaces(cgroup);
+ free_host_ifaces(host);
+}
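+
+// illustrative pairing (assumed index values): a host-side "veth1234ab" with
+// ifindex=10/iflink=3 matches a container-side "eth0" with ifindex=3/iflink=10,
+// so add_device("veth1234ab", "eth0") is called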
+
+// ----------------------------------------------------------------------------
+// call the external helper
+
+#define CGROUP_NETWORK_INTERFACE_MAX_LINE 2048
+void call_the_helper(pid_t pid, const char *cgroup) {
+ if(setresuid(0, 0, 0) == -1)
+ error("setresuid(0, 0, 0) failed.");
+
+ char command[CGROUP_NETWORK_INTERFACE_MAX_LINE + 1];
+ if(cgroup)
+ snprintfz(command, CGROUP_NETWORK_INTERFACE_MAX_LINE, "exec " PLUGINS_DIR "/cgroup-network-helper.sh --cgroup '%s'", cgroup);
+ else
+ snprintfz(command, CGROUP_NETWORK_INTERFACE_MAX_LINE, "exec " PLUGINS_DIR "/cgroup-network-helper.sh --pid %d", pid);
+
+ info("running: %s", command);
+
+ pid_t cgroup_pid;
+ FILE *fp = mypopene(command, &cgroup_pid, environment);
+ if(fp) {
+ char buffer[CGROUP_NETWORK_INTERFACE_MAX_LINE + 1];
+ char *s;
+ while((s = fgets(buffer, CGROUP_NETWORK_INTERFACE_MAX_LINE, fp))) {
+ trim(s);
+
+ if(*s && *s != '\n') {
+ char *t = s;
+ while(*t && *t != ' ') t++;
+ if(*t == ' ') {
+ *t = '\0';
+ t++;
+ }
+
+ if(!*s || !*t) continue;
+ add_device(s, t);
+ }
+ }
+
+ mypclose(fp, cgroup_pid);
+ }
+ else
+ error("cannot execute cgroup-network helper script: %s", command);
+}
+
+int is_valid_path_symbol(char c) {
+ switch(c) {
+ case '/': // path separators
+ case '\\': // needed for virsh domains \x2d1\x2dname
+ case ' ': // space
+ case '-': // hyphen
+ case '_': // underscore
+ case '.': // dot
+ case ',': // comma
+ return 1;
+
+ default:
+ return 0;
+ }
+}
+
+// we will pass this path to a shell script running as root,
+// so we need to make sure the path is valid
+// and does not include anything that could allow
+// the caller to use shell expansion for gaining escalated
+// privileges.
+int verify_path(const char *path) {
+ struct stat sb;
+
+ char c;
+ const char *s = path;
+ while((c = *s++)) {
+ if(!( isalnum(c) || is_valid_path_symbol(c) )) {
+ error("invalid character in path '%s'", path);
+ return -1;
+ }
+ }
+
+ if(strstr(path, "\\") && !strstr(path, "\\x")) {
+ error("invalid escape sequence in path '%s'", path);
+        return -1;
+ }
+
+ if(strstr(path, "/../")) {
+ error("invalid parent path sequence detected in '%s'", path);
+        return -1;
+ }
+
+ if(path[0] != '/') {
+ error("only absolute path names are supported - invalid path '%s'", path);
+ return -1;
+ }
+
+ if (stat(path, &sb) == -1) {
+ error("cannot stat() path '%s'", path);
+ return -1;
+ }
+
+ if((sb.st_mode & S_IFMT) != S_IFDIR) {
+ error("path '%s' is not a directory", path);
+ return -1;
+ }
+
+ return 0;
+}
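+
+// illustrative checks (hypothetical paths): "/sys/fs/cgroup/cpu,cpuacct/docker"
+// passes the character and format checks (it must also exist as a directory),
+// while "relative/path", "/a/../b" or a path containing "$(...)" are rejected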
+
+/*
+char *fix_path_variable(void) {
+ const char *path = getenv("PATH");
+ if(!path || !*path) return 0;
+
+ char *p = strdupz(path);
+ char *safe_path = callocz(1, strlen(p) + strlen("PATH=") + 1);
+ strcpy(safe_path, "PATH=");
+
+ int added = 0;
+ char *ptr = p;
+ while(ptr && *ptr) {
+ char *s = strsep(&ptr, ":");
+ if(s && *s) {
+ if(verify_path(s) == -1) {
+ error("the PATH variable includes an invalid path '%s' - removed it.", s);
+ }
+ else {
+ info("the PATH variable includes a valid path '%s'.", s);
+ if(added) strcat(safe_path, ":");
+ strcat(safe_path, s);
+ added++;
+ }
+ }
+ }
+
+ info("unsafe PATH: '%s'.", path);
+ info(" safe PATH: '%s'.", safe_path);
+
+ freez(p);
+ return safe_path;
+}
+*/
+
+// ----------------------------------------------------------------------------
+// main
+
+void usage(void) {
+ fprintf(stderr, "%s [ -p PID | --pid PID | --cgroup /path/to/cgroup ]\n", program_name);
+ exit(1);
+}
+
+int main(int argc, char **argv) {
+ pid_t pid = 0;
+
+ program_name = argv[0];
+ program_version = VERSION;
+ error_log_syslog = 0;
+
+ // since cgroup-network runs as root, prevent it from opening symbolic links
+ procfile_open_flags = O_RDONLY|O_NOFOLLOW;
+
+ // ------------------------------------------------------------------------
+ // make sure NETDATA_HOST_PREFIX is safe
+
+ netdata_configured_host_prefix = getenv("NETDATA_HOST_PREFIX");
+ if(verify_netdata_host_prefix() == -1) exit(1);
+
+ if(netdata_configured_host_prefix[0] != '\0' && verify_path(netdata_configured_host_prefix) == -1)
+ fatal("invalid NETDATA_HOST_PREFIX '%s'", netdata_configured_host_prefix);
+
+ // ------------------------------------------------------------------------
+ // build a safe environment for our script
+
+    // environment[0] above is a fixed PATH=; here we fill the second entry with NETDATA_HOST_PREFIX for the helper script
+ snprintfz(environment_variable2, sizeof(environment_variable2) - 1, "NETDATA_HOST_PREFIX=%s", netdata_configured_host_prefix);
+
+ // ------------------------------------------------------------------------
+
+ if(argc == 2 && (!strcmp(argv[1], "version") || !strcmp(argv[1], "-version") || !strcmp(argv[1], "--version") || !strcmp(argv[1], "-v") || !strcmp(argv[1], "-V"))) {
+ fprintf(stderr, "cgroup-network %s\n", VERSION);
+ exit(0);
+ }
+
+ if(argc != 3)
+ usage();
+
+ if(!strcmp(argv[1], "-p") || !strcmp(argv[1], "--pid")) {
+ pid = atoi(argv[2]);
+
+ if(pid <= 0) {
+ errno = 0;
+ error("Invalid pid %d given", (int) pid);
+ return 2;
+ }
+
+ call_the_helper(pid, NULL);
+ }
+ else if(!strcmp(argv[1], "--cgroup")) {
+ char *cgroup = argv[2];
+ if(verify_path(cgroup) == -1)
+ fatal("cgroup '%s' does not exist or is not valid.", cgroup);
+
+ pid = read_pid_from_cgroup(cgroup);
+ call_the_helper(pid, cgroup);
+
+ if(pid <= 0 && !detected_devices) {
+ errno = 0;
+ error("Cannot find a cgroup PID from cgroup '%s'", cgroup);
+ }
+ }
+ else
+ usage();
+
+ if(pid > 0)
+ detect_veth_interfaces(pid);
+
+ int found = send_devices();
+ if(found <= 0) return 1;
+ return 0;
+}
diff --git a/collectors/cgroups.plugin/sys_fs_cgroup.c b/collectors/cgroups.plugin/sys_fs_cgroup.c
new file mode 100644
index 000000000..9c0fd7f43
--- /dev/null
+++ b/collectors/cgroups.plugin/sys_fs_cgroup.c
@@ -0,0 +1,2771 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "sys_fs_cgroup.h"
+
+#define PLUGIN_CGROUPS_NAME "cgroups.plugin"
+#define PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME "systemd"
+#define PLUGIN_CGROUPS_MODULE_CGROUPS_NAME "/sys/fs/cgroup"
+
+// ----------------------------------------------------------------------------
+// cgroup globals
+
+static long system_page_size = 4096; // system will be queried via sysconf() in configuration()
+
+static int cgroup_enable_cpuacct_stat = CONFIG_BOOLEAN_AUTO;
+static int cgroup_enable_cpuacct_usage = CONFIG_BOOLEAN_AUTO;
+static int cgroup_enable_memory = CONFIG_BOOLEAN_AUTO;
+static int cgroup_enable_detailed_memory = CONFIG_BOOLEAN_AUTO;
+static int cgroup_enable_memory_failcnt = CONFIG_BOOLEAN_AUTO;
+static int cgroup_enable_swap = CONFIG_BOOLEAN_AUTO;
+static int cgroup_enable_blkio_io = CONFIG_BOOLEAN_AUTO;
+static int cgroup_enable_blkio_ops = CONFIG_BOOLEAN_AUTO;
+static int cgroup_enable_blkio_throttle_io = CONFIG_BOOLEAN_AUTO;
+static int cgroup_enable_blkio_throttle_ops = CONFIG_BOOLEAN_AUTO;
+static int cgroup_enable_blkio_merged_ops = CONFIG_BOOLEAN_AUTO;
+static int cgroup_enable_blkio_queued_ops = CONFIG_BOOLEAN_AUTO;
+
+static int cgroup_enable_systemd_services = CONFIG_BOOLEAN_YES;
+static int cgroup_enable_systemd_services_detailed_memory = CONFIG_BOOLEAN_NO;
+static int cgroup_used_memory_without_cache = CONFIG_BOOLEAN_YES;
+
+static int cgroup_search_in_devices = 1;
+
+static int cgroup_enable_new_cgroups_detected_at_runtime = 1;
+static int cgroup_check_for_new_every = 10;
+static int cgroup_update_every = 1;
+
+static int cgroup_recheck_zero_blkio_every_iterations = 10;
+static int cgroup_recheck_zero_mem_failcnt_every_iterations = 10;
+static int cgroup_recheck_zero_mem_detailed_every_iterations = 10;
+
+static char *cgroup_cpuacct_base = NULL;
+static char *cgroup_blkio_base = NULL;
+static char *cgroup_memory_base = NULL;
+static char *cgroup_devices_base = NULL;
+
+static int cgroup_root_count = 0;
+static int cgroup_root_max = 1000;
+static int cgroup_max_depth = 0;
+
+static SIMPLE_PATTERN *enabled_cgroup_patterns = NULL;
+static SIMPLE_PATTERN *enabled_cgroup_paths = NULL;
+static SIMPLE_PATTERN *enabled_cgroup_renames = NULL;
+static SIMPLE_PATTERN *systemd_services_cgroups = NULL;
+
+static char *cgroups_rename_script = NULL;
+static char *cgroups_network_interface_script = NULL;
+
+static int cgroups_check = 0;
+
+static uint32_t Read_hash = 0;
+static uint32_t Write_hash = 0;
+static uint32_t user_hash = 0;
+static uint32_t system_hash = 0;
+
+void read_cgroup_plugin_configuration() {
+ system_page_size = sysconf(_SC_PAGESIZE);
+
+ Read_hash = simple_hash("Read");
+ Write_hash = simple_hash("Write");
+ user_hash = simple_hash("user");
+ system_hash = simple_hash("system");
+
+ cgroup_update_every = (int)config_get_number("plugin:cgroups", "update every", localhost->rrd_update_every);
+ if(cgroup_update_every < localhost->rrd_update_every)
+ cgroup_update_every = localhost->rrd_update_every;
+
+ cgroup_check_for_new_every = (int)config_get_number("plugin:cgroups", "check for new cgroups every", (long long)cgroup_check_for_new_every * (long long)cgroup_update_every);
+ if(cgroup_check_for_new_every < cgroup_update_every)
+ cgroup_check_for_new_every = cgroup_update_every;
+
+ cgroup_enable_cpuacct_stat = config_get_boolean_ondemand("plugin:cgroups", "enable cpuacct stat (total CPU)", cgroup_enable_cpuacct_stat);
+ cgroup_enable_cpuacct_usage = config_get_boolean_ondemand("plugin:cgroups", "enable cpuacct usage (per core CPU)", cgroup_enable_cpuacct_usage);
+
+ cgroup_enable_memory = config_get_boolean_ondemand("plugin:cgroups", "enable memory (used mem including cache)", cgroup_enable_memory);
+ cgroup_enable_detailed_memory = config_get_boolean_ondemand("plugin:cgroups", "enable detailed memory", cgroup_enable_detailed_memory);
+ cgroup_enable_memory_failcnt = config_get_boolean_ondemand("plugin:cgroups", "enable memory limits fail count", cgroup_enable_memory_failcnt);
+ cgroup_enable_swap = config_get_boolean_ondemand("plugin:cgroups", "enable swap memory", cgroup_enable_swap);
+
+ cgroup_enable_blkio_io = config_get_boolean_ondemand("plugin:cgroups", "enable blkio bandwidth", cgroup_enable_blkio_io);
+ cgroup_enable_blkio_ops = config_get_boolean_ondemand("plugin:cgroups", "enable blkio operations", cgroup_enable_blkio_ops);
+ cgroup_enable_blkio_throttle_io = config_get_boolean_ondemand("plugin:cgroups", "enable blkio throttle bandwidth", cgroup_enable_blkio_throttle_io);
+ cgroup_enable_blkio_throttle_ops = config_get_boolean_ondemand("plugin:cgroups", "enable blkio throttle operations", cgroup_enable_blkio_throttle_ops);
+ cgroup_enable_blkio_queued_ops = config_get_boolean_ondemand("plugin:cgroups", "enable blkio queued operations", cgroup_enable_blkio_queued_ops);
+ cgroup_enable_blkio_merged_ops = config_get_boolean_ondemand("plugin:cgroups", "enable blkio merged operations", cgroup_enable_blkio_merged_ops);
+
+ cgroup_recheck_zero_blkio_every_iterations = (int)config_get_number("plugin:cgroups", "recheck zero blkio every iterations", cgroup_recheck_zero_blkio_every_iterations);
+ cgroup_recheck_zero_mem_failcnt_every_iterations = (int)config_get_number("plugin:cgroups", "recheck zero memory failcnt every iterations", cgroup_recheck_zero_mem_failcnt_every_iterations);
+ cgroup_recheck_zero_mem_detailed_every_iterations = (int)config_get_number("plugin:cgroups", "recheck zero detailed memory every iterations", cgroup_recheck_zero_mem_detailed_every_iterations);
+
+ cgroup_enable_systemd_services = config_get_boolean("plugin:cgroups", "enable systemd services", cgroup_enable_systemd_services);
+ cgroup_enable_systemd_services_detailed_memory = config_get_boolean("plugin:cgroups", "enable systemd services detailed memory", cgroup_enable_systemd_services_detailed_memory);
+ cgroup_used_memory_without_cache = config_get_boolean("plugin:cgroups", "report used memory without cache", cgroup_used_memory_without_cache);
+
+ char filename[FILENAME_MAX + 1], *s;
+ struct mountinfo *mi, *root = mountinfo_read(0);
+
+ mi = mountinfo_find_by_filesystem_super_option(root, "cgroup", "cpuacct");
+ if(!mi) mi = mountinfo_find_by_filesystem_mount_source(root, "cgroup", "cpuacct");
+ if(!mi) {
+ error("CGROUP: cannot find cpuacct mountinfo. Assuming default: /sys/fs/cgroup/cpuacct");
+ s = "/sys/fs/cgroup/cpuacct";
+ }
+ else s = mi->mount_point;
+ snprintfz(filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, s);
+ cgroup_cpuacct_base = config_get("plugin:cgroups", "path to /sys/fs/cgroup/cpuacct", filename);
+
+ mi = mountinfo_find_by_filesystem_super_option(root, "cgroup", "blkio");
+ if(!mi) mi = mountinfo_find_by_filesystem_mount_source(root, "cgroup", "blkio");
+ if(!mi) {
+ error("CGROUP: cannot find blkio mountinfo. Assuming default: /sys/fs/cgroup/blkio");
+ s = "/sys/fs/cgroup/blkio";
+ }
+ else s = mi->mount_point;
+ snprintfz(filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, s);
+ cgroup_blkio_base = config_get("plugin:cgroups", "path to /sys/fs/cgroup/blkio", filename);
+
+ mi = mountinfo_find_by_filesystem_super_option(root, "cgroup", "memory");
+ if(!mi) mi = mountinfo_find_by_filesystem_mount_source(root, "cgroup", "memory");
+ if(!mi) {
+ error("CGROUP: cannot find memory mountinfo. Assuming default: /sys/fs/cgroup/memory");
+ s = "/sys/fs/cgroup/memory";
+ }
+ else s = mi->mount_point;
+ snprintfz(filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, s);
+ cgroup_memory_base = config_get("plugin:cgroups", "path to /sys/fs/cgroup/memory", filename);
+
+ mi = mountinfo_find_by_filesystem_super_option(root, "cgroup", "devices");
+ if(!mi) mi = mountinfo_find_by_filesystem_mount_source(root, "cgroup", "devices");
+ if(!mi) {
+ error("CGROUP: cannot find devices mountinfo. Assuming default: /sys/fs/cgroup/devices");
+ s = "/sys/fs/cgroup/devices";
+ }
+ else s = mi->mount_point;
+ snprintfz(filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, s);
+ cgroup_devices_base = config_get("plugin:cgroups", "path to /sys/fs/cgroup/devices", filename);
+
+ cgroup_root_max = (int)config_get_number("plugin:cgroups", "max cgroups to allow", cgroup_root_max);
+ cgroup_max_depth = (int)config_get_number("plugin:cgroups", "max cgroups depth to monitor", cgroup_max_depth);
+
+ cgroup_enable_new_cgroups_detected_at_runtime = config_get_boolean("plugin:cgroups", "enable new cgroups detected at run time", cgroup_enable_new_cgroups_detected_at_runtime);
+
+ enabled_cgroup_patterns = simple_pattern_create(
+ config_get("plugin:cgroups", "enable by default cgroups matching",
+ // ----------------------------------------------------------------
+
+ " !*/init.scope " // ignore init.scope
+ " !/system.slice/run-*.scope " // ignore system.slice/run-XXXX.scope
+ " *.scope " // we need all other *.scope for sure
+
+ // ----------------------------------------------------------------
+
+ " /machine.slice/*.service " // #3367 systemd-nspawn
+
+ // ----------------------------------------------------------------
+
+ " !*/vcpu* " // libvirtd adds these sub-cgroups
+ " !*/emulator " // libvirtd adds these sub-cgroups
+ " !*.mount "
+ " !*.partition "
+ " !*.service "
+ " !*.socket "
+ " !*.slice "
+ " !*.swap "
+ " !*.user "
+ " !/ "
+ " !/docker "
+ " !/libvirt "
+ " !/lxc "
+ " !/lxc/*/* " // #1397 #2649
+ " !/machine "
+ " !/qemu "
+ " !/system "
+ " !/systemd "
+ " !/user "
+ " * " // enable anything else
+ ), NULL, SIMPLE_PATTERN_EXACT);
+
+ enabled_cgroup_paths = simple_pattern_create(
+ config_get("plugin:cgroups", "search for cgroups in subpaths matching",
+ " !*/init.scope " // ignore init.scope
+ " !*-qemu " // #345
+ " !*.libvirt-qemu " // #3010
+ " !/init.scope "
+ " !/system "
+ " !/systemd "
+ " !/user "
+ " !/user.slice "
+ " !/lxc/*/* " // #2161 #2649
+ " * "
+ ), NULL, SIMPLE_PATTERN_EXACT);
+
+ snprintfz(filename, FILENAME_MAX, "%s/cgroup-name.sh", netdata_configured_plugins_dir);
+ cgroups_rename_script = config_get("plugin:cgroups", "script to get cgroup names", filename);
+
+ snprintfz(filename, FILENAME_MAX, "%s/cgroup-network", netdata_configured_plugins_dir);
+ cgroups_network_interface_script = config_get("plugin:cgroups", "script to get cgroup network interfaces", filename);
+
+ enabled_cgroup_renames = simple_pattern_create(
+ config_get("plugin:cgroups", "run script to rename cgroups matching",
+ " !/ "
+ " !*.mount "
+ " !*.socket "
+ " !*.partition "
+ " /machine.slice/*.service " // #3367 systemd-nspawn
+ " !*.service "
+ " !*.slice "
+ " !*.swap "
+ " !*.user "
+ " !init.scope "
+ " !*.scope/vcpu* " // libvirtd adds these sub-cgroups
+ " !*.scope/emulator " // libvirtd adds these sub-cgroups
+ " *.scope "
+ " *docker* "
+ " *lxc* "
+ " *qemu* "
+ " *kubepods* " // #3396 kubernetes
+ " *.libvirt-qemu " // #3010
+ " * "
+ ), NULL, SIMPLE_PATTERN_EXACT);
+
+ if(cgroup_enable_systemd_services) {
+ systemd_services_cgroups = simple_pattern_create(
+ config_get("plugin:cgroups", "cgroups to match as systemd services",
+ " !/system.slice/*/*.service "
+ " /system.slice/*.service "
+ ), NULL, SIMPLE_PATTERN_EXACT);
+ }
+
+ mountinfo_free_all(root);
+}
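+
+// illustrative netdata.conf fragment (assumed values) matching the options read above:
+//   [plugin:cgroups]
+//       update every = 1
+//       enable memory (used mem including cache) = auto
+//       path to /sys/fs/cgroup/memory = /sys/fs/cgroup/memory
+//       max cgroups to allow = 1000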
+
+// ----------------------------------------------------------------------------
+// cgroup objects
+
+struct blkio {
+ int updated;
+ int enabled; // CONFIG_BOOLEAN_YES or CONFIG_BOOLEAN_AUTO
+ int delay_counter;
+
+ char *filename;
+
+ unsigned long long Read;
+ unsigned long long Write;
+/*
+ unsigned long long Sync;
+ unsigned long long Async;
+ unsigned long long Total;
+*/
+};
+
+// https://www.kernel.org/doc/Documentation/cgroup-v1/memory.txt
+struct memory {
+ ARL_BASE *arl_base;
+ ARL_ENTRY *arl_dirty;
+ ARL_ENTRY *arl_swap;
+
+ int updated_detailed;
+ int updated_usage_in_bytes;
+ int updated_msw_usage_in_bytes;
+ int updated_failcnt;
+
+ int enabled_detailed; // CONFIG_BOOLEAN_YES or CONFIG_BOOLEAN_AUTO
+ int enabled_usage_in_bytes; // CONFIG_BOOLEAN_YES or CONFIG_BOOLEAN_AUTO
+ int enabled_msw_usage_in_bytes; // CONFIG_BOOLEAN_YES or CONFIG_BOOLEAN_AUTO
+ int enabled_failcnt; // CONFIG_BOOLEAN_YES or CONFIG_BOOLEAN_AUTO
+
+ int delay_counter_detailed;
+ int delay_counter_failcnt;
+
+ char *filename_detailed;
+ char *filename_usage_in_bytes;
+ char *filename_msw_usage_in_bytes;
+ char *filename_failcnt;
+
+ int detailed_has_dirty;
+ int detailed_has_swap;
+
+ // detailed metrics
+ unsigned long long cache;
+ unsigned long long rss;
+ unsigned long long rss_huge;
+ unsigned long long mapped_file;
+ unsigned long long writeback;
+ unsigned long long dirty;
+ unsigned long long swap;
+ unsigned long long pgpgin;
+ unsigned long long pgpgout;
+ unsigned long long pgfault;
+ unsigned long long pgmajfault;
+/*
+ unsigned long long inactive_anon;
+ unsigned long long active_anon;
+ unsigned long long inactive_file;
+ unsigned long long active_file;
+ unsigned long long unevictable;
+ unsigned long long hierarchical_memory_limit;
+ unsigned long long total_cache;
+ unsigned long long total_rss;
+ unsigned long long total_rss_huge;
+ unsigned long long total_mapped_file;
+ unsigned long long total_writeback;
+ unsigned long long total_dirty;
+ unsigned long long total_swap;
+ unsigned long long total_pgpgin;
+ unsigned long long total_pgpgout;
+ unsigned long long total_pgfault;
+ unsigned long long total_pgmajfault;
+ unsigned long long total_inactive_anon;
+ unsigned long long total_active_anon;
+ unsigned long long total_inactive_file;
+ unsigned long long total_active_file;
+ unsigned long long total_unevictable;
+*/
+
+ // single file metrics
+ unsigned long long usage_in_bytes;
+ unsigned long long msw_usage_in_bytes;
+ unsigned long long failcnt;
+};
+
+// https://www.kernel.org/doc/Documentation/cgroup-v1/cpuacct.txt
+struct cpuacct_stat {
+ int updated;
+ int enabled; // CONFIG_BOOLEAN_YES or CONFIG_BOOLEAN_AUTO
+
+ char *filename;
+
+ unsigned long long user;
+ unsigned long long system;
+};
+
+// https://www.kernel.org/doc/Documentation/cgroup-v1/cpuacct.txt
+struct cpuacct_usage {
+ int updated;
+ int enabled; // CONFIG_BOOLEAN_YES or CONFIG_BOOLEAN_AUTO
+
+ char *filename;
+
+ unsigned int cpus;
+ unsigned long long *cpu_percpu;
+};
+
+struct cgroup_network_interface {
+ const char *host_device;
+ const char *container_device;
+ struct cgroup_network_interface *next;
+};
+
+#define CGROUP_OPTIONS_DISABLED_DUPLICATE 0x00000001
+#define CGROUP_OPTIONS_SYSTEM_SLICE_SERVICE 0x00000002
+
+struct cgroup {
+ uint32_t options;
+
+ char available; // found in the filesystem
+ char enabled; // enabled in the config
+
+ char *id;
+ uint32_t hash;
+
+ char *chart_id;
+ uint32_t hash_chart;
+
+ char *chart_title;
+
+ struct cpuacct_stat cpuacct_stat;
+ struct cpuacct_usage cpuacct_usage;
+
+ struct memory memory;
+
+ struct blkio io_service_bytes; // bytes
+ struct blkio io_serviced; // operations
+
+ struct blkio throttle_io_service_bytes; // bytes
+ struct blkio throttle_io_serviced; // operations
+
+ struct blkio io_merged; // operations
+ struct blkio io_queued; // operations
+
+ struct cgroup_network_interface *interfaces;
+
+ // per cgroup charts
+ RRDSET *st_cpu;
+ RRDSET *st_cpu_per_core;
+ RRDSET *st_mem;
+ RRDSET *st_writeback;
+ RRDSET *st_mem_activity;
+ RRDSET *st_pgfaults;
+ RRDSET *st_mem_usage;
+ RRDSET *st_mem_failcnt;
+ RRDSET *st_io;
+ RRDSET *st_serviced_ops;
+ RRDSET *st_throttle_io;
+ RRDSET *st_throttle_serviced_ops;
+ RRDSET *st_queued_ops;
+ RRDSET *st_merged_ops;
+
+ // services
+ RRDDIM *rd_cpu;
+ RRDDIM *rd_mem_usage;
+ RRDDIM *rd_mem_failcnt;
+ RRDDIM *rd_swap_usage;
+
+ RRDDIM *rd_mem_detailed_cache;
+ RRDDIM *rd_mem_detailed_rss;
+ RRDDIM *rd_mem_detailed_mapped;
+ RRDDIM *rd_mem_detailed_writeback;
+ RRDDIM *rd_mem_detailed_pgpgin;
+ RRDDIM *rd_mem_detailed_pgpgout;
+ RRDDIM *rd_mem_detailed_pgfault;
+ RRDDIM *rd_mem_detailed_pgmajfault;
+
+ RRDDIM *rd_io_service_bytes_read;
+ RRDDIM *rd_io_serviced_read;
+ RRDDIM *rd_throttle_io_read;
+ RRDDIM *rd_throttle_io_serviced_read;
+ RRDDIM *rd_io_queued_read;
+ RRDDIM *rd_io_merged_read;
+
+ RRDDIM *rd_io_service_bytes_write;
+ RRDDIM *rd_io_serviced_write;
+ RRDDIM *rd_throttle_io_write;
+ RRDDIM *rd_throttle_io_serviced_write;
+ RRDDIM *rd_io_queued_write;
+ RRDDIM *rd_io_merged_write;
+
+ struct cgroup *next;
+
+} *cgroup_root = NULL;
+
+// ----------------------------------------------------------------------------
+// read values from /sys
+
+static inline void cgroup_read_cpuacct_stat(struct cpuacct_stat *cp) {
+ static procfile *ff = NULL;
+
+ if(likely(cp->filename)) {
+ ff = procfile_reopen(ff, cp->filename, NULL, PROCFILE_FLAG_DEFAULT);
+ if(unlikely(!ff)) {
+ cp->updated = 0;
+ cgroups_check = 1;
+ return;
+ }
+
+ ff = procfile_readall(ff);
+ if(unlikely(!ff)) {
+ cp->updated = 0;
+ cgroups_check = 1;
+ return;
+ }
+
+ unsigned long i, lines = procfile_lines(ff);
+
+ if(unlikely(lines < 1)) {
+ error("CGROUP: file '%s' should have 1+ lines.", cp->filename);
+ cp->updated = 0;
+ return;
+ }
+
+ for(i = 0; i < lines ; i++) {
+ char *s = procfile_lineword(ff, i, 0);
+ uint32_t hash = simple_hash(s);
+
+ if(unlikely(hash == user_hash && !strcmp(s, "user")))
+ cp->user = str2ull(procfile_lineword(ff, i, 1));
+
+ else if(unlikely(hash == system_hash && !strcmp(s, "system")))
+ cp->system = str2ull(procfile_lineword(ff, i, 1));
+ }
+
+ cp->updated = 1;
+
+ if(unlikely(cp->enabled == CONFIG_BOOLEAN_AUTO && (cp->user || cp->system)))
+ cp->enabled = CONFIG_BOOLEAN_YES;
+ }
+}
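+
+// illustrative cpuacct.stat content (example values):
+//   user 12345
+//   system 6789
+// both counters are in USER_HZ ticks, as documented in cgroup-v1/cpuacct.txt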
+
+static inline void cgroup_read_cpuacct_usage(struct cpuacct_usage *ca) {
+ static procfile *ff = NULL;
+
+ if(likely(ca->filename)) {
+ ff = procfile_reopen(ff, ca->filename, NULL, PROCFILE_FLAG_DEFAULT);
+ if(unlikely(!ff)) {
+ ca->updated = 0;
+ cgroups_check = 1;
+ return;
+ }
+
+ ff = procfile_readall(ff);
+ if(unlikely(!ff)) {
+ ca->updated = 0;
+ cgroups_check = 1;
+ return;
+ }
+
+ if(unlikely(procfile_lines(ff) < 1)) {
+ error("CGROUP: file '%s' should have 1+ lines but has %zu.", ca->filename, procfile_lines(ff));
+ ca->updated = 0;
+ return;
+ }
+
+ unsigned long i = procfile_linewords(ff, 0);
+ if(unlikely(i == 0)) {
+ ca->updated = 0;
+ return;
+ }
+
+        // the line may end with an empty word (trailing separator) - do not count it as an extra CPU
+ while(i > 0) {
+ char *s = procfile_lineword(ff, 0, i - 1);
+ if(!*s) i--;
+ else break;
+ }
+
+ if(unlikely(i != ca->cpus)) {
+ freez(ca->cpu_percpu);
+ ca->cpu_percpu = mallocz(sizeof(unsigned long long) * i);
+ ca->cpus = (unsigned int)i;
+ }
+
+ unsigned long long total = 0;
+ for(i = 0; i < ca->cpus ;i++) {
+ unsigned long long n = str2ull(procfile_lineword(ff, 0, i));
+ ca->cpu_percpu[i] = n;
+ total += n;
+ }
+
+ ca->updated = 1;
+
+ if(unlikely(ca->enabled == CONFIG_BOOLEAN_AUTO && total))
+ ca->enabled = CONFIG_BOOLEAN_YES;
+ }
+}
+
+static inline void cgroup_read_blkio(struct blkio *io) {
+ if(unlikely(io->enabled == CONFIG_BOOLEAN_AUTO && io->delay_counter > 0)) {
+ io->delay_counter--;
+ return;
+ }
+
+ if(likely(io->filename)) {
+ static procfile *ff = NULL;
+
+ ff = procfile_reopen(ff, io->filename, NULL, PROCFILE_FLAG_DEFAULT);
+ if(unlikely(!ff)) {
+ io->updated = 0;
+ cgroups_check = 1;
+ return;
+ }
+
+ ff = procfile_readall(ff);
+ if(unlikely(!ff)) {
+ io->updated = 0;
+ cgroups_check = 1;
+ return;
+ }
+
+ unsigned long i, lines = procfile_lines(ff);
+
+ if(unlikely(lines < 1)) {
+ error("CGROUP: file '%s' should have 1+ lines.", io->filename);
+ io->updated = 0;
+ return;
+ }
+
+ io->Read = 0;
+ io->Write = 0;
+/*
+ io->Sync = 0;
+ io->Async = 0;
+ io->Total = 0;
+*/
+
+ for(i = 0; i < lines ; i++) {
+ char *s = procfile_lineword(ff, i, 1);
+ uint32_t hash = simple_hash(s);
+
+ if(unlikely(hash == Read_hash && !strcmp(s, "Read")))
+ io->Read += str2ull(procfile_lineword(ff, i, 2));
+
+ else if(unlikely(hash == Write_hash && !strcmp(s, "Write")))
+ io->Write += str2ull(procfile_lineword(ff, i, 2));
+
+/*
+ else if(unlikely(hash == Sync_hash && !strcmp(s, "Sync")))
+ io->Sync += str2ull(procfile_lineword(ff, i, 2));
+
+ else if(unlikely(hash == Async_hash && !strcmp(s, "Async")))
+ io->Async += str2ull(procfile_lineword(ff, i, 2));
+
+ else if(unlikely(hash == Total_hash && !strcmp(s, "Total")))
+ io->Total += str2ull(procfile_lineword(ff, i, 2));
+*/
+ }
+
+ io->updated = 1;
+
+ if(unlikely(io->enabled == CONFIG_BOOLEAN_AUTO)) {
+ if(unlikely(io->Read || io->Write))
+ io->enabled = CONFIG_BOOLEAN_YES;
+ else
+ io->delay_counter = cgroup_recheck_zero_blkio_every_iterations;
+ }
+ }
+}
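+
+// illustrative blkio.io_service_bytes content (example values):
+//   8:0 Read 1048576
+//   8:0 Write 524288
+// (plus Sync/Async/Total rows) - the parser above sums the "Read" and "Write"
+// rows across all devices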
+
+static inline void cgroup_read_memory(struct memory *mem) {
+ static procfile *ff = NULL;
+
+ // read detailed ram usage
+ if(likely(mem->filename_detailed)) {
+ if(unlikely(mem->enabled_detailed == CONFIG_BOOLEAN_AUTO && mem->delay_counter_detailed > 0)) {
+ mem->delay_counter_detailed--;
+ goto memory_next;
+ }
+
+ ff = procfile_reopen(ff, mem->filename_detailed, NULL, PROCFILE_FLAG_DEFAULT);
+ if(unlikely(!ff)) {
+ mem->updated_detailed = 0;
+ cgroups_check = 1;
+ goto memory_next;
+ }
+
+ ff = procfile_readall(ff);
+ if(unlikely(!ff)) {
+ mem->updated_detailed = 0;
+ cgroups_check = 1;
+ goto memory_next;
+ }
+
+ unsigned long i, lines = procfile_lines(ff);
+
+ if(unlikely(lines < 1)) {
+ error("CGROUP: file '%s' should have 1+ lines.", mem->filename_detailed);
+ mem->updated_detailed = 0;
+ goto memory_next;
+ }
+
+ if(unlikely(!mem->arl_base)) {
+ mem->arl_base = arl_create("cgroup/memory", NULL, 60);
+
+ arl_expect(mem->arl_base, "cache", &mem->cache);
+ arl_expect(mem->arl_base, "rss", &mem->rss);
+ arl_expect(mem->arl_base, "rss_huge", &mem->rss_huge);
+ arl_expect(mem->arl_base, "mapped_file", &mem->mapped_file);
+ arl_expect(mem->arl_base, "writeback", &mem->writeback);
+ mem->arl_dirty = arl_expect(mem->arl_base, "dirty", &mem->dirty);
+ mem->arl_swap = arl_expect(mem->arl_base, "swap", &mem->swap);
+ arl_expect(mem->arl_base, "pgpgin", &mem->pgpgin);
+ arl_expect(mem->arl_base, "pgpgout", &mem->pgpgout);
+ arl_expect(mem->arl_base, "pgfault", &mem->pgfault);
+ arl_expect(mem->arl_base, "pgmajfault", &mem->pgmajfault);
+ }
+
+ arl_begin(mem->arl_base);
+
+ for(i = 0; i < lines ; i++) {
+ if(arl_check(mem->arl_base,
+ procfile_lineword(ff, i, 0),
+ procfile_lineword(ff, i, 1))) break;
+ }
+
+ if(unlikely(mem->arl_dirty->flags & ARL_ENTRY_FLAG_FOUND))
+ mem->detailed_has_dirty = 1;
+
+ if(unlikely(mem->arl_swap->flags & ARL_ENTRY_FLAG_FOUND))
+ mem->detailed_has_swap = 1;
+
+ // fprintf(stderr, "READ: '%s', cache: %llu, rss: %llu, rss_huge: %llu, mapped_file: %llu, writeback: %llu, dirty: %llu, swap: %llu, pgpgin: %llu, pgpgout: %llu, pgfault: %llu, pgmajfault: %llu, inactive_anon: %llu, active_anon: %llu, inactive_file: %llu, active_file: %llu, unevictable: %llu, hierarchical_memory_limit: %llu, total_cache: %llu, total_rss: %llu, total_rss_huge: %llu, total_mapped_file: %llu, total_writeback: %llu, total_dirty: %llu, total_swap: %llu, total_pgpgin: %llu, total_pgpgout: %llu, total_pgfault: %llu, total_pgmajfault: %llu, total_inactive_anon: %llu, total_active_anon: %llu, total_inactive_file: %llu, total_active_file: %llu, total_unevictable: %llu\n", mem->filename, mem->cache, mem->rss, mem->rss_huge, mem->mapped_file, mem->writeback, mem->dirty, mem->swap, mem->pgpgin, mem->pgpgout, mem->pgfault, mem->pgmajfault, mem->inactive_anon, mem->active_anon, mem->inactive_file, mem->active_file, mem->unevictable, mem->hierarchical_memory_limit, mem->total_cache, mem->total_rss, mem->total_rss_huge, mem->total_mapped_file, mem->total_writeback, mem->total_dirty, mem->total_swap, mem->total_pgpgin, mem->total_pgpgout, mem->total_pgfault, mem->total_pgmajfault, mem->total_inactive_anon, mem->total_active_anon, mem->total_inactive_file, mem->total_active_file, mem->total_unevictable);
+
+ mem->updated_detailed = 1;
+
+ if(unlikely(mem->enabled_detailed == CONFIG_BOOLEAN_AUTO)) {
+ if(mem->cache || mem->dirty || mem->rss || mem->rss_huge || mem->mapped_file || mem->writeback || mem->swap || mem->pgpgin || mem->pgpgout || mem->pgfault || mem->pgmajfault)
+ mem->enabled_detailed = CONFIG_BOOLEAN_YES;
+ else
+ mem->delay_counter_detailed = cgroup_recheck_zero_mem_detailed_every_iterations;
+ }
+ }
+
+memory_next:
+
+ // read usage_in_bytes
+ if(likely(mem->filename_usage_in_bytes)) {
+ mem->updated_usage_in_bytes = !read_single_number_file(mem->filename_usage_in_bytes, &mem->usage_in_bytes);
+ if(unlikely(mem->updated_usage_in_bytes && mem->enabled_usage_in_bytes == CONFIG_BOOLEAN_AUTO && mem->usage_in_bytes))
+ mem->enabled_usage_in_bytes = CONFIG_BOOLEAN_YES;
+ }
+
+ // read msw_usage_in_bytes
+ if(likely(mem->filename_msw_usage_in_bytes)) {
+ mem->updated_msw_usage_in_bytes = !read_single_number_file(mem->filename_msw_usage_in_bytes, &mem->msw_usage_in_bytes);
+ if(unlikely(mem->updated_msw_usage_in_bytes && mem->enabled_msw_usage_in_bytes == CONFIG_BOOLEAN_AUTO && mem->msw_usage_in_bytes))
+ mem->enabled_msw_usage_in_bytes = CONFIG_BOOLEAN_YES;
+ }
+
+ // read failcnt
+ if(likely(mem->filename_failcnt)) {
+ if(unlikely(mem->enabled_failcnt == CONFIG_BOOLEAN_AUTO && mem->delay_counter_failcnt > 0)) {
+ mem->updated_failcnt = 0;
+ mem->delay_counter_failcnt--;
+ }
+ else {
+ mem->updated_failcnt = !read_single_number_file(mem->filename_failcnt, &mem->failcnt);
+ if(unlikely(mem->updated_failcnt && mem->enabled_failcnt == CONFIG_BOOLEAN_AUTO)) {
+ if(unlikely(!mem->failcnt))
+ mem->delay_counter_failcnt = cgroup_recheck_zero_mem_failcnt_every_iterations;
+ else
+ mem->enabled_failcnt = CONFIG_BOOLEAN_YES;
+ }
+ }
+ }
+}
+
+static inline void cgroup_read(struct cgroup *cg) {
+ debug(D_CGROUP, "reading metrics for cgroups '%s'", cg->id);
+
+ cgroup_read_cpuacct_stat(&cg->cpuacct_stat);
+ cgroup_read_cpuacct_usage(&cg->cpuacct_usage);
+ cgroup_read_memory(&cg->memory);
+ cgroup_read_blkio(&cg->io_service_bytes);
+ cgroup_read_blkio(&cg->io_serviced);
+ cgroup_read_blkio(&cg->throttle_io_service_bytes);
+ cgroup_read_blkio(&cg->throttle_io_serviced);
+ cgroup_read_blkio(&cg->io_merged);
+ cgroup_read_blkio(&cg->io_queued);
+}
+
+static inline void read_all_cgroups(struct cgroup *root) {
+ debug(D_CGROUP, "reading metrics for all cgroups");
+
+ struct cgroup *cg;
+
+ for(cg = root; cg ; cg = cg->next)
+ if(cg->enabled && cg->available)
+ cgroup_read(cg);
+}
+
+// ----------------------------------------------------------------------------
+// cgroup network interfaces
+
+#define CGROUP_NETWORK_INTERFACE_MAX_LINE 2048
+static inline void read_cgroup_network_interfaces(struct cgroup *cg) {
+ debug(D_CGROUP, "looking for the network interfaces of cgroup '%s' with chart id '%s' and title '%s'", cg->id, cg->chart_id, cg->chart_title);
+
+ pid_t cgroup_pid;
+ char command[CGROUP_NETWORK_INTERFACE_MAX_LINE + 1];
+
+ snprintfz(command, CGROUP_NETWORK_INTERFACE_MAX_LINE, "exec %s --cgroup '%s%s'", cgroups_network_interface_script, cgroup_cpuacct_base, cg->id);
+
+ debug(D_CGROUP, "executing command '%s' for cgroup '%s'", command, cg->id);
+ FILE *fp = mypopen(command, &cgroup_pid);
+ if(!fp) {
+ error("CGROUP: cannot popen(\"%s\", \"r\").", command);
+ return;
+ }
+
+ char *s;
+ char buffer[CGROUP_NETWORK_INTERFACE_MAX_LINE + 1];
+ while((s = fgets(buffer, CGROUP_NETWORK_INTERFACE_MAX_LINE, fp))) {
+ trim(s);
+
+ if(*s && *s != '\n') {
+ char *t = s;
+ while(*t && *t != ' ') t++;
+ if(*t == ' ') {
+ *t = '\0';
+ t++;
+ }
+
+ if(!*s) {
+ error("CGROUP: empty host interface returned by script");
+ continue;
+ }
+
+ if(!*t) {
+ error("CGROUP: empty guest interface returned by script");
+ continue;
+ }
+
+ struct cgroup_network_interface *i = callocz(1, sizeof(struct cgroup_network_interface));
+ i->host_device = strdupz(s);
+ i->container_device = strdupz(t);
+ i->next = cg->interfaces;
+ cg->interfaces = i;
+
+ info("CGROUP: cgroup '%s' has network interface '%s' as '%s'", cg->id, i->host_device, i->container_device);
+
+ // register a device rename to proc_net_dev.c
+ netdev_rename_device_add(i->host_device, i->container_device, cg->chart_id);
+ }
+ }
+
+ mypclose(fp, cgroup_pid);
+ // debug(D_CGROUP, "closed command for cgroup '%s'", cg->id);
+}
+
+static inline void free_cgroup_network_interfaces(struct cgroup *cg) {
+ while(cg->interfaces) {
+ struct cgroup_network_interface *i = cg->interfaces;
+ cg->interfaces = i->next;
+
+ // delete the registration of proc_net_dev rename
+ netdev_rename_device_del(i->host_device);
+
+ freez((void *)i->host_device);
+ freez((void *)i->container_device);
+ freez((void *)i);
+ }
+}
+
+// ----------------------------------------------------------------------------
+// add/remove/find cgroup objects
+
+#define CGROUP_CHARTID_LINE_MAX 1024
+
+static inline char *cgroup_title_strdupz(const char *s) {
+ if(!s || !*s) s = "/";
+
+ if(*s == '/' && s[1] != '\0') s++;
+
+ char *r = strdupz(s);
+ netdata_fix_chart_name(r);
+
+ return r;
+}
+
+static inline char *cgroup_chart_id_strdupz(const char *s) {
+ if(!s || !*s) s = "/";
+
+ if(*s == '/' && s[1] != '\0') s++;
+
+ char *r = strdupz(s);
+ netdata_fix_chart_id(r);
+
+ return r;
+}
+
+static inline void cgroup_get_chart_name(struct cgroup *cg) {
+ debug(D_CGROUP, "looking for the name of cgroup '%s' with chart id '%s' and title '%s'", cg->id, cg->chart_id, cg->chart_title);
+
+ pid_t cgroup_pid;
+ char command[CGROUP_CHARTID_LINE_MAX + 1];
+
+ snprintfz(command, CGROUP_CHARTID_LINE_MAX, "exec %s '%s' '%s'", cgroups_rename_script, cg->chart_id, cg->id);
+
+ debug(D_CGROUP, "executing command \"%s\" for cgroup '%s'", command, cg->id);
+ FILE *fp = mypopen(command, &cgroup_pid);
+ if(fp) {
+ // debug(D_CGROUP, "reading from command '%s' for cgroup '%s'", command, cg->id);
+ char buffer[CGROUP_CHARTID_LINE_MAX + 1];
+ char *s = fgets(buffer, CGROUP_CHARTID_LINE_MAX, fp);
+ // debug(D_CGROUP, "closing command for cgroup '%s'", cg->id);
+ mypclose(fp, cgroup_pid);
+ // debug(D_CGROUP, "closed command for cgroup '%s'", cg->id);
+
+ if(s && *s && *s != '\n') {
+ debug(D_CGROUP, "cgroup '%s' should be renamed to '%s'", cg->id, s);
+
+ trim(s);
+
+ freez(cg->chart_title);
+ cg->chart_title = cgroup_title_strdupz(s);
+
+ freez(cg->chart_id);
+ cg->chart_id = cgroup_chart_id_strdupz(s);
+ cg->hash_chart = simple_hash(cg->chart_id);
+ }
+ }
+ else
+ error("CGROUP: cannot popen(\"%s\", \"r\").", command);
+}
+
+static inline struct cgroup *cgroup_add(const char *id) {
+ if(!id || !*id) id = "/";
+ debug(D_CGROUP, "adding to list, cgroup with id '%s'", id);
+
+ if(cgroup_root_count >= cgroup_root_max) {
+ info("CGROUP: maximum number of cgroups reached (%d). Not adding cgroup '%s'", cgroup_root_count, id);
+ return NULL;
+ }
+
+ int def = simple_pattern_matches(enabled_cgroup_patterns, id)?cgroup_enable_new_cgroups_detected_at_runtime:0;
+ struct cgroup *cg = callocz(1, sizeof(struct cgroup));
+
+ cg->id = strdupz(id);
+ cg->hash = simple_hash(cg->id);
+
+ cg->chart_title = cgroup_title_strdupz(id);
+
+ cg->chart_id = cgroup_chart_id_strdupz(id);
+ cg->hash_chart = simple_hash(cg->chart_id);
+
+ if(!cgroup_root)
+ cgroup_root = cg;
+ else {
+ // append it
+ struct cgroup *e;
+ for(e = cgroup_root; e->next ;e = e->next) ;
+ e->next = cg;
+ }
+
+ cgroup_root_count++;
+
+ // fix the chart_id and title by calling the external script
+ if(simple_pattern_matches(enabled_cgroup_renames, cg->id)) {
+
+ cgroup_get_chart_name(cg);
+
+ debug(D_CGROUP, "cgroup '%s' renamed to '%s' (title: '%s')", cg->id, cg->chart_id, cg->chart_title);
+ }
+ else
+ debug(D_CGROUP, "cgroup '%s' will not be renamed - it matches the list of disabled cgroup renames (will be shown as '%s')", cg->id, cg->chart_id);
+
+ int user_configurable = 1;
+
+ // check if this cgroup should be a systemd service
+ if(cgroup_enable_systemd_services) {
+ if(simple_pattern_matches(systemd_services_cgroups, cg->id) ||
+ simple_pattern_matches(systemd_services_cgroups, cg->chart_id)) {
+ debug(D_CGROUP, "cgroup '%s' with chart id '%s' (title: '%s') matches systemd services cgroups", cg->id, cg->chart_id, cg->chart_title);
+
+ char buffer[CGROUP_CHARTID_LINE_MAX + 1];
+ cg->options |= CGROUP_OPTIONS_SYSTEM_SLICE_SERVICE;
+
+ strncpy(buffer, cg->id, CGROUP_CHARTID_LINE_MAX);
+ char *s = buffer;
+
+ //freez(cg->chart_id);
+ //cg->chart_id = cgroup_chart_id_strdupz(s);
+ //cg->hash_chart = simple_hash(cg->chart_id);
+
+ // skip to the last slash
+ size_t len = strlen(s);
+ while(len--) if(unlikely(s[len] == '/')) break;
+ if(len) s = &s[len + 1];
+
+ // remove extension
+ len = strlen(s);
+ while(len--) if(unlikely(s[len] == '.')) break;
+ if(len) s[len] = '\0';
+
+ freez(cg->chart_title);
+ cg->chart_title = cgroup_title_strdupz(s);
+
+ cg->enabled = 1;
+ user_configurable = 0;
+
+ debug(D_CGROUP, "cgroup '%s' renamed to '%s' (title: '%s')", cg->id, cg->chart_id, cg->chart_title);
+ }
+ else
+ debug(D_CGROUP, "cgroup '%s' with chart id '%s' (title: '%s') does not match systemd services groups", cg->id, cg->chart_id, cg->chart_title);
+ }
+
+ if(user_configurable) {
+        // allow the user to enable/disable this individually
+ char option[FILENAME_MAX + 1];
+ snprintfz(option, FILENAME_MAX, "enable cgroup %s", cg->chart_title);
+ cg->enabled = (char) config_get_boolean("plugin:cgroups", option, def);
+ }
+
+ // detect duplicate cgroups
+ if(cg->enabled) {
+ struct cgroup *t;
+ for (t = cgroup_root; t; t = t->next) {
+ if (t != cg && t->enabled && t->hash_chart == cg->hash_chart && !strcmp(t->chart_id, cg->chart_id)) {
+ if (!strncmp(t->chart_id, "/system.slice/", 14) && !strncmp(cg->chart_id, "/init.scope/system.slice/", 25)) {
+ error("CGROUP: chart id '%s' already exists with id '%s' and is enabled. Swapping them by enabling cgroup with id '%s' and disabling cgroup with id '%s'.",
+ cg->chart_id, t->id, cg->id, t->id);
+ debug(D_CGROUP, "Control group with chart id '%s' already exists with id '%s' and is enabled. Swapping them by enabling cgroup with id '%s' and disabling cgroup with id '%s'.",
+ cg->chart_id, t->id, cg->id, t->id);
+ t->enabled = 0;
+ t->options |= CGROUP_OPTIONS_DISABLED_DUPLICATE;
+ }
+ else {
+ error("CGROUP: chart id '%s' already exists with id '%s' and is enabled and available. Disabling cgroup with id '%s'.",
+ cg->chart_id, t->id, cg->id);
+ debug(D_CGROUP, "Control group with chart id '%s' already exists with id '%s' and is enabled and available. Disabling cgroup with id '%s'.",
+ cg->chart_id, t->id, cg->id);
+ cg->enabled = 0;
+ cg->options |= CGROUP_OPTIONS_DISABLED_DUPLICATE;
+ }
+
+ break;
+ }
+ }
+ }
+
+ if(cg->enabled && !(cg->options & CGROUP_OPTIONS_SYSTEM_SLICE_SERVICE))
+ read_cgroup_network_interfaces(cg);
+
+ debug(D_CGROUP, "ADDED CGROUP: '%s' with chart id '%s' and title '%s' as %s (default was %s)", cg->id, cg->chart_id, cg->chart_title, (cg->enabled)?"enabled":"disabled", (def)?"enabled":"disabled");
+
+ return cg;
+}
+
+static inline void cgroup_free(struct cgroup *cg) {
+ debug(D_CGROUP, "Removing cgroup '%s' with chart id '%s' (was %s and %s)", cg->id, cg->chart_id, (cg->enabled)?"enabled":"disabled", (cg->available)?"available":"not available");
+
+ if(cg->st_cpu) rrdset_is_obsolete(cg->st_cpu);
+ if(cg->st_cpu_per_core) rrdset_is_obsolete(cg->st_cpu_per_core);
+ if(cg->st_mem) rrdset_is_obsolete(cg->st_mem);
+ if(cg->st_writeback) rrdset_is_obsolete(cg->st_writeback);
+ if(cg->st_mem_activity) rrdset_is_obsolete(cg->st_mem_activity);
+ if(cg->st_pgfaults) rrdset_is_obsolete(cg->st_pgfaults);
+ if(cg->st_mem_usage) rrdset_is_obsolete(cg->st_mem_usage);
+ if(cg->st_mem_failcnt) rrdset_is_obsolete(cg->st_mem_failcnt);
+ if(cg->st_io) rrdset_is_obsolete(cg->st_io);
+ if(cg->st_serviced_ops) rrdset_is_obsolete(cg->st_serviced_ops);
+ if(cg->st_throttle_io) rrdset_is_obsolete(cg->st_throttle_io);
+ if(cg->st_throttle_serviced_ops) rrdset_is_obsolete(cg->st_throttle_serviced_ops);
+ if(cg->st_queued_ops) rrdset_is_obsolete(cg->st_queued_ops);
+ if(cg->st_merged_ops) rrdset_is_obsolete(cg->st_merged_ops);
+
+ free_cgroup_network_interfaces(cg);
+
+ freez(cg->cpuacct_usage.cpu_percpu);
+
+ freez(cg->cpuacct_stat.filename);
+ freez(cg->cpuacct_usage.filename);
+
+ arl_free(cg->memory.arl_base);
+ freez(cg->memory.filename_detailed);
+ freez(cg->memory.filename_failcnt);
+ freez(cg->memory.filename_usage_in_bytes);
+ freez(cg->memory.filename_msw_usage_in_bytes);
+
+ freez(cg->io_service_bytes.filename);
+ freez(cg->io_serviced.filename);
+
+ freez(cg->throttle_io_service_bytes.filename);
+ freez(cg->throttle_io_serviced.filename);
+
+ freez(cg->io_merged.filename);
+ freez(cg->io_queued.filename);
+
+ freez(cg->id);
+ freez(cg->chart_id);
+ freez(cg->chart_title);
+
+ freez(cg);
+
+ cgroup_root_count--;
+}
+
+// find if a given cgroup exists
+static inline struct cgroup *cgroup_find(const char *id) {
+ debug(D_CGROUP, "searching for cgroup '%s'", id);
+
+ uint32_t hash = simple_hash(id);
+
+ struct cgroup *cg;
+ for(cg = cgroup_root; cg ; cg = cg->next) {
+ if(hash == cg->hash && strcmp(id, cg->id) == 0)
+ break;
+ }
+
+ debug(D_CGROUP, "cgroup '%s' %s in memory", id, (cg)?"found":"not found");
+ return cg;
+}
+
+// ----------------------------------------------------------------------------
+// detect running cgroups
+
+// callback for find_file_in_subdirs()
+static inline void found_subdir_in_dir(const char *dir) {
+ debug(D_CGROUP, "examining cgroup dir '%s'", dir);
+
+ struct cgroup *cg = cgroup_find(dir);
+ if(!cg) {
+ if(*dir && cgroup_max_depth > 0) {
+ int depth = 0;
+ const char *s;
+
+ for(s = dir; *s ;s++)
+ if(unlikely(*s == '/'))
+ depth++;
+
+ if(depth > cgroup_max_depth) {
+ info("CGROUP: '%s' is too deep (%d, while max is %d)", dir, depth, cgroup_max_depth);
+ return;
+ }
+ }
+ // debug(D_CGROUP, "will add dir '%s' as cgroup", dir);
+ cg = cgroup_add(dir);
+ }
+
+ if(cg) cg->available = 1;
+}
+
+static inline int find_dir_in_subdirs(const char *base, const char *this, void (*callback)(const char *)) {
+ if(!this) this = base;
+ debug(D_CGROUP, "searching for directories in '%s' (base '%s')", this?this:"", base);
+
+ size_t dirlen = strlen(this), baselen = strlen(base);
+
+ int ret = -1;
+ int enabled = -1;
+
+ const char *relative_path = &this[baselen];
+ if(!*relative_path) relative_path = "/";
+
+ DIR *dir = opendir(this);
+ if(!dir) {
+ error("CGROUP: cannot read directory '%s'", base);
+ return ret;
+ }
+ ret = 1;
+
+ callback(relative_path);
+
+ struct dirent *de = NULL;
+ while((de = readdir(dir))) {
+ if(de->d_type == DT_DIR
+ && (
+ (de->d_name[0] == '.' && de->d_name[1] == '\0')
+ || (de->d_name[0] == '.' && de->d_name[1] == '.' && de->d_name[2] == '\0')
+ ))
+ continue;
+
+ if(de->d_type == DT_DIR) {
+ if(enabled == -1) {
+ const char *r = relative_path;
+ if(*r == '\0') r = "/";
+
+                // do not descend into directories we are not interested in
+ int def = simple_pattern_matches(enabled_cgroup_paths, r);
+
+ // we check for this option here
+ // so that the config will not have settings
+ // for leaf directories
+ char option[FILENAME_MAX + 1];
+ snprintfz(option, FILENAME_MAX, "search for cgroups under %s", r);
+ option[FILENAME_MAX] = '\0';
+ enabled = config_get_boolean("plugin:cgroups", option, def);
+ }
+
+ if(enabled) {
+ char *s = mallocz(dirlen + strlen(de->d_name) + 2);
+ strcpy(s, this);
+ strcat(s, "/");
+ strcat(s, de->d_name);
+ int ret2 = find_dir_in_subdirs(base, s, callback);
+ if(ret2 > 0) ret += ret2;
+ freez(s);
+ }
+ }
+ }
+
+ closedir(dir);
+ return ret;
+}
+
+static inline void mark_all_cgroups_as_not_available() {
+ debug(D_CGROUP, "marking all cgroups as not available");
+
+ struct cgroup *cg;
+
+ // mark all as not available
+ for(cg = cgroup_root; cg ; cg = cg->next) {
+ cg->available = 0;
+ }
+}
+
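+// remove from the list (and free) every cgroup that was not found during
+// the last discovery; if a removed cgroup had a duplicate that was disabled
+// because of the chart id collision, that duplicate is enabled before the
+// original is freed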
+static inline void cleanup_all_cgroups() {
+ struct cgroup *cg = cgroup_root, *last = NULL;
+
+ for(; cg ;) {
+ if(!cg->available) {
+ // enable the first duplicate cgroup
+ {
+ struct cgroup *t;
+ for(t = cgroup_root; t ; t = t->next) {
+ if(t != cg && t->available && !t->enabled && t->options & CGROUP_OPTIONS_DISABLED_DUPLICATE && t->hash_chart == cg->hash_chart && !strcmp(t->chart_id, cg->chart_id)) {
+ debug(D_CGROUP, "Enabling duplicate of cgroup '%s' with id '%s', because the original with id '%s' stopped.", t->chart_id, t->id, cg->id);
+ t->enabled = 1;
+ t->options &= ~CGROUP_OPTIONS_DISABLED_DUPLICATE;
+ break;
+ }
+ }
+ }
+
+ if(!last)
+ cgroup_root = cg->next;
+ else
+ last->next = cg->next;
+
+ cgroup_free(cg);
+
+ if(!last)
+ cg = cgroup_root;
+ else
+ cg = last->next;
+ }
+ else {
+ last = cg;
+ cg = cg->next;
+ }
+ }
+}
+
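+// discovery pass: mark everything as not available, walk the cpuacct, blkio,
+// memory and devices hierarchies to (re)register the cgroups that exist,
+// drop the ones that disappeared, and stat() the metric files of each
+// available cgroup once, caching their filenames for the readers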
+static inline void find_all_cgroups() {
+ debug(D_CGROUP, "searching for cgroups");
+
+ mark_all_cgroups_as_not_available();
+
+ if(cgroup_enable_cpuacct_stat || cgroup_enable_cpuacct_usage) {
+ if(find_dir_in_subdirs(cgroup_cpuacct_base, NULL, found_subdir_in_dir) == -1) {
+ cgroup_enable_cpuacct_stat =
+ cgroup_enable_cpuacct_usage = CONFIG_BOOLEAN_NO;
+ error("CGROUP: disabled cpu statistics.");
+ }
+ }
+
+ if(cgroup_enable_blkio_io || cgroup_enable_blkio_ops || cgroup_enable_blkio_throttle_io || cgroup_enable_blkio_throttle_ops || cgroup_enable_blkio_merged_ops || cgroup_enable_blkio_queued_ops) {
+ if(find_dir_in_subdirs(cgroup_blkio_base, NULL, found_subdir_in_dir) == -1) {
+ cgroup_enable_blkio_io =
+ cgroup_enable_blkio_ops =
+ cgroup_enable_blkio_throttle_io =
+ cgroup_enable_blkio_throttle_ops =
+ cgroup_enable_blkio_merged_ops =
+ cgroup_enable_blkio_queued_ops = CONFIG_BOOLEAN_NO;
+ error("CGROUP: disabled blkio statistics.");
+ }
+ }
+
+ if(cgroup_enable_memory || cgroup_enable_detailed_memory || cgroup_enable_swap || cgroup_enable_memory_failcnt) {
+ if(find_dir_in_subdirs(cgroup_memory_base, NULL, found_subdir_in_dir) == -1) {
+ cgroup_enable_memory =
+ cgroup_enable_detailed_memory =
+ cgroup_enable_swap =
+ cgroup_enable_memory_failcnt = CONFIG_BOOLEAN_NO;
+ error("CGROUP: disabled memory statistics.");
+ }
+ }
+
+ if(cgroup_search_in_devices) {
+ if(find_dir_in_subdirs(cgroup_devices_base, NULL, found_subdir_in_dir) == -1) {
+ cgroup_search_in_devices = 0;
+ error("CGROUP: disabled devices statistics.");
+ }
+ }
+
+ // remove any non-existing cgroups
+ cleanup_all_cgroups();
+
+ struct cgroup *cg;
+ struct stat buf;
+ for(cg = cgroup_root; cg ; cg = cg->next) {
+ // fprintf(stderr, " >>> CGROUP '%s' (%u - %s) with name '%s'\n", cg->id, cg->hash, cg->available?"available":"stopped", cg->name);
+
+ if(unlikely(!cg->available))
+ continue;
+
+ debug(D_CGROUP, "checking paths for cgroup '%s'", cg->id);
+
+ // check for newly added cgroups
+ // and update the filenames they read
+ char filename[FILENAME_MAX + 1];
+ if(unlikely(cgroup_enable_cpuacct_stat && !cg->cpuacct_stat.filename)) {
+ snprintfz(filename, FILENAME_MAX, "%s%s/cpuacct.stat", cgroup_cpuacct_base, cg->id);
+ if(likely(stat(filename, &buf) != -1)) {
+ cg->cpuacct_stat.filename = strdupz(filename);
+ cg->cpuacct_stat.enabled = cgroup_enable_cpuacct_stat;
+ debug(D_CGROUP, "cpuacct.stat filename for cgroup '%s': '%s'", cg->id, cg->cpuacct_stat.filename);
+ }
+ else
+ debug(D_CGROUP, "cpuacct.stat file for cgroup '%s': '%s' does not exist.", cg->id, filename);
+ }
+
+ if(unlikely(cgroup_enable_cpuacct_usage && !cg->cpuacct_usage.filename && !(cg->options & CGROUP_OPTIONS_SYSTEM_SLICE_SERVICE))) {
+ snprintfz(filename, FILENAME_MAX, "%s%s/cpuacct.usage_percpu", cgroup_cpuacct_base, cg->id);
+ if(likely(stat(filename, &buf) != -1)) {
+ cg->cpuacct_usage.filename = strdupz(filename);
+ cg->cpuacct_usage.enabled = cgroup_enable_cpuacct_usage;
+ debug(D_CGROUP, "cpuacct.usage_percpu filename for cgroup '%s': '%s'", cg->id, cg->cpuacct_usage.filename);
+ }
+ else
+ debug(D_CGROUP, "cpuacct.usage_percpu file for cgroup '%s': '%s' does not exist.", cg->id, filename);
+ }
+
+ if(unlikely((cgroup_enable_detailed_memory || cgroup_used_memory_without_cache) && !cg->memory.filename_detailed && (cgroup_used_memory_without_cache || cgroup_enable_systemd_services_detailed_memory || !(cg->options & CGROUP_OPTIONS_SYSTEM_SLICE_SERVICE)))) {
+ snprintfz(filename, FILENAME_MAX, "%s%s/memory.stat", cgroup_memory_base, cg->id);
+ if(likely(stat(filename, &buf) != -1)) {
+ cg->memory.filename_detailed = strdupz(filename);
+ cg->memory.enabled_detailed = (cgroup_enable_detailed_memory == CONFIG_BOOLEAN_YES)?CONFIG_BOOLEAN_YES:CONFIG_BOOLEAN_AUTO;
+ debug(D_CGROUP, "memory.stat filename for cgroup '%s': '%s'", cg->id, cg->memory.filename_detailed);
+ }
+ else
+ debug(D_CGROUP, "memory.stat file for cgroup '%s': '%s' does not exist.", cg->id, filename);
+ }
+
+ if(unlikely(cgroup_enable_memory && !cg->memory.filename_usage_in_bytes)) {
+ snprintfz(filename, FILENAME_MAX, "%s%s/memory.usage_in_bytes", cgroup_memory_base, cg->id);
+ if(likely(stat(filename, &buf) != -1)) {
+ cg->memory.filename_usage_in_bytes = strdupz(filename);
+ cg->memory.enabled_usage_in_bytes = cgroup_enable_memory;
+ debug(D_CGROUP, "memory.usage_in_bytes filename for cgroup '%s': '%s'", cg->id, cg->memory.filename_usage_in_bytes);
+ }
+ else
+ debug(D_CGROUP, "memory.usage_in_bytes file for cgroup '%s': '%s' does not exist.", cg->id, filename);
+ }
+
+ if(unlikely(cgroup_enable_swap && !cg->memory.filename_msw_usage_in_bytes)) {
+ snprintfz(filename, FILENAME_MAX, "%s%s/memory.msw_usage_in_bytes", cgroup_memory_base, cg->id);
+ if(likely(stat(filename, &buf) != -1)) {
+ cg->memory.filename_msw_usage_in_bytes = strdupz(filename);
+ cg->memory.enabled_msw_usage_in_bytes = cgroup_enable_swap;
+ debug(D_CGROUP, "memory.msw_usage_in_bytes filename for cgroup '%s': '%s'", cg->id, cg->memory.filename_msw_usage_in_bytes);
+ }
+ else
+ debug(D_CGROUP, "memory.msw_usage_in_bytes file for cgroup '%s': '%s' does not exist.", cg->id, filename);
+ }
+
+ if(unlikely(cgroup_enable_memory_failcnt && !cg->memory.filename_failcnt)) {
+ snprintfz(filename, FILENAME_MAX, "%s%s/memory.failcnt", cgroup_memory_base, cg->id);
+ if(likely(stat(filename, &buf) != -1)) {
+ cg->memory.filename_failcnt = strdupz(filename);
+ cg->memory.enabled_failcnt = cgroup_enable_memory_failcnt;
+ debug(D_CGROUP, "memory.failcnt filename for cgroup '%s': '%s'", cg->id, cg->memory.filename_failcnt);
+ }
+ else
+ debug(D_CGROUP, "memory.failcnt file for cgroup '%s': '%s' does not exist.", cg->id, filename);
+ }
+
+ if(unlikely(cgroup_enable_blkio_io && !cg->io_service_bytes.filename)) {
+ snprintfz(filename, FILENAME_MAX, "%s%s/blkio.io_service_bytes", cgroup_blkio_base, cg->id);
+ if(likely(stat(filename, &buf) != -1)) {
+ cg->io_service_bytes.filename = strdupz(filename);
+ cg->io_service_bytes.enabled = cgroup_enable_blkio_io;
+ debug(D_CGROUP, "io_service_bytes filename for cgroup '%s': '%s'", cg->id, cg->io_service_bytes.filename);
+ }
+ else
+ debug(D_CGROUP, "io_service_bytes file for cgroup '%s': '%s' does not exist.", cg->id, filename);
+ }
+
+ if(unlikely(cgroup_enable_blkio_ops && !cg->io_serviced.filename)) {
+ snprintfz(filename, FILENAME_MAX, "%s%s/blkio.io_serviced", cgroup_blkio_base, cg->id);
+ if(likely(stat(filename, &buf) != -1)) {
+ cg->io_serviced.filename = strdupz(filename);
+ cg->io_serviced.enabled = cgroup_enable_blkio_ops;
+ debug(D_CGROUP, "io_serviced filename for cgroup '%s': '%s'", cg->id, cg->io_serviced.filename);
+ }
+ else
+ debug(D_CGROUP, "io_serviced file for cgroup '%s': '%s' does not exist.", cg->id, filename);
+ }
+
+ if(unlikely(cgroup_enable_blkio_throttle_io && !cg->throttle_io_service_bytes.filename)) {
+ snprintfz(filename, FILENAME_MAX, "%s%s/blkio.throttle.io_service_bytes", cgroup_blkio_base, cg->id);
+ if(likely(stat(filename, &buf) != -1)) {
+ cg->throttle_io_service_bytes.filename = strdupz(filename);
+ cg->throttle_io_service_bytes.enabled = cgroup_enable_blkio_throttle_io;
+ debug(D_CGROUP, "throttle_io_service_bytes filename for cgroup '%s': '%s'", cg->id, cg->throttle_io_service_bytes.filename);
+ }
+ else
+ debug(D_CGROUP, "throttle_io_service_bytes file for cgroup '%s': '%s' does not exist.", cg->id, filename);
+ }
+
+ if(unlikely(cgroup_enable_blkio_throttle_ops && !cg->throttle_io_serviced.filename)) {
+ snprintfz(filename, FILENAME_MAX, "%s%s/blkio.throttle.io_serviced", cgroup_blkio_base, cg->id);
+ if(likely(stat(filename, &buf) != -1)) {
+ cg->throttle_io_serviced.filename = strdupz(filename);
+ cg->throttle_io_serviced.enabled = cgroup_enable_blkio_throttle_ops;
+ debug(D_CGROUP, "throttle_io_serviced filename for cgroup '%s': '%s'", cg->id, cg->throttle_io_serviced.filename);
+ }
+ else
+ debug(D_CGROUP, "throttle_io_serviced file for cgroup '%s': '%s' does not exist.", cg->id, filename);
+ }
+
+ if(unlikely(cgroup_enable_blkio_merged_ops && !cg->io_merged.filename)) {
+ snprintfz(filename, FILENAME_MAX, "%s%s/blkio.io_merged", cgroup_blkio_base, cg->id);
+ if(likely(stat(filename, &buf) != -1)) {
+ cg->io_merged.filename = strdupz(filename);
+ cg->io_merged.enabled = cgroup_enable_blkio_merged_ops;
+ debug(D_CGROUP, "io_merged filename for cgroup '%s': '%s'", cg->id, cg->io_merged.filename);
+ }
+ else
+ debug(D_CGROUP, "io_merged file for cgroup '%s': '%s' does not exist.", cg->id, filename);
+ }
+
+ if(unlikely(cgroup_enable_blkio_queued_ops && !cg->io_queued.filename)) {
+ snprintfz(filename, FILENAME_MAX, "%s%s/blkio.io_queued", cgroup_blkio_base, cg->id);
+ if(likely(stat(filename, &buf) != -1)) {
+ cg->io_queued.filename = strdupz(filename);
+ cg->io_queued.enabled = cgroup_enable_blkio_queued_ops;
+ debug(D_CGROUP, "io_queued filename for cgroup '%s': '%s'", cg->id, cg->io_queued.filename);
+ }
+ else
+ debug(D_CGROUP, "io_queued file for cgroup '%s': '%s' does not exist.", cg->id, filename);
+ }
+ }
+
+ debug(D_CGROUP, "done searching for cgroups");
+}
+
+// ----------------------------------------------------------------------------
+// generate charts
+
+#define CHART_TITLE_MAX 300
+
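+// render the aggregated "services.*" charts: all systemd service cgroups
+// share one chart per metric, with one dimension per service; the RRDSETs
+// are static, so they are created on the first call and reused afterwards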
+void update_systemd_services_charts(
+ int update_every
+ , int do_cpu
+ , int do_mem_usage
+ , int do_mem_detailed
+ , int do_mem_failcnt
+ , int do_swap_usage
+ , int do_io
+ , int do_io_ops
+ , int do_throttle_io
+ , int do_throttle_ops
+ , int do_queued_ops
+ , int do_merged_ops
+) {
+ static RRDSET
+ *st_cpu = NULL,
+ *st_mem_usage = NULL,
+ *st_mem_failcnt = NULL,
+ *st_swap_usage = NULL,
+
+ *st_mem_detailed_cache = NULL,
+ *st_mem_detailed_rss = NULL,
+ *st_mem_detailed_mapped = NULL,
+ *st_mem_detailed_writeback = NULL,
+ *st_mem_detailed_pgfault = NULL,
+ *st_mem_detailed_pgmajfault = NULL,
+ *st_mem_detailed_pgpgin = NULL,
+ *st_mem_detailed_pgpgout = NULL,
+
+ *st_io_read = NULL,
+ *st_io_serviced_read = NULL,
+ *st_throttle_io_read = NULL,
+ *st_throttle_ops_read = NULL,
+ *st_queued_ops_read = NULL,
+ *st_merged_ops_read = NULL,
+
+ *st_io_write = NULL,
+ *st_io_serviced_write = NULL,
+ *st_throttle_io_write = NULL,
+ *st_throttle_ops_write = NULL,
+ *st_queued_ops_write = NULL,
+ *st_merged_ops_write = NULL;
+
+ // create the charts
+
+ if(likely(do_cpu)) {
+ if(unlikely(!st_cpu)) {
+ char title[CHART_TITLE_MAX + 1];
+ snprintfz(title, CHART_TITLE_MAX, "Systemd Services CPU utilization (%d%% = %d core%s)", (processors * 100), processors, (processors > 1) ? "s" : "");
+
+ st_cpu = rrdset_create_localhost(
+ "services"
+ , "cpu"
+ , NULL
+ , "cpu"
+ , "services.cpu"
+ , title
+ , "%"
+ , PLUGIN_CGROUPS_NAME
+ , PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME
+ , NETDATA_CHART_PRIO_CGROUPS_SYSTEMD
+ , update_every
+ , RRDSET_TYPE_STACKED
+ );
+
+ }
+ else
+ rrdset_next(st_cpu);
+ }
+
+ if(likely(do_mem_usage)) {
+ if(unlikely(!st_mem_usage)) {
+
+ st_mem_usage = rrdset_create_localhost(
+ "services"
+ , "mem_usage"
+ , NULL
+ , "mem"
+ , "services.mem_usage"
+ , (cgroup_used_memory_without_cache) ? "Systemd Services Used Memory without Cache"
+ : "Systemd Services Used Memory"
+ , "MB"
+ , PLUGIN_CGROUPS_NAME
+ , PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME
+ , NETDATA_CHART_PRIO_CGROUPS_SYSTEMD + 10
+ , update_every
+ , RRDSET_TYPE_STACKED
+ );
+
+ }
+ else
+ rrdset_next(st_mem_usage);
+ }
+
+ if(likely(do_mem_detailed)) {
+ if(unlikely(!st_mem_detailed_rss)) {
+
+ st_mem_detailed_rss = rrdset_create_localhost(
+ "services"
+ , "mem_rss"
+ , NULL
+ , "mem"
+ , "services.mem_rss"
+ , "Systemd Services RSS Memory"
+ , "MB"
+ , PLUGIN_CGROUPS_NAME
+ , PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME
+ , NETDATA_CHART_PRIO_CGROUPS_SYSTEMD + 20
+ , update_every
+ , RRDSET_TYPE_STACKED
+ );
+
+ }
+ else
+ rrdset_next(st_mem_detailed_rss);
+
+ if(unlikely(!st_mem_detailed_mapped)) {
+
+ st_mem_detailed_mapped = rrdset_create_localhost(
+ "services"
+ , "mem_mapped"
+ , NULL
+ , "mem"
+ , "services.mem_mapped"
+ , "Systemd Services Mapped Memory"
+ , "MB"
+ , PLUGIN_CGROUPS_NAME
+ , PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME
+ , NETDATA_CHART_PRIO_CGROUPS_SYSTEMD + 30
+ , update_every
+ , RRDSET_TYPE_STACKED
+ );
+
+ }
+ else
+ rrdset_next(st_mem_detailed_mapped);
+
+ if(unlikely(!st_mem_detailed_cache)) {
+
+ st_mem_detailed_cache = rrdset_create_localhost(
+ "services"
+ , "mem_cache"
+ , NULL
+ , "mem"
+ , "services.mem_cache"
+ , "Systemd Services Cache Memory"
+ , "MB"
+ , PLUGIN_CGROUPS_NAME
+ , PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME
+ , NETDATA_CHART_PRIO_CGROUPS_SYSTEMD + 40
+ , update_every
+ , RRDSET_TYPE_STACKED
+ );
+
+ }
+ else
+ rrdset_next(st_mem_detailed_cache);
+
+ if(unlikely(!st_mem_detailed_writeback)) {
+
+ st_mem_detailed_writeback = rrdset_create_localhost(
+ "services"
+ , "mem_writeback"
+ , NULL
+ , "mem"
+ , "services.mem_writeback"
+ , "Systemd Services Writeback Memory"
+ , "MB"
+ , PLUGIN_CGROUPS_NAME
+ , PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME
+ , NETDATA_CHART_PRIO_CGROUPS_SYSTEMD + 50
+ , update_every
+ , RRDSET_TYPE_STACKED
+ );
+
+ }
+ else
+ rrdset_next(st_mem_detailed_writeback);
+
+ if(unlikely(!st_mem_detailed_pgfault)) {
+
+ st_mem_detailed_pgfault = rrdset_create_localhost(
+ "services"
+ , "mem_pgfault"
+ , NULL
+ , "mem"
+ , "services.mem_pgfault"
+ , "Systemd Services Memory Minor Page Faults"
+ , "MB/s"
+ , PLUGIN_CGROUPS_NAME
+ , PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME
+ , NETDATA_CHART_PRIO_CGROUPS_SYSTEMD + 60
+ , update_every
+ , RRDSET_TYPE_STACKED
+ );
+ }
+ else
+ rrdset_next(st_mem_detailed_pgfault);
+
+ if(unlikely(!st_mem_detailed_pgmajfault)) {
+
+ st_mem_detailed_pgmajfault = rrdset_create_localhost(
+ "services"
+ , "mem_pgmajfault"
+ , NULL
+ , "mem"
+ , "services.mem_pgmajfault"
+ , "Systemd Services Memory Major Page Faults"
+ , "MB/s"
+ , PLUGIN_CGROUPS_NAME
+ , PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME
+ , NETDATA_CHART_PRIO_CGROUPS_SYSTEMD + 70
+ , update_every
+ , RRDSET_TYPE_STACKED
+ );
+
+ }
+ else
+ rrdset_next(st_mem_detailed_pgmajfault);
+
+ if(unlikely(!st_mem_detailed_pgpgin)) {
+
+ st_mem_detailed_pgpgin = rrdset_create_localhost(
+ "services"
+ , "mem_pgpgin"
+ , NULL
+ , "mem"
+ , "services.mem_pgpgin"
+ , "Systemd Services Memory Charging Activity"
+ , "MB/s"
+ , PLUGIN_CGROUPS_NAME
+ , PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME
+ , NETDATA_CHART_PRIO_CGROUPS_SYSTEMD + 80
+ , update_every
+ , RRDSET_TYPE_STACKED
+ );
+
+ }
+ else
+ rrdset_next(st_mem_detailed_pgpgin);
+
+ if(unlikely(!st_mem_detailed_pgpgout)) {
+
+ st_mem_detailed_pgpgout = rrdset_create_localhost(
+ "services"
+ , "mem_pgpgout"
+ , NULL
+ , "mem"
+ , "services.mem_pgpgout"
+ , "Systemd Services Memory Uncharging Activity"
+ , "MB/s"
+ , PLUGIN_CGROUPS_NAME
+ , PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME
+ , NETDATA_CHART_PRIO_CGROUPS_SYSTEMD + 90
+ , update_every
+ , RRDSET_TYPE_STACKED
+ );
+
+ }
+ else
+ rrdset_next(st_mem_detailed_pgpgout);
+ }
+
+ if(likely(do_mem_failcnt)) {
+ if(unlikely(!st_mem_failcnt)) {
+
+ st_mem_failcnt = rrdset_create_localhost(
+ "services"
+ , "mem_failcnt"
+ , NULL
+ , "mem"
+ , "services.mem_failcnt"
+ , "Systemd Services Memory Limit Failures"
+ , "MB"
+ , PLUGIN_CGROUPS_NAME
+ , PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME
+ , NETDATA_CHART_PRIO_CGROUPS_SYSTEMD + 110
+ , update_every
+ , RRDSET_TYPE_STACKED
+ );
+
+ }
+ else
+ rrdset_next(st_mem_failcnt);
+ }
+
+ if(likely(do_swap_usage)) {
+ if(unlikely(!st_swap_usage)) {
+
+ st_swap_usage = rrdset_create_localhost(
+ "services"
+ , "swap_usage"
+ , NULL
+ , "swap"
+ , "services.swap_usage"
+ , "Systemd Services Swap Memory Used"
+ , "MB"
+ , PLUGIN_CGROUPS_NAME
+ , PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME
+ , NETDATA_CHART_PRIO_CGROUPS_SYSTEMD + 100
+ , update_every
+ , RRDSET_TYPE_STACKED
+ );
+
+ }
+ else
+ rrdset_next(st_swap_usage);
+ }
+
+ if(likely(do_io)) {
+ if(unlikely(!st_io_read)) {
+
+ st_io_read = rrdset_create_localhost(
+ "services"
+ , "io_read"
+ , NULL
+ , "disk"
+ , "services.io_read"
+ , "Systemd Services Disk Read Bandwidth"
+ , "KB/s"
+ , PLUGIN_CGROUPS_NAME
+ , PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME
+ , NETDATA_CHART_PRIO_CGROUPS_SYSTEMD + 120
+ , update_every
+ , RRDSET_TYPE_STACKED
+ );
+
+ }
+ else
+ rrdset_next(st_io_read);
+
+ if(unlikely(!st_io_write)) {
+
+ st_io_write = rrdset_create_localhost(
+ "services"
+ , "io_write"
+ , NULL
+ , "disk"
+ , "services.io_write"
+ , "Systemd Services Disk Write Bandwidth"
+ , "KB/s"
+ , PLUGIN_CGROUPS_NAME
+ , PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME
+ , NETDATA_CHART_PRIO_CGROUPS_SYSTEMD + 130
+ , update_every
+ , RRDSET_TYPE_STACKED
+ );
+
+ }
+ else
+ rrdset_next(st_io_write);
+ }
+
+ if(likely(do_io_ops)) {
+ if(unlikely(!st_io_serviced_read)) {
+
+ st_io_serviced_read = rrdset_create_localhost(
+ "services"
+ , "io_ops_read"
+ , NULL
+ , "disk"
+ , "services.io_ops_read"
+ , "Systemd Services Disk Read Operations"
+ , "operations/s"
+ , PLUGIN_CGROUPS_NAME
+ , PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME
+ , NETDATA_CHART_PRIO_CGROUPS_SYSTEMD + 140
+ , update_every
+ , RRDSET_TYPE_STACKED
+ );
+
+ }
+ else
+ rrdset_next(st_io_serviced_read);
+
+ if(unlikely(!st_io_serviced_write)) {
+
+ st_io_serviced_write = rrdset_create_localhost(
+ "services"
+ , "io_ops_write"
+ , NULL
+ , "disk"
+ , "services.io_ops_write"
+ , "Systemd Services Disk Write Operations"
+ , "operations/s"
+ , PLUGIN_CGROUPS_NAME
+ , PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME
+ , NETDATA_CHART_PRIO_CGROUPS_SYSTEMD + 150
+ , update_every
+ , RRDSET_TYPE_STACKED
+ );
+
+ }
+ else
+ rrdset_next(st_io_serviced_write);
+ }
+
+ if(likely(do_throttle_io)) {
+ if(unlikely(!st_throttle_io_read)) {
+
+ st_throttle_io_read = rrdset_create_localhost(
+ "services"
+ , "throttle_io_read"
+ , NULL
+ , "disk"
+ , "services.throttle_io_read"
+ , "Systemd Services Throttle Disk Read Bandwidth"
+ , "KB/s"
+ , PLUGIN_CGROUPS_NAME
+ , PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME
+ , NETDATA_CHART_PRIO_CGROUPS_SYSTEMD + 160
+ , update_every
+ , RRDSET_TYPE_STACKED
+ );
+
+ }
+ else
+ rrdset_next(st_throttle_io_read);
+
+ if(unlikely(!st_throttle_io_write)) {
+
+ st_throttle_io_write = rrdset_create_localhost(
+ "services"
+ , "throttle_io_write"
+ , NULL
+ , "disk"
+ , "services.throttle_io_write"
+ , "Systemd Services Throttle Disk Write Bandwidth"
+ , "KB/s"
+ , PLUGIN_CGROUPS_NAME
+ , PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME
+ , NETDATA_CHART_PRIO_CGROUPS_SYSTEMD + 170
+ , update_every
+ , RRDSET_TYPE_STACKED
+ );
+
+ }
+ else
+ rrdset_next(st_throttle_io_write);
+ }
+
+ if(likely(do_throttle_ops)) {
+ if(unlikely(!st_throttle_ops_read)) {
+
+ st_throttle_ops_read = rrdset_create_localhost(
+ "services"
+ , "throttle_io_ops_read"
+ , NULL
+ , "disk"
+ , "services.throttle_io_ops_read"
+ , "Systemd Services Throttle Disk Read Operations"
+ , "operations/s"
+ , PLUGIN_CGROUPS_NAME
+ , PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME
+ , NETDATA_CHART_PRIO_CGROUPS_SYSTEMD + 180
+ , update_every
+ , RRDSET_TYPE_STACKED
+ );
+
+ }
+ else
+ rrdset_next(st_throttle_ops_read);
+
+ if(unlikely(!st_throttle_ops_write)) {
+
+ st_throttle_ops_write = rrdset_create_localhost(
+ "services"
+ , "throttle_io_ops_write"
+ , NULL
+ , "disk"
+ , "services.throttle_io_ops_write"
+ , "Systemd Services Throttle Disk Write Operations"
+ , "operations/s"
+ , PLUGIN_CGROUPS_NAME
+ , PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME
+ , NETDATA_CHART_PRIO_CGROUPS_SYSTEMD + 190
+ , update_every
+ , RRDSET_TYPE_STACKED
+ );
+
+ }
+ else
+ rrdset_next(st_throttle_ops_write);
+ }
+
+ if(likely(do_queued_ops)) {
+ if(unlikely(!st_queued_ops_read)) {
+
+ st_queued_ops_read = rrdset_create_localhost(
+ "services"
+ , "queued_io_ops_read"
+ , NULL
+ , "disk"
+ , "services.queued_io_ops_read"
+ , "Systemd Services Queued Disk Read Operations"
+ , "operations/s"
+ , PLUGIN_CGROUPS_NAME
+ , PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME
+ , NETDATA_CHART_PRIO_CGROUPS_SYSTEMD + 200
+ , update_every
+ , RRDSET_TYPE_STACKED
+ );
+
+ }
+ else
+ rrdset_next(st_queued_ops_read);
+
+ if(unlikely(!st_queued_ops_write)) {
+
+ st_queued_ops_write = rrdset_create_localhost(
+ "services"
+ , "queued_io_ops_write"
+ , NULL
+ , "disk"
+ , "services.queued_io_ops_write"
+ , "Systemd Services Queued Disk Write Operations"
+ , "operations/s"
+ , PLUGIN_CGROUPS_NAME
+ , PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME
+ , NETDATA_CHART_PRIO_CGROUPS_SYSTEMD + 210
+ , update_every
+ , RRDSET_TYPE_STACKED
+ );
+
+ }
+ else
+ rrdset_next(st_queued_ops_write);
+ }
+
+ if(likely(do_merged_ops)) {
+ if(unlikely(!st_merged_ops_read)) {
+
+ st_merged_ops_read = rrdset_create_localhost(
+ "services"
+ , "merged_io_ops_read"
+ , NULL
+ , "disk"
+ , "services.merged_io_ops_read"
+ , "Systemd Services Merged Disk Read Operations"
+ , "operations/s"
+ , PLUGIN_CGROUPS_NAME
+ , PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME
+ , NETDATA_CHART_PRIO_CGROUPS_SYSTEMD + 220
+ , update_every
+ , RRDSET_TYPE_STACKED
+ );
+
+ }
+ else
+ rrdset_next(st_merged_ops_read);
+
+ if(unlikely(!st_merged_ops_write)) {
+
+ st_merged_ops_write = rrdset_create_localhost(
+ "services"
+ , "merged_io_ops_write"
+ , NULL
+ , "disk"
+ , "services.merged_io_ops_write"
+ , "Systemd Services Merged Disk Write Operations"
+ , "operations/s"
+ , PLUGIN_CGROUPS_NAME
+ , PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME
+ , NETDATA_CHART_PRIO_CGROUPS_SYSTEMD + 230
+ , update_every
+ , RRDSET_TYPE_STACKED
+ );
+
+ }
+ else
+ rrdset_next(st_merged_ops_write);
+ }
+
+ // update the values
+ struct cgroup *cg;
+ for(cg = cgroup_root; cg ; cg = cg->next) {
+ if(unlikely(!cg->available || !cg->enabled || !(cg->options & CGROUP_OPTIONS_SYSTEM_SLICE_SERVICE)))
+ continue;
+
+ if(likely(do_cpu && cg->cpuacct_stat.updated)) {
+ if(unlikely(!cg->rd_cpu))
+ cg->rd_cpu = rrddim_add(st_cpu, cg->chart_id, cg->chart_title, 100, system_hz, RRD_ALGORITHM_INCREMENTAL);
+
+ rrddim_set_by_pointer(st_cpu, cg->rd_cpu, cg->cpuacct_stat.user + cg->cpuacct_stat.system);
+ }
+
+ if(likely(do_mem_usage && cg->memory.updated_usage_in_bytes)) {
+ if(unlikely(!cg->rd_mem_usage))
+ cg->rd_mem_usage = rrddim_add(st_mem_usage, cg->chart_id, cg->chart_title, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE);
+
+ rrddim_set_by_pointer(st_mem_usage, cg->rd_mem_usage, cg->memory.usage_in_bytes - ((cgroup_used_memory_without_cache)?cg->memory.cache:0));
+ }
+
+ if(likely(do_mem_detailed && cg->memory.updated_detailed)) {
+ if(unlikely(!cg->rd_mem_detailed_rss))
+ cg->rd_mem_detailed_rss = rrddim_add(st_mem_detailed_rss, cg->chart_id, cg->chart_title, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE);
+
+ rrddim_set_by_pointer(st_mem_detailed_rss, cg->rd_mem_detailed_rss, cg->memory.rss + cg->memory.rss_huge);
+
+ if(unlikely(!cg->rd_mem_detailed_mapped))
+ cg->rd_mem_detailed_mapped = rrddim_add(st_mem_detailed_mapped, cg->chart_id, cg->chart_title, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE);
+
+ rrddim_set_by_pointer(st_mem_detailed_mapped, cg->rd_mem_detailed_mapped, cg->memory.mapped_file);
+
+ if(unlikely(!cg->rd_mem_detailed_cache))
+ cg->rd_mem_detailed_cache = rrddim_add(st_mem_detailed_cache, cg->chart_id, cg->chart_title, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE);
+
+ rrddim_set_by_pointer(st_mem_detailed_cache, cg->rd_mem_detailed_cache, cg->memory.cache);
+
+ if(unlikely(!cg->rd_mem_detailed_writeback))
+ cg->rd_mem_detailed_writeback = rrddim_add(st_mem_detailed_writeback, cg->chart_id, cg->chart_title, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE);
+
+ rrddim_set_by_pointer(st_mem_detailed_writeback, cg->rd_mem_detailed_writeback, cg->memory.writeback);
+
+ if(unlikely(!cg->rd_mem_detailed_pgfault))
+ cg->rd_mem_detailed_pgfault = rrddim_add(st_mem_detailed_pgfault, cg->chart_id, cg->chart_title, system_page_size, 1024 * 1024, RRD_ALGORITHM_INCREMENTAL);
+
+ rrddim_set_by_pointer(st_mem_detailed_pgfault, cg->rd_mem_detailed_pgfault, cg->memory.pgfault);
+
+ if(unlikely(!cg->rd_mem_detailed_pgmajfault))
+ cg->rd_mem_detailed_pgmajfault = rrddim_add(st_mem_detailed_pgmajfault, cg->chart_id, cg->chart_title, system_page_size, 1024 * 1024, RRD_ALGORITHM_INCREMENTAL);
+
+ rrddim_set_by_pointer(st_mem_detailed_pgmajfault, cg->rd_mem_detailed_pgmajfault, cg->memory.pgmajfault);
+
+ if(unlikely(!cg->rd_mem_detailed_pgpgin))
+ cg->rd_mem_detailed_pgpgin = rrddim_add(st_mem_detailed_pgpgin, cg->chart_id, cg->chart_title, system_page_size, 1024 * 1024, RRD_ALGORITHM_INCREMENTAL);
+
+ rrddim_set_by_pointer(st_mem_detailed_pgpgin, cg->rd_mem_detailed_pgpgin, cg->memory.pgpgin);
+
+ if(unlikely(!cg->rd_mem_detailed_pgpgout))
+ cg->rd_mem_detailed_pgpgout = rrddim_add(st_mem_detailed_pgpgout, cg->chart_id, cg->chart_title, system_page_size, 1024 * 1024, RRD_ALGORITHM_INCREMENTAL);
+
+ rrddim_set_by_pointer(st_mem_detailed_pgpgout, cg->rd_mem_detailed_pgpgout, cg->memory.pgpgout);
+ }
+
+ if(likely(do_mem_failcnt && cg->memory.updated_failcnt)) {
+ if(unlikely(!cg->rd_mem_failcnt))
+ cg->rd_mem_failcnt = rrddim_add(st_mem_failcnt, cg->chart_id, cg->chart_title, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+
+ rrddim_set_by_pointer(st_mem_failcnt, cg->rd_mem_failcnt, cg->memory.failcnt);
+ }
+
+ if(likely(do_swap_usage && cg->memory.updated_msw_usage_in_bytes)) {
+ if(unlikely(!cg->rd_swap_usage))
+ cg->rd_swap_usage = rrddim_add(st_swap_usage, cg->chart_id, cg->chart_title, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE);
+
+ rrddim_set_by_pointer(st_swap_usage, cg->rd_swap_usage, cg->memory.msw_usage_in_bytes);
+ }
+
+ if(likely(do_io && cg->io_service_bytes.updated)) {
+ if(unlikely(!cg->rd_io_service_bytes_read))
+ cg->rd_io_service_bytes_read = rrddim_add(st_io_read, cg->chart_id, cg->chart_title, 1, 1024, RRD_ALGORITHM_INCREMENTAL);
+
+ rrddim_set_by_pointer(st_io_read, cg->rd_io_service_bytes_read, cg->io_service_bytes.Read);
+
+ if(unlikely(!cg->rd_io_service_bytes_write))
+ cg->rd_io_service_bytes_write = rrddim_add(st_io_write, cg->chart_id, cg->chart_title, 1, 1024, RRD_ALGORITHM_INCREMENTAL);
+
+ rrddim_set_by_pointer(st_io_write, cg->rd_io_service_bytes_write, cg->io_service_bytes.Write);
+ }
+
+ if(likely(do_io_ops && cg->io_serviced.updated)) {
+ if(unlikely(!cg->rd_io_serviced_read))
+ cg->rd_io_serviced_read = rrddim_add(st_io_serviced_read, cg->chart_id, cg->chart_title, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+
+ rrddim_set_by_pointer(st_io_serviced_read, cg->rd_io_serviced_read, cg->io_serviced.Read);
+
+ if(unlikely(!cg->rd_io_serviced_write))
+ cg->rd_io_serviced_write = rrddim_add(st_io_serviced_write, cg->chart_id, cg->chart_title, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+
+ rrddim_set_by_pointer(st_io_serviced_write, cg->rd_io_serviced_write, cg->io_serviced.Write);
+ }
+
+ if(likely(do_throttle_io && cg->throttle_io_service_bytes.updated)) {
+ if(unlikely(!cg->rd_throttle_io_read))
+ cg->rd_throttle_io_read = rrddim_add(st_throttle_io_read, cg->chart_id, cg->chart_title, 1, 1024, RRD_ALGORITHM_INCREMENTAL);
+
+ rrddim_set_by_pointer(st_throttle_io_read, cg->rd_throttle_io_read, cg->throttle_io_service_bytes.Read);
+
+ if(unlikely(!cg->rd_throttle_io_write))
+ cg->rd_throttle_io_write = rrddim_add(st_throttle_io_write, cg->chart_id, cg->chart_title, 1, 1024, RRD_ALGORITHM_INCREMENTAL);
+
+ rrddim_set_by_pointer(st_throttle_io_write, cg->rd_throttle_io_write, cg->throttle_io_service_bytes.Write);
+ }
+
+ if(likely(do_throttle_ops && cg->throttle_io_serviced.updated)) {
+ if(unlikely(!cg->rd_throttle_io_serviced_read))
+ cg->rd_throttle_io_serviced_read = rrddim_add(st_throttle_ops_read, cg->chart_id, cg->chart_title, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+
+ rrddim_set_by_pointer(st_throttle_ops_read, cg->rd_throttle_io_serviced_read, cg->throttle_io_serviced.Read);
+
+ if(unlikely(!cg->rd_throttle_io_serviced_write))
+ cg->rd_throttle_io_serviced_write = rrddim_add(st_throttle_ops_write, cg->chart_id, cg->chart_title, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+
+ rrddim_set_by_pointer(st_throttle_ops_write, cg->rd_throttle_io_serviced_write, cg->throttle_io_serviced.Write);
+ }
+
+ if(likely(do_queued_ops && cg->io_queued.updated)) {
+ if(unlikely(!cg->rd_io_queued_read))
+ cg->rd_io_queued_read = rrddim_add(st_queued_ops_read, cg->chart_id, cg->chart_title, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+
+ rrddim_set_by_pointer(st_queued_ops_read, cg->rd_io_queued_read, cg->io_queued.Read);
+
+ if(unlikely(!cg->rd_io_queued_write))
+ cg->rd_io_queued_write = rrddim_add(st_queued_ops_write, cg->chart_id, cg->chart_title, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+
+ rrddim_set_by_pointer(st_queued_ops_write, cg->rd_io_queued_write, cg->io_queued.Write);
+ }
+
+ if(likely(do_merged_ops && cg->io_merged.updated)) {
+ if(unlikely(!cg->rd_io_merged_read))
+ cg->rd_io_merged_read = rrddim_add(st_merged_ops_read, cg->chart_id, cg->chart_title, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+
+ rrddim_set_by_pointer(st_merged_ops_read, cg->rd_io_merged_read, cg->io_merged.Read);
+
+ if(unlikely(!cg->rd_io_merged_write))
+ cg->rd_io_merged_write = rrddim_add(st_merged_ops_write, cg->chart_id, cg->chart_title, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+
+ rrddim_set_by_pointer(st_merged_ops_write, cg->rd_io_merged_write, cg->io_merged.Write);
+ }
+ }
+
+ // complete the iteration
+ if(likely(do_cpu))
+ rrdset_done(st_cpu);
+
+ if(likely(do_mem_usage))
+ rrdset_done(st_mem_usage);
+
+ if(likely(do_mem_detailed)) {
+ rrdset_done(st_mem_detailed_cache);
+ rrdset_done(st_mem_detailed_rss);
+ rrdset_done(st_mem_detailed_mapped);
+ rrdset_done(st_mem_detailed_writeback);
+ rrdset_done(st_mem_detailed_pgfault);
+ rrdset_done(st_mem_detailed_pgmajfault);
+ rrdset_done(st_mem_detailed_pgpgin);
+ rrdset_done(st_mem_detailed_pgpgout);
+ }
+
+ if(likely(do_mem_failcnt))
+ rrdset_done(st_mem_failcnt);
+
+ if(likely(do_swap_usage))
+ rrdset_done(st_swap_usage);
+
+ if(likely(do_io)) {
+ rrdset_done(st_io_read);
+ rrdset_done(st_io_write);
+ }
+
+ if(likely(do_io_ops)) {
+ rrdset_done(st_io_serviced_read);
+ rrdset_done(st_io_serviced_write);
+ }
+
+ if(likely(do_throttle_io)) {
+ rrdset_done(st_throttle_io_read);
+ rrdset_done(st_throttle_io_write);
+ }
+
+ if(likely(do_throttle_ops)) {
+ rrdset_done(st_throttle_ops_read);
+ rrdset_done(st_throttle_ops_write);
+ }
+
+ if(likely(do_queued_ops)) {
+ rrdset_done(st_queued_ops_read);
+ rrdset_done(st_queued_ops_write);
+ }
+
+ if(likely(do_merged_ops)) {
+ rrdset_done(st_merged_ops_read);
+ rrdset_done(st_merged_ops_write);
+ }
+}
+
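+// build (once per cgroup per update) the chart type prefix used for its
+// charts: "cgroup_root" for the root cgroup, "cgroup_<id>" otherwise,
+// sanitized with netdata_fix_chart_id(); the result is cached in the
+// caller-provided buffer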
+static inline char *cgroup_chart_type(char *buffer, const char *id, size_t len) {
+ if(buffer[0]) return buffer;
+
+ if(id[0] == '\0' || (id[0] == '/' && id[1] == '\0'))
+ strncpy(buffer, "cgroup_root", len);
+ else
+ snprintfz(buffer, len, "cgroup_%s", id);
+
+ netdata_fix_chart_id(buffer);
+ return buffer;
+}
+
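+// render the per-cgroup charts; systemd service cgroups are only counted
+// here and are charted collectively by update_systemd_services_charts()
+// at the end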
+void update_cgroup_charts(int update_every) {
+ debug(D_CGROUP, "updating cgroups charts");
+
+ char type[RRD_ID_LENGTH_MAX + 1];
+ char title[CHART_TITLE_MAX + 1];
+
+ int services_do_cpu = 0,
+ services_do_mem_usage = 0,
+ services_do_mem_detailed = 0,
+ services_do_mem_failcnt = 0,
+ services_do_swap_usage = 0,
+ services_do_io = 0,
+ services_do_io_ops = 0,
+ services_do_throttle_io = 0,
+ services_do_throttle_ops = 0,
+ services_do_queued_ops = 0,
+ services_do_merged_ops = 0;
+
+ struct cgroup *cg;
+ for(cg = cgroup_root; cg ; cg = cg->next) {
+ if(unlikely(!cg->available || !cg->enabled))
+ continue;
+
+ if(likely(cgroup_enable_systemd_services && cg->options & CGROUP_OPTIONS_SYSTEM_SLICE_SERVICE)) {
+ if(cg->cpuacct_stat.updated && cg->cpuacct_stat.enabled == CONFIG_BOOLEAN_YES) services_do_cpu++;
+
+ if(cgroup_enable_systemd_services_detailed_memory && cg->memory.updated_detailed && cg->memory.enabled_detailed) services_do_mem_detailed++;
+ if(cg->memory.updated_usage_in_bytes && cg->memory.enabled_usage_in_bytes == CONFIG_BOOLEAN_YES) services_do_mem_usage++;
+ if(cg->memory.updated_failcnt && cg->memory.enabled_failcnt == CONFIG_BOOLEAN_YES) services_do_mem_failcnt++;
+ if(cg->memory.updated_msw_usage_in_bytes && cg->memory.enabled_msw_usage_in_bytes == CONFIG_BOOLEAN_YES) services_do_swap_usage++;
+
+ if(cg->io_service_bytes.updated && cg->io_service_bytes.enabled == CONFIG_BOOLEAN_YES) services_do_io++;
+ if(cg->io_serviced.updated && cg->io_serviced.enabled == CONFIG_BOOLEAN_YES) services_do_io_ops++;
+ if(cg->throttle_io_service_bytes.updated && cg->throttle_io_service_bytes.enabled == CONFIG_BOOLEAN_YES) services_do_throttle_io++;
+ if(cg->throttle_io_serviced.updated && cg->throttle_io_serviced.enabled == CONFIG_BOOLEAN_YES) services_do_throttle_ops++;
+ if(cg->io_queued.updated && cg->io_queued.enabled == CONFIG_BOOLEAN_YES) services_do_queued_ops++;
+ if(cg->io_merged.updated && cg->io_merged.enabled == CONFIG_BOOLEAN_YES) services_do_merged_ops++;
+ continue;
+ }
+
+ type[0] = '\0';
+
+ if(likely(cg->cpuacct_stat.updated && cg->cpuacct_stat.enabled == CONFIG_BOOLEAN_YES)) {
+ if(unlikely(!cg->st_cpu)) {
+ snprintfz(title, CHART_TITLE_MAX, "CPU Usage (%d%% = %d core%s) for cgroup %s", (processors * 100), processors, (processors > 1) ? "s" : "", cg->chart_title);
+
+ cg->st_cpu = rrdset_create_localhost(
+ cgroup_chart_type(type, cg->chart_id, RRD_ID_LENGTH_MAX)
+ , "cpu"
+ , NULL
+ , "cpu"
+ , "cgroup.cpu"
+ , title
+ , "%"
+ , PLUGIN_CGROUPS_NAME
+ , PLUGIN_CGROUPS_MODULE_CGROUPS_NAME
+ , NETDATA_CHART_PRIO_CGROUPS_CONTAINERS
+ , update_every
+ , RRDSET_TYPE_STACKED
+ );
+
+ rrddim_add(cg->st_cpu, "user", NULL, 100, system_hz, RRD_ALGORITHM_INCREMENTAL);
+ rrddim_add(cg->st_cpu, "system", NULL, 100, system_hz, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else
+ rrdset_next(cg->st_cpu);
+
+ rrddim_set(cg->st_cpu, "user", cg->cpuacct_stat.user);
+ rrddim_set(cg->st_cpu, "system", cg->cpuacct_stat.system);
+ rrdset_done(cg->st_cpu);
+ }
+
+ if(likely(cg->cpuacct_usage.updated && cg->cpuacct_usage.enabled == CONFIG_BOOLEAN_YES)) {
+ char id[RRD_ID_LENGTH_MAX + 1];
+ unsigned int i;
+
+ if(unlikely(!cg->st_cpu_per_core)) {
+ snprintfz(title, CHART_TITLE_MAX, "CPU Usage (%d%% = %d core%s) Per Core for cgroup %s", (processors * 100), processors, (processors > 1) ? "s" : "", cg->chart_title);
+
+ cg->st_cpu_per_core = rrdset_create_localhost(
+ cgroup_chart_type(type, cg->chart_id, RRD_ID_LENGTH_MAX)
+ , "cpu_per_core"
+ , NULL
+ , "cpu"
+ , "cgroup.cpu_per_core"
+ , title
+ , "%"
+ , PLUGIN_CGROUPS_NAME
+ , PLUGIN_CGROUPS_MODULE_CGROUPS_NAME
+ , NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 100
+ , update_every
+ , RRDSET_TYPE_STACKED
+ );
+
+ for(i = 0; i < cg->cpuacct_usage.cpus; i++) {
+ snprintfz(id, RRD_ID_LENGTH_MAX, "cpu%u", i);
+ rrddim_add(cg->st_cpu_per_core, id, NULL, 100, 1000000000, RRD_ALGORITHM_INCREMENTAL);
+ }
+ }
+ else
+ rrdset_next(cg->st_cpu_per_core);
+
+ for(i = 0; i < cg->cpuacct_usage.cpus ;i++) {
+ snprintfz(id, RRD_ID_LENGTH_MAX, "cpu%u", i);
+ rrddim_set(cg->st_cpu_per_core, id, cg->cpuacct_usage.cpu_percpu[i]);
+ }
+ rrdset_done(cg->st_cpu_per_core);
+ }
+
+ if(likely(cg->memory.updated_detailed && cg->memory.enabled_detailed == CONFIG_BOOLEAN_YES)) {
+ if(unlikely(!cg->st_mem)) {
+ snprintfz(title, CHART_TITLE_MAX, "Memory Usage for cgroup %s", cg->chart_title);
+
+ cg->st_mem = rrdset_create_localhost(
+ cgroup_chart_type(type, cg->chart_id, RRD_ID_LENGTH_MAX)
+ , "mem"
+ , NULL
+ , "mem"
+ , "cgroup.mem"
+ , title
+ , "MB"
+ , PLUGIN_CGROUPS_NAME
+ , PLUGIN_CGROUPS_MODULE_CGROUPS_NAME
+ , NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 210
+ , update_every
+ , RRDSET_TYPE_STACKED
+ );
+
+ rrddim_add(cg->st_mem, "cache", NULL, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE);
+ rrddim_add(cg->st_mem, "rss", NULL, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE);
+
+ if(cg->memory.detailed_has_swap)
+ rrddim_add(cg->st_mem, "swap", NULL, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE);
+
+ rrddim_add(cg->st_mem, "rss_huge", NULL, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE);
+ rrddim_add(cg->st_mem, "mapped_file", NULL, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE);
+ }
+ else
+ rrdset_next(cg->st_mem);
+
+ rrddim_set(cg->st_mem, "cache", cg->memory.cache);
+ rrddim_set(cg->st_mem, "rss", (cg->memory.rss > cg->memory.rss_huge)?(cg->memory.rss - cg->memory.rss_huge):0);
+
+ if(cg->memory.detailed_has_swap)
+ rrddim_set(cg->st_mem, "swap", cg->memory.swap);
+
+ rrddim_set(cg->st_mem, "rss_huge", cg->memory.rss_huge);
+ rrddim_set(cg->st_mem, "mapped_file", cg->memory.mapped_file);
+ rrdset_done(cg->st_mem);
+
+ if(unlikely(!cg->st_writeback)) {
+ snprintfz(title, CHART_TITLE_MAX, "Writeback Memory for cgroup %s", cg->chart_title);
+
+ cg->st_writeback = rrdset_create_localhost(
+ cgroup_chart_type(type, cg->chart_id, RRD_ID_LENGTH_MAX)
+ , "writeback"
+ , NULL
+ , "mem"
+ , "cgroup.writeback"
+ , title
+ , "MB"
+ , PLUGIN_CGROUPS_NAME
+ , PLUGIN_CGROUPS_MODULE_CGROUPS_NAME
+ , NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 300
+ , update_every
+ , RRDSET_TYPE_AREA
+ );
+
+ if(cg->memory.detailed_has_dirty)
+ rrddim_add(cg->st_writeback, "dirty", NULL, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE);
+
+ rrddim_add(cg->st_writeback, "writeback", NULL, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE);
+ }
+ else
+ rrdset_next(cg->st_writeback);
+
+ if(cg->memory.detailed_has_dirty)
+ rrddim_set(cg->st_writeback, "dirty", cg->memory.dirty);
+
+ rrddim_set(cg->st_writeback, "writeback", cg->memory.writeback);
+ rrdset_done(cg->st_writeback);
+
+ if(unlikely(!cg->st_mem_activity)) {
+ snprintfz(title, CHART_TITLE_MAX, "Memory Activity for cgroup %s", cg->chart_title);
+
+ cg->st_mem_activity = rrdset_create_localhost(
+ cgroup_chart_type(type, cg->chart_id, RRD_ID_LENGTH_MAX)
+ , "mem_activity"
+ , NULL
+ , "mem"
+ , "cgroup.mem_activity"
+ , title
+ , "MB/s"
+ , PLUGIN_CGROUPS_NAME
+ , PLUGIN_CGROUPS_MODULE_CGROUPS_NAME
+ , NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 400
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ rrddim_add(cg->st_mem_activity, "pgpgin", "in", system_page_size, 1024 * 1024, RRD_ALGORITHM_INCREMENTAL);
+ rrddim_add(cg->st_mem_activity, "pgpgout", "out", -system_page_size, 1024 * 1024, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else
+ rrdset_next(cg->st_mem_activity);
+
+ rrddim_set(cg->st_mem_activity, "pgpgin", cg->memory.pgpgin);
+ rrddim_set(cg->st_mem_activity, "pgpgout", cg->memory.pgpgout);
+ rrdset_done(cg->st_mem_activity);
+
+ if(unlikely(!cg->st_pgfaults)) {
+ snprintfz(title, CHART_TITLE_MAX, "Memory Page Faults for cgroup %s", cg->chart_title);
+
+ cg->st_pgfaults = rrdset_create_localhost(
+ cgroup_chart_type(type, cg->chart_id, RRD_ID_LENGTH_MAX)
+ , "pgfaults"
+ , NULL
+ , "mem"
+ , "cgroup.pgfaults"
+ , title
+ , "MB/s"
+ , PLUGIN_CGROUPS_NAME
+ , PLUGIN_CGROUPS_MODULE_CGROUPS_NAME
+ , NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 500
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ rrddim_add(cg->st_pgfaults, "pgfault", NULL, system_page_size, 1024 * 1024, RRD_ALGORITHM_INCREMENTAL);
+ rrddim_add(cg->st_pgfaults, "pgmajfault", "swap", -system_page_size, 1024 * 1024, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else
+ rrdset_next(cg->st_pgfaults);
+
+ rrddim_set(cg->st_pgfaults, "pgfault", cg->memory.pgfault);
+ rrddim_set(cg->st_pgfaults, "pgmajfault", cg->memory.pgmajfault);
+ rrdset_done(cg->st_pgfaults);
+ }
+
+ if(likely(cg->memory.updated_usage_in_bytes && cg->memory.enabled_usage_in_bytes == CONFIG_BOOLEAN_YES)) {
+ if(unlikely(!cg->st_mem_usage)) {
+ snprintfz(title, CHART_TITLE_MAX, "Used Memory %sfor cgroup %s", (cgroup_used_memory_without_cache && cg->memory.updated_detailed)?"without Cache ":"", cg->chart_title);
+
+ cg->st_mem_usage = rrdset_create_localhost(
+ cgroup_chart_type(type, cg->chart_id, RRD_ID_LENGTH_MAX)
+ , "mem_usage"
+ , NULL
+ , "mem"
+ , "cgroup.mem_usage"
+ , title
+ , "MB"
+ , PLUGIN_CGROUPS_NAME
+ , PLUGIN_CGROUPS_MODULE_CGROUPS_NAME
+ , NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 200
+ , update_every
+ , RRDSET_TYPE_STACKED
+ );
+
+ rrddim_add(cg->st_mem_usage, "ram", NULL, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE);
+ rrddim_add(cg->st_mem_usage, "swap", NULL, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE);
+ }
+ else
+ rrdset_next(cg->st_mem_usage);
+
+ rrddim_set(cg->st_mem_usage, "ram", cg->memory.usage_in_bytes - ((cgroup_used_memory_without_cache)?cg->memory.cache:0));
+ rrddim_set(cg->st_mem_usage, "swap", (cg->memory.msw_usage_in_bytes > cg->memory.usage_in_bytes)?cg->memory.msw_usage_in_bytes - cg->memory.usage_in_bytes:0);
+ rrdset_done(cg->st_mem_usage);
+ }
+
+ if(likely(cg->memory.updated_failcnt && cg->memory.enabled_failcnt == CONFIG_BOOLEAN_YES)) {
+ if(unlikely(!cg->st_mem_failcnt)) {
+ snprintfz(title, CHART_TITLE_MAX, "Memory Limit Failures for cgroup %s", cg->chart_title);
+
+ cg->st_mem_failcnt = rrdset_create_localhost(
+ cgroup_chart_type(type, cg->chart_id, RRD_ID_LENGTH_MAX)
+ , "mem_failcnt"
+ , NULL
+ , "mem"
+ , "cgroup.mem_failcnt"
+ , title
+ , "count"
+ , PLUGIN_CGROUPS_NAME
+ , PLUGIN_CGROUPS_MODULE_CGROUPS_NAME
+ , NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 250
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ rrddim_add(cg->st_mem_failcnt, "failures", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else
+ rrdset_next(cg->st_mem_failcnt);
+
+ rrddim_set(cg->st_mem_failcnt, "failures", cg->memory.failcnt);
+ rrdset_done(cg->st_mem_failcnt);
+ }
+
+ if(likely(cg->io_service_bytes.updated && cg->io_service_bytes.enabled == CONFIG_BOOLEAN_YES)) {
+ if(unlikely(!cg->st_io)) {
+ snprintfz(title, CHART_TITLE_MAX, "I/O Bandwidth (all disks) for cgroup %s", cg->chart_title);
+
+ cg->st_io = rrdset_create_localhost(
+ cgroup_chart_type(type, cg->chart_id, RRD_ID_LENGTH_MAX)
+ , "io"
+ , NULL
+ , "disk"
+ , "cgroup.io"
+ , title
+ , "KB/s"
+ , PLUGIN_CGROUPS_NAME
+ , PLUGIN_CGROUPS_MODULE_CGROUPS_NAME
+ , NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 1200
+ , update_every
+ , RRDSET_TYPE_AREA
+ );
+
+ rrddim_add(cg->st_io, "read", NULL, 1, 1024, RRD_ALGORITHM_INCREMENTAL);
+ rrddim_add(cg->st_io, "write", NULL, -1, 1024, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else
+ rrdset_next(cg->st_io);
+
+ rrddim_set(cg->st_io, "read", cg->io_service_bytes.Read);
+ rrddim_set(cg->st_io, "write", cg->io_service_bytes.Write);
+ rrdset_done(cg->st_io);
+ }
+
+ if(likely(cg->io_serviced.updated && cg->io_serviced.enabled == CONFIG_BOOLEAN_YES)) {
+ if(unlikely(!cg->st_serviced_ops)) {
+ snprintfz(title, CHART_TITLE_MAX, "Serviced I/O Operations (all disks) for cgroup %s", cg->chart_title);
+
+ cg->st_serviced_ops = rrdset_create_localhost(
+ cgroup_chart_type(type, cg->chart_id, RRD_ID_LENGTH_MAX)
+ , "serviced_ops"
+ , NULL
+ , "disk"
+ , "cgroup.serviced_ops"
+ , title
+ , "operations/s"
+ , PLUGIN_CGROUPS_NAME
+ , PLUGIN_CGROUPS_MODULE_CGROUPS_NAME
+ , NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 1200
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ rrddim_add(cg->st_serviced_ops, "read", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rrddim_add(cg->st_serviced_ops, "write", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else
+ rrdset_next(cg->st_serviced_ops);
+
+ rrddim_set(cg->st_serviced_ops, "read", cg->io_serviced.Read);
+ rrddim_set(cg->st_serviced_ops, "write", cg->io_serviced.Write);
+ rrdset_done(cg->st_serviced_ops);
+ }
+
+ if(likely(cg->throttle_io_service_bytes.updated && cg->throttle_io_service_bytes.enabled == CONFIG_BOOLEAN_YES)) {
+ if(unlikely(!cg->st_throttle_io)) {
+ snprintfz(title, CHART_TITLE_MAX, "Throttle I/O Bandwidth (all disks) for cgroup %s", cg->chart_title);
+
+ cg->st_throttle_io = rrdset_create_localhost(
+ cgroup_chart_type(type, cg->chart_id, RRD_ID_LENGTH_MAX)
+ , "throttle_io"
+ , NULL
+ , "disk"
+ , "cgroup.throttle_io"
+ , title
+ , "KB/s"
+ , PLUGIN_CGROUPS_NAME
+ , PLUGIN_CGROUPS_MODULE_CGROUPS_NAME
+ , NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 1200
+ , update_every
+ , RRDSET_TYPE_AREA
+ );
+
+ rrddim_add(cg->st_throttle_io, "read", NULL, 1, 1024, RRD_ALGORITHM_INCREMENTAL);
+ rrddim_add(cg->st_throttle_io, "write", NULL, -1, 1024, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else
+ rrdset_next(cg->st_throttle_io);
+
+ rrddim_set(cg->st_throttle_io, "read", cg->throttle_io_service_bytes.Read);
+ rrddim_set(cg->st_throttle_io, "write", cg->throttle_io_service_bytes.Write);
+ rrdset_done(cg->st_throttle_io);
+ }
+
+ if(likely(cg->throttle_io_serviced.updated && cg->throttle_io_serviced.enabled == CONFIG_BOOLEAN_YES)) {
+ if(unlikely(!cg->st_throttle_serviced_ops)) {
+ snprintfz(title, CHART_TITLE_MAX, "Throttle Serviced I/O Operations (all disks) for cgroup %s", cg->chart_title);
+
+ cg->st_throttle_serviced_ops = rrdset_create_localhost(
+ cgroup_chart_type(type, cg->chart_id, RRD_ID_LENGTH_MAX)
+ , "throttle_serviced_ops"
+ , NULL
+ , "disk"
+ , "cgroup.throttle_serviced_ops"
+ , title
+ , "operations/s"
+ , PLUGIN_CGROUPS_NAME
+ , PLUGIN_CGROUPS_MODULE_CGROUPS_NAME
+ , NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 1200
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ rrddim_add(cg->st_throttle_serviced_ops, "read", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rrddim_add(cg->st_throttle_serviced_ops, "write", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else
+ rrdset_next(cg->st_throttle_serviced_ops);
+
+ rrddim_set(cg->st_throttle_serviced_ops, "read", cg->throttle_io_serviced.Read);
+ rrddim_set(cg->st_throttle_serviced_ops, "write", cg->throttle_io_serviced.Write);
+ rrdset_done(cg->st_throttle_serviced_ops);
+ }
+
+ if(likely(cg->io_queued.updated && cg->io_queued.enabled == CONFIG_BOOLEAN_YES)) {
+ if(unlikely(!cg->st_queued_ops)) {
+ snprintfz(title, CHART_TITLE_MAX, "Queued I/O Operations (all disks) for cgroup %s", cg->chart_title);
+
+ cg->st_queued_ops = rrdset_create_localhost(
+ cgroup_chart_type(type, cg->chart_id, RRD_ID_LENGTH_MAX)
+ , "queued_ops"
+ , NULL
+ , "disk"
+ , "cgroup.queued_ops"
+ , title
+ , "operations"
+ , PLUGIN_CGROUPS_NAME
+ , PLUGIN_CGROUPS_MODULE_CGROUPS_NAME
+ , NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 2000
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ rrddim_add(cg->st_queued_ops, "read", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ rrddim_add(cg->st_queued_ops, "write", NULL, -1, 1, RRD_ALGORITHM_ABSOLUTE);
+ }
+ else
+ rrdset_next(cg->st_queued_ops);
+
+ rrddim_set(cg->st_queued_ops, "read", cg->io_queued.Read);
+ rrddim_set(cg->st_queued_ops, "write", cg->io_queued.Write);
+ rrdset_done(cg->st_queued_ops);
+ }
+
+ if(likely(cg->io_merged.updated && cg->io_merged.enabled == CONFIG_BOOLEAN_YES)) {
+ if(unlikely(!cg->st_merged_ops)) {
+ snprintfz(title, CHART_TITLE_MAX, "Merged I/O Operations (all disks) for cgroup %s", cg->chart_title);
+
+ cg->st_merged_ops = rrdset_create_localhost(
+ cgroup_chart_type(type, cg->chart_id, RRD_ID_LENGTH_MAX)
+ , "merged_ops"
+ , NULL
+ , "disk"
+ , "cgroup.merged_ops"
+ , title
+ , "operations/s"
+ , PLUGIN_CGROUPS_NAME
+ , PLUGIN_CGROUPS_MODULE_CGROUPS_NAME
+ , NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 2100
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ rrddim_add(cg->st_merged_ops, "read", NULL, 1, 1024, RRD_ALGORITHM_INCREMENTAL);
+ rrddim_add(cg->st_merged_ops, "write", NULL, -1, 1024, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else
+ rrdset_next(cg->st_merged_ops);
+
+ rrddim_set(cg->st_merged_ops, "read", cg->io_merged.Read);
+ rrddim_set(cg->st_merged_ops, "write", cg->io_merged.Write);
+ rrdset_done(cg->st_merged_ops);
+ }
+ }
+
+ if(likely(cgroup_enable_systemd_services))
+ update_systemd_services_charts(update_every, services_do_cpu, services_do_mem_usage, services_do_mem_detailed
+ , services_do_mem_failcnt, services_do_swap_usage, services_do_io
+ , services_do_io_ops, services_do_throttle_io, services_do_throttle_ops
+ , services_do_queued_ops, services_do_merged_ops
+ );
+
+ debug(D_CGROUP, "done updating cgroups charts");
+}
+
+// ----------------------------------------------------------------------------
+// cgroups main
+
+static void cgroup_main_cleanup(void *ptr) {
+ struct netdata_static_thread *static_thread = (struct netdata_static_thread *)ptr;
+ static_thread->enabled = NETDATA_MAIN_THREAD_EXITING;
+
+ info("cleaning up...");
+
+ static_thread->enabled = NETDATA_MAIN_THREAD_EXITED;
+}
+
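+// thread entry point: on every heartbeat (cgroup_update_every seconds) it
+// re-discovers cgroups when cgroup_check_for_new_every seconds have passed
+// (or when the cgroups_check flag is raised), then reads all metric files
+// and updates the charts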
+void *cgroups_main(void *ptr) {
+ netdata_thread_cleanup_push(cgroup_main_cleanup, ptr);
+
+ struct rusage thread;
+
+ // when not zero, chart the resources used by the cgroups plugin itself
+ int vdo_cpu_netdata = config_get_boolean("plugin:cgroups", "cgroups plugin resource charts", 1);
+
+ read_cgroup_plugin_configuration();
+
+ RRDSET *stcpu_thread = NULL;
+
+ heartbeat_t hb;
+ heartbeat_init(&hb);
+ usec_t step = cgroup_update_every * USEC_PER_SEC;
+ usec_t find_every = cgroup_check_for_new_every * USEC_PER_SEC, find_dt = 0;
+
+ while(!netdata_exit) {
+ usec_t hb_dt = heartbeat_next(&hb, step);
+ if(unlikely(netdata_exit)) break;
+
+ // BEGIN -- the job to be done
+
+ find_dt += hb_dt;
+ if(unlikely(find_dt >= find_every || cgroups_check)) {
+ find_all_cgroups();
+ find_dt = 0;
+ cgroups_check = 0;
+ }
+
+ read_all_cgroups(cgroup_root);
+ update_cgroup_charts(cgroup_update_every);
+
+ // END -- the job is done
+
+ // --------------------------------------------------------------------
+
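+ // chart this thread's own CPU usage, as reported by getrusage(RUSAGE_THREAD)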
+ if(vdo_cpu_netdata) {
+ getrusage(RUSAGE_THREAD, &thread);
+
+ if(unlikely(!stcpu_thread)) {
+
+ stcpu_thread = rrdset_create_localhost(
+ "netdata"
+ , "plugin_cgroups_cpu"
+ , NULL
+ , "cgroups"
+ , NULL
+ , "NetData CGroups Plugin CPU usage"
+ , "milliseconds/s"
+ , PLUGIN_CGROUPS_NAME
+ , "stats"
+ , 132000
+ , cgroup_update_every
+ , RRDSET_TYPE_STACKED
+ );
+
+ rrddim_add(stcpu_thread, "user", NULL, 1, 1000, RRD_ALGORITHM_INCREMENTAL);
+ rrddim_add(stcpu_thread, "system", NULL, 1, 1000, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else
+ rrdset_next(stcpu_thread);
+
+ rrddim_set(stcpu_thread, "user" , thread.ru_utime.tv_sec * 1000000ULL + thread.ru_utime.tv_usec);
+ rrddim_set(stcpu_thread, "system", thread.ru_stime.tv_sec * 1000000ULL + thread.ru_stime.tv_usec);
+ rrdset_done(stcpu_thread);
+ }
+ }
+
+ netdata_thread_cleanup_pop(1);
+ return NULL;
+}
diff --git a/collectors/cgroups.plugin/sys_fs_cgroup.h b/collectors/cgroups.plugin/sys_fs_cgroup.h
new file mode 100644
index 000000000..09ce5e3fb
--- /dev/null
+++ b/collectors/cgroups.plugin/sys_fs_cgroup.h
@@ -0,0 +1,31 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_SYS_FS_CGROUP_H
+#define NETDATA_SYS_FS_CGROUP_H 1
+
+#include "../../daemon/common.h"
+
+#if (TARGET_OS == OS_LINUX)
+
+#define NETDATA_PLUGIN_HOOK_LINUX_CGROUPS \
+ { \
+ .name = "PLUGIN[cgroups]", \
+ .config_section = CONFIG_SECTION_PLUGINS, \
+ .config_name = "cgroups", \
+ .enabled = 1, \
+ .thread = NULL, \
+ .init_routine = NULL, \
+ .start_routine = cgroups_main \
+ },
+
+extern void *cgroups_main(void *ptr);
+
+#include "../proc.plugin/plugin_proc.h"
+
+#else // (TARGET_OS == OS_LINUX)
+
+#define NETDATA_PLUGIN_HOOK_LINUX_CGROUPS
+
+#endif // (TARGET_OS == OS_LINUX)
+
+#endif //NETDATA_SYS_FS_CGROUP_H
diff --git a/collectors/charts.d.plugin/Makefile.am b/collectors/charts.d.plugin/Makefile.am
new file mode 100644
index 000000000..e2e00258f
--- /dev/null
+++ b/collectors/charts.d.plugin/Makefile.am
@@ -0,0 +1,62 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+
+CLEANFILES = \
+ charts.d.plugin \
+ $(NULL)
+
+include $(top_srcdir)/build/subst.inc
+SUFFIXES = .in
+
+dist_libconfig_DATA = \
+ charts.d.conf \
+ $(NULL)
+
+dist_plugins_SCRIPTS = \
+ charts.d.dryrun-helper.sh \
+ charts.d.plugin \
+ loopsleepms.sh.inc \
+ $(NULL)
+
+dist_noinst_DATA = \
+ charts.d.plugin.in \
+ README.md \
+ $(NULL)
+
+dist_charts_SCRIPTS = \
+ $(NULL)
+
+dist_charts_DATA = \
+ $(NULL)
+
+userchartsconfigdir=$(configdir)/charts.d
+dist_userchartsconfig_DATA = \
+ $(top_srcdir)/installer/.keep \
+ $(NULL)
+
+chartsconfigdir=$(libconfigdir)/charts.d
+dist_chartsconfig_DATA = \
+ $(top_srcdir)/installer/.keep \
+ $(NULL)
+
+include ap/Makefile.inc
+include apache/Makefile.inc
+include apcupsd/Makefile.inc
+include cpu_apps/Makefile.inc
+include cpufreq/Makefile.inc
+include example/Makefile.inc
+include exim/Makefile.inc
+include hddtemp/Makefile.inc
+include libreswan/Makefile.inc
+include load_average/Makefile.inc
+include mem_apps/Makefile.inc
+include mysql/Makefile.inc
+include nginx/Makefile.inc
+include nut/Makefile.inc
+include opensips/Makefile.inc
+include phpfpm/Makefile.inc
+include postfix/Makefile.inc
+include sensors/Makefile.inc
+include squid/Makefile.inc
+include tomcat/Makefile.inc
diff --git a/collectors/charts.d.plugin/Makefile.in b/collectors/charts.d.plugin/Makefile.in
new file mode 100644
index 000000000..23e2edebb
--- /dev/null
+++ b/collectors/charts.d.plugin/Makefile.in
@@ -0,0 +1,953 @@
+# Makefile.in generated by automake 1.14.1 from Makefile.am.
+# @configure_input@
+
+# Copyright (C) 1994-2013 Free Software Foundation, Inc.
+
+# This Makefile.in is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
+# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE.
+
+@SET_MAKE@
+
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+
+VPATH = @srcdir@
+am__is_gnu_make = test -n '$(MAKEFILE_LIST)' && test -n '$(MAKELEVEL)'
+am__make_running_with_option = \
+ case $${target_option-} in \
+ ?) ;; \
+ *) echo "am__make_running_with_option: internal error: invalid" \
+ "target option '$${target_option-}' specified" >&2; \
+ exit 1;; \
+ esac; \
+ has_opt=no; \
+ sane_makeflags=$$MAKEFLAGS; \
+ if $(am__is_gnu_make); then \
+ sane_makeflags=$$MFLAGS; \
+ else \
+ case $$MAKEFLAGS in \
+ *\\[\ \ ]*) \
+ bs=\\; \
+ sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \
+ | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \
+ esac; \
+ fi; \
+ skip_next=no; \
+ strip_trailopt () \
+ { \
+ flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \
+ }; \
+ for flg in $$sane_makeflags; do \
+ test $$skip_next = yes && { skip_next=no; continue; }; \
+ case $$flg in \
+ *=*|--*) continue;; \
+ -*I) strip_trailopt 'I'; skip_next=yes;; \
+ -*I?*) strip_trailopt 'I';; \
+ -*O) strip_trailopt 'O'; skip_next=yes;; \
+ -*O?*) strip_trailopt 'O';; \
+ -*l) strip_trailopt 'l'; skip_next=yes;; \
+ -*l?*) strip_trailopt 'l';; \
+ -[dEDm]) skip_next=yes;; \
+ -[JT]) skip_next=yes;; \
+ esac; \
+ case $$flg in \
+ *$$target_option*) has_opt=yes; break;; \
+ esac; \
+ done; \
+ test $$has_opt = yes
+am__make_dryrun = (target_option=n; $(am__make_running_with_option))
+am__make_keepgoing = (target_option=k; $(am__make_running_with_option))
+pkgdatadir = $(datadir)/@PACKAGE@
+pkgincludedir = $(includedir)/@PACKAGE@
+pkglibdir = $(libdir)/@PACKAGE@
+pkglibexecdir = $(libexecdir)/@PACKAGE@
+am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd
+install_sh_DATA = $(install_sh) -c -m 644
+install_sh_PROGRAM = $(install_sh) -c
+install_sh_SCRIPT = $(install_sh) -c
+INSTALL_HEADER = $(INSTALL_DATA)
+transform = $(program_transform_name)
+NORMAL_INSTALL = :
+PRE_INSTALL = :
+POST_INSTALL = :
+NORMAL_UNINSTALL = :
+PRE_UNINSTALL = :
+POST_UNINSTALL = :
+build_triplet = @build@
+host_triplet = @host@
+DIST_COMMON = $(top_srcdir)/build/subst.inc $(srcdir)/ap/Makefile.inc \
+ $(srcdir)/apache/Makefile.inc $(srcdir)/apcupsd/Makefile.inc \
+ $(srcdir)/cpu_apps/Makefile.inc $(srcdir)/cpufreq/Makefile.inc \
+ $(srcdir)/example/Makefile.inc $(srcdir)/exim/Makefile.inc \
+ $(srcdir)/hddtemp/Makefile.inc \
+ $(srcdir)/libreswan/Makefile.inc \
+ $(srcdir)/load_average/Makefile.inc \
+ $(srcdir)/mem_apps/Makefile.inc $(srcdir)/mysql/Makefile.inc \
+ $(srcdir)/nginx/Makefile.inc $(srcdir)/nut/Makefile.inc \
+ $(srcdir)/opensips/Makefile.inc $(srcdir)/phpfpm/Makefile.inc \
+ $(srcdir)/postfix/Makefile.inc $(srcdir)/sensors/Makefile.inc \
+ $(srcdir)/squid/Makefile.inc $(srcdir)/tomcat/Makefile.inc \
+ $(srcdir)/Makefile.in $(srcdir)/Makefile.am \
+ $(dist_charts_SCRIPTS) $(dist_plugins_SCRIPTS) \
+ $(dist_charts_DATA) $(dist_chartsconfig_DATA) \
+ $(dist_libconfig_DATA) $(dist_noinst_DATA) \
+ $(dist_userchartsconfig_DATA)
+subdir = collectors/charts.d.plugin
+ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
+am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \
+ $(top_srcdir)/build/m4/ax_c__generic.m4 \
+ $(top_srcdir)/build/m4/ax_c_lto.m4 \
+ $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \
+ $(top_srcdir)/build/m4/ax_c_mallopt.m4 \
+ $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \
+ $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \
+ $(top_srcdir)/build/m4/ax_pthread.m4 \
+ $(top_srcdir)/build/m4/jemalloc.m4 \
+ $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac
+am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
+ $(ACLOCAL_M4)
+mkinstalldirs = $(install_sh) -d
+CONFIG_HEADER = $(top_builddir)/config.h
+CONFIG_CLEAN_FILES =
+CONFIG_CLEAN_VPATH_FILES =
+am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`;
+am__vpath_adj = case $$p in \
+ $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \
+ *) f=$$p;; \
+ esac;
+am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`;
+am__install_max = 40
+am__nobase_strip_setup = \
+ srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'`
+am__nobase_strip = \
+ for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||"
+am__nobase_list = $(am__nobase_strip_setup); \
+ for p in $$list; do echo "$$p $$p"; done | \
+ sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \
+ $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \
+ if (++n[$$2] == $(am__install_max)) \
+ { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \
+ END { for (dir in files) print dir, files[dir] }'
+am__base_list = \
+ sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \
+ sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g'
+am__uninstall_files_from_dir = { \
+ test -z "$$files" \
+ || { test ! -d "$$dir" && test ! -f "$$dir" && test ! -r "$$dir"; } \
+ || { echo " ( cd '$$dir' && rm -f" $$files ")"; \
+ $(am__cd) "$$dir" && rm -f $$files; }; \
+ }
+am__installdirs = "$(DESTDIR)$(chartsdir)" "$(DESTDIR)$(pluginsdir)" \
+ "$(DESTDIR)$(chartsdir)" "$(DESTDIR)$(chartsconfigdir)" \
+ "$(DESTDIR)$(libconfigdir)" "$(DESTDIR)$(userchartsconfigdir)"
+SCRIPTS = $(dist_charts_SCRIPTS) $(dist_plugins_SCRIPTS)
+AM_V_P = $(am__v_P_@AM_V@)
+am__v_P_ = $(am__v_P_@AM_DEFAULT_V@)
+am__v_P_0 = false
+am__v_P_1 = :
+AM_V_GEN = $(am__v_GEN_@AM_V@)
+am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@)
+am__v_GEN_0 = @echo " GEN " $@;
+am__v_GEN_1 =
+AM_V_at = $(am__v_at_@AM_V@)
+am__v_at_ = $(am__v_at_@AM_DEFAULT_V@)
+am__v_at_0 = @
+am__v_at_1 =
+SOURCES =
+DIST_SOURCES =
+am__can_run_installinfo = \
+ case $$AM_UPDATE_INFO_DIR in \
+ n|no|NO) false;; \
+ *) (install-info --version) >/dev/null 2>&1;; \
+ esac
+DATA = $(dist_charts_DATA) $(dist_chartsconfig_DATA) \
+ $(dist_libconfig_DATA) $(dist_noinst_DATA) \
+ $(dist_userchartsconfig_DATA)
+am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP)
+DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
+ACLOCAL = @ACLOCAL@
+AMTAR = @AMTAR@
+AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@
+AUTOCONF = @AUTOCONF@
+AUTOHEADER = @AUTOHEADER@
+AUTOMAKE = @AUTOMAKE@
+AWK = @AWK@
+CC = @CC@
+CCDEPMODE = @CCDEPMODE@
+CFLAGS = @CFLAGS@
+CPP = @CPP@
+CPPFLAGS = @CPPFLAGS@
+CYGPATH_W = @CYGPATH_W@
+DEFS = @DEFS@
+DEPDIR = @DEPDIR@
+ECHO_C = @ECHO_C@
+ECHO_N = @ECHO_N@
+ECHO_T = @ECHO_T@
+EGREP = @EGREP@
+EXEEXT = @EXEEXT@
+GREP = @GREP@
+INSTALL = @INSTALL@
+INSTALL_DATA = @INSTALL_DATA@
+INSTALL_PROGRAM = @INSTALL_PROGRAM@
+INSTALL_SCRIPT = @INSTALL_SCRIPT@
+INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@
+IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@
+LDFLAGS = @LDFLAGS@
+LIBCAP_CFLAGS = @LIBCAP_CFLAGS@
+LIBCAP_LIBS = @LIBCAP_LIBS@
+LIBMNL_CFLAGS = @LIBMNL_CFLAGS@
+LIBMNL_LIBS = @LIBMNL_LIBS@
+LIBOBJS = @LIBOBJS@
+LIBS = @LIBS@
+LTLIBOBJS = @LTLIBOBJS@
+MAINT = @MAINT@
+MAKEINFO = @MAKEINFO@
+MATH_CFLAGS = @MATH_CFLAGS@
+MATH_LIBS = @MATH_LIBS@
+MKDIR_P = @MKDIR_P@
+NFACCT_CFLAGS = @NFACCT_CFLAGS@
+NFACCT_LIBS = @NFACCT_LIBS@
+OBJEXT = @OBJEXT@
+OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@
+OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@
+OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@
+OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@
+OPTIONAL_MATH_CLFAGS = @OPTIONAL_MATH_CLFAGS@
+OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@
+OPTIONAL_NFACCT_CLFAGS = @OPTIONAL_NFACCT_CLFAGS@
+OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@
+OPTIONAL_UUID_CLFAGS = @OPTIONAL_UUID_CLFAGS@
+OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@
+OPTIONAL_ZLIB_CLFAGS = @OPTIONAL_ZLIB_CLFAGS@
+OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@
+PACKAGE = @PACKAGE@
+PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@
+PACKAGE_NAME = @PACKAGE_NAME@
+PACKAGE_RPM_RELEASE = @PACKAGE_RPM_RELEASE@
+PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@
+PACKAGE_STRING = @PACKAGE_STRING@
+PACKAGE_TARNAME = @PACKAGE_TARNAME@
+PACKAGE_URL = @PACKAGE_URL@
+PACKAGE_VERSION = @PACKAGE_VERSION@
+PATH_SEPARATOR = @PATH_SEPARATOR@
+PKG_CONFIG = @PKG_CONFIG@
+PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@
+PKG_CONFIG_PATH = @PKG_CONFIG_PATH@
+PTHREAD_CC = @PTHREAD_CC@
+PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
+PTHREAD_LIBS = @PTHREAD_LIBS@
+SET_MAKE = @SET_MAKE@
+SHELL = @SHELL@
+SSE_CANDIDATE = @SSE_CANDIDATE@
+STRIP = @STRIP@
+UUID_CFLAGS = @UUID_CFLAGS@
+UUID_LIBS = @UUID_LIBS@
+VERSION = @VERSION@
+ZLIB_CFLAGS = @ZLIB_CFLAGS@
+ZLIB_LIBS = @ZLIB_LIBS@
+abs_builddir = @abs_builddir@
+abs_srcdir = @abs_srcdir@
+abs_top_builddir = @abs_top_builddir@
+abs_top_srcdir = @abs_top_srcdir@
+ac_ct_CC = @ac_ct_CC@
+am__include = @am__include@
+am__leading_dot = @am__leading_dot@
+am__quote = @am__quote@
+am__tar = @am__tar@
+am__untar = @am__untar@
+ax_pthread_config = @ax_pthread_config@
+bindir = @bindir@
+build = @build@
+build_alias = @build_alias@
+build_cpu = @build_cpu@
+build_os = @build_os@
+build_target = @build_target@
+build_vendor = @build_vendor@
+builddir = @builddir@
+cachedir = @cachedir@
+chartsdir = @chartsdir@
+configdir = @configdir@
+datadir = @datadir@
+datarootdir = @datarootdir@
+docdir = @docdir@
+dvidir = @dvidir@
+exec_prefix = @exec_prefix@
+has_jemalloc = @has_jemalloc@
+has_tcmalloc = @has_tcmalloc@
+host = @host@
+host_alias = @host_alias@
+host_cpu = @host_cpu@
+host_os = @host_os@
+host_vendor = @host_vendor@
+htmldir = @htmldir@
+includedir = @includedir@
+infodir = @infodir@
+install_sh = @install_sh@
+libconfigdir = @libconfigdir@
+libdir = @libdir@
+libexecdir = @libexecdir@
+localedir = @localedir@
+localstatedir = @localstatedir@
+logdir = @logdir@
+mandir = @mandir@
+mkdir_p = @mkdir_p@
+nodedir = @nodedir@
+oldincludedir = @oldincludedir@
+pdfdir = @pdfdir@
+pluginsdir = @pluginsdir@
+prefix = @prefix@
+program_transform_name = @program_transform_name@
+psdir = @psdir@
+pythondir = @pythondir@
+registrydir = @registrydir@
+sbindir = @sbindir@
+sharedstatedir = @sharedstatedir@
+srcdir = @srcdir@
+sysconfdir = @sysconfdir@
+target_alias = @target_alias@
+top_build_prefix = @top_build_prefix@
+top_builddir = @top_builddir@
+top_srcdir = @top_srcdir@
+varlibdir = @varlibdir@
+webdir = @webdir@
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+CLEANFILES = \
+ charts.d.plugin \
+ $(NULL)
+
+SUFFIXES = .in
+dist_libconfig_DATA = \
+ charts.d.conf \
+ $(NULL)
+
+dist_plugins_SCRIPTS = \
+ charts.d.dryrun-helper.sh \
+ charts.d.plugin \
+ loopsleepms.sh.inc \
+ $(NULL)
+
+
+# do not install these files, but include them in the distribution
+
+# do not install these files, but include them in the distribution
+
+# do not install these files, but include them in the distribution
+
+# do not install these files, but include them in the distribution
+
+# do not install these files, but include them in the distribution
+
+# do not install these files, but include them in the distribution
+
+# do not install these files, but include them in the distribution
+
+# do not install these files, but include them in the distribution
+
+# do not install these files, but include them in the distribution
+
+# do not install these files, but include them in the distribution
+
+# do not install these files, but include them in the distribution
+
+# do not install these files, but include them in the distribution
+
+# do not install these files, but include them in the distribution
+
+# do not install these files, but include them in the distribution
+
+# do not install these files, but include them in the distribution
+
+# do not install these files, but include them in the distribution
+
+# do not install these files, but include them in the distribution
+
+# do not install these files, but include them in the distribution
+
+# do not install these files, but include them in the distribution
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA = charts.d.plugin.in README.md $(NULL) ap/README.md \
+ ap/Makefile.inc apache/README.md apache/Makefile.inc \
+ apcupsd/README.md apcupsd/Makefile.inc cpu_apps/README.md \
+ cpu_apps/Makefile.inc cpufreq/README.md cpufreq/Makefile.inc \
+ example/README.md example/Makefile.inc exim/README.md \
+ exim/Makefile.inc hddtemp/README.md hddtemp/Makefile.inc \
+ libreswan/README.md libreswan/Makefile.inc \
+ load_average/README.md load_average/Makefile.inc \
+ mem_apps/README.md mem_apps/Makefile.inc mysql/README.md \
+ mysql/Makefile.inc nginx/README.md nginx/Makefile.inc \
+ nut/README.md nut/Makefile.inc opensips/README.md \
+ opensips/Makefile.inc phpfpm/README.md phpfpm/Makefile.inc \
+ postfix/README.md postfix/Makefile.inc sensors/README.md \
+ sensors/Makefile.inc squid/README.md squid/Makefile.inc \
+ tomcat/README.md tomcat/Makefile.inc
+dist_charts_SCRIPTS = \
+ $(NULL)
+
+
+# install these files
+
+# install these files
+
+# install these files
+
+# install these files
+
+# install these files
+
+# install these files
+
+# install these files
+
+# install these files
+
+# install these files
+
+# install these files
+
+# install these files
+
+# install these files
+
+# install these files
+
+# install these files
+
+# install these files
+
+# install these files
+
+# install these files
+
+# install these files
+
+# install these files
+
+# install these files
+dist_charts_DATA = $(NULL) ap/ap.chart.sh apache/apache.chart.sh \
+ apcupsd/apcupsd.chart.sh cpu_apps/cpu_apps.chart.sh \
+ cpufreq/cpufreq.chart.sh example/example.chart.sh \
+ exim/exim.chart.sh hddtemp/hddtemp.chart.sh \
+ libreswan/libreswan.chart.sh \
+ load_average/load_average.chart.sh mem_apps/mem_apps.chart.sh \
+ mysql/mysql.chart.sh nginx/nginx.chart.sh nut/nut.chart.sh \
+ opensips/opensips.chart.sh phpfpm/phpfpm.chart.sh \
+ postfix/postfix.chart.sh sensors/sensors.chart.sh \
+ squid/squid.chart.sh tomcat/tomcat.chart.sh
+userchartsconfigdir = $(configdir)/charts.d
+dist_userchartsconfig_DATA = \
+ $(top_srcdir)/installer/.keep \
+ $(NULL)
+
+chartsconfigdir = $(libconfigdir)/charts.d
+dist_chartsconfig_DATA = $(top_srcdir)/installer/.keep $(NULL) \
+ ap/ap.conf apache/apache.conf apcupsd/apcupsd.conf \
+ cpu_apps/cpu_apps.conf cpufreq/cpufreq.conf \
+ example/example.conf exim/exim.conf hddtemp/hddtemp.conf \
+ libreswan/libreswan.conf load_average/load_average.conf \
+ mem_apps/mem_apps.conf mysql/mysql.conf nginx/nginx.conf \
+ nut/nut.conf opensips/opensips.conf phpfpm/phpfpm.conf \
+ postfix/postfix.conf sensors/sensors.conf squid/squid.conf \
+ tomcat/tomcat.conf
+all: all-am
+
+.SUFFIXES:
+.SUFFIXES: .in
+$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(top_srcdir)/build/subst.inc $(srcdir)/ap/Makefile.inc $(srcdir)/apache/Makefile.inc $(srcdir)/apcupsd/Makefile.inc $(srcdir)/cpu_apps/Makefile.inc $(srcdir)/cpufreq/Makefile.inc $(srcdir)/example/Makefile.inc $(srcdir)/exim/Makefile.inc $(srcdir)/hddtemp/Makefile.inc $(srcdir)/libreswan/Makefile.inc $(srcdir)/load_average/Makefile.inc $(srcdir)/mem_apps/Makefile.inc $(srcdir)/mysql/Makefile.inc $(srcdir)/nginx/Makefile.inc $(srcdir)/nut/Makefile.inc $(srcdir)/opensips/Makefile.inc $(srcdir)/phpfpm/Makefile.inc $(srcdir)/postfix/Makefile.inc $(srcdir)/sensors/Makefile.inc $(srcdir)/squid/Makefile.inc $(srcdir)/tomcat/Makefile.inc $(am__configure_deps)
+ @for dep in $?; do \
+ case '$(am__configure_deps)' in \
+ *$$dep*) \
+ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \
+ && { if test -f $@; then exit 0; else break; fi; }; \
+ exit 1;; \
+ esac; \
+ done; \
+ echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu collectors/charts.d.plugin/Makefile'; \
+ $(am__cd) $(top_srcdir) && \
+ $(AUTOMAKE) --gnu collectors/charts.d.plugin/Makefile
+.PRECIOUS: Makefile
+Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
+ @case '$?' in \
+ *config.status*) \
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \
+ *) \
+ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \
+ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \
+ esac;
+$(top_srcdir)/build/subst.inc $(srcdir)/ap/Makefile.inc $(srcdir)/apache/Makefile.inc $(srcdir)/apcupsd/Makefile.inc $(srcdir)/cpu_apps/Makefile.inc $(srcdir)/cpufreq/Makefile.inc $(srcdir)/example/Makefile.inc $(srcdir)/exim/Makefile.inc $(srcdir)/hddtemp/Makefile.inc $(srcdir)/libreswan/Makefile.inc $(srcdir)/load_average/Makefile.inc $(srcdir)/mem_apps/Makefile.inc $(srcdir)/mysql/Makefile.inc $(srcdir)/nginx/Makefile.inc $(srcdir)/nut/Makefile.inc $(srcdir)/opensips/Makefile.inc $(srcdir)/phpfpm/Makefile.inc $(srcdir)/postfix/Makefile.inc $(srcdir)/sensors/Makefile.inc $(srcdir)/squid/Makefile.inc $(srcdir)/tomcat/Makefile.inc:
+
+$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+
+$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(am__aclocal_m4_deps):
+install-dist_chartsSCRIPTS: $(dist_charts_SCRIPTS)
+ @$(NORMAL_INSTALL)
+ @list='$(dist_charts_SCRIPTS)'; test -n "$(chartsdir)" || list=; \
+ if test -n "$$list"; then \
+ echo " $(MKDIR_P) '$(DESTDIR)$(chartsdir)'"; \
+ $(MKDIR_P) "$(DESTDIR)$(chartsdir)" || exit 1; \
+ fi; \
+ for p in $$list; do \
+ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
+ if test -f "$$d$$p"; then echo "$$d$$p"; echo "$$p"; else :; fi; \
+ done | \
+ sed -e 'p;s,.*/,,;n' \
+ -e 'h;s|.*|.|' \
+ -e 'p;x;s,.*/,,;$(transform)' | sed 'N;N;N;s,\n, ,g' | \
+ $(AWK) 'BEGIN { files["."] = ""; dirs["."] = 1; } \
+ { d=$$3; if (dirs[d] != 1) { print "d", d; dirs[d] = 1 } \
+ if ($$2 == $$4) { files[d] = files[d] " " $$1; \
+ if (++n[d] == $(am__install_max)) { \
+ print "f", d, files[d]; n[d] = 0; files[d] = "" } } \
+ else { print "f", d "/" $$4, $$1 } } \
+ END { for (d in files) print "f", d, files[d] }' | \
+ while read type dir files; do \
+ if test "$$dir" = .; then dir=; else dir=/$$dir; fi; \
+ test -z "$$files" || { \
+ echo " $(INSTALL_SCRIPT) $$files '$(DESTDIR)$(chartsdir)$$dir'"; \
+ $(INSTALL_SCRIPT) $$files "$(DESTDIR)$(chartsdir)$$dir" || exit $$?; \
+ } \
+ ; done
+
+uninstall-dist_chartsSCRIPTS:
+ @$(NORMAL_UNINSTALL)
+ @list='$(dist_charts_SCRIPTS)'; test -n "$(chartsdir)" || exit 0; \
+ files=`for p in $$list; do echo "$$p"; done | \
+ sed -e 's,.*/,,;$(transform)'`; \
+ dir='$(DESTDIR)$(chartsdir)'; $(am__uninstall_files_from_dir)
+install-dist_pluginsSCRIPTS: $(dist_plugins_SCRIPTS)
+ @$(NORMAL_INSTALL)
+ @list='$(dist_plugins_SCRIPTS)'; test -n "$(pluginsdir)" || list=; \
+ if test -n "$$list"; then \
+ echo " $(MKDIR_P) '$(DESTDIR)$(pluginsdir)'"; \
+ $(MKDIR_P) "$(DESTDIR)$(pluginsdir)" || exit 1; \
+ fi; \
+ for p in $$list; do \
+ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
+ if test -f "$$d$$p"; then echo "$$d$$p"; echo "$$p"; else :; fi; \
+ done | \
+ sed -e 'p;s,.*/,,;n' \
+ -e 'h;s|.*|.|' \
+ -e 'p;x;s,.*/,,;$(transform)' | sed 'N;N;N;s,\n, ,g' | \
+ $(AWK) 'BEGIN { files["."] = ""; dirs["."] = 1; } \
+ { d=$$3; if (dirs[d] != 1) { print "d", d; dirs[d] = 1 } \
+ if ($$2 == $$4) { files[d] = files[d] " " $$1; \
+ if (++n[d] == $(am__install_max)) { \
+ print "f", d, files[d]; n[d] = 0; files[d] = "" } } \
+ else { print "f", d "/" $$4, $$1 } } \
+ END { for (d in files) print "f", d, files[d] }' | \
+ while read type dir files; do \
+ if test "$$dir" = .; then dir=; else dir=/$$dir; fi; \
+ test -z "$$files" || { \
+ echo " $(INSTALL_SCRIPT) $$files '$(DESTDIR)$(pluginsdir)$$dir'"; \
+ $(INSTALL_SCRIPT) $$files "$(DESTDIR)$(pluginsdir)$$dir" || exit $$?; \
+ } \
+ ; done
+
+uninstall-dist_pluginsSCRIPTS:
+ @$(NORMAL_UNINSTALL)
+ @list='$(dist_plugins_SCRIPTS)'; test -n "$(pluginsdir)" || exit 0; \
+ files=`for p in $$list; do echo "$$p"; done | \
+ sed -e 's,.*/,,;$(transform)'`; \
+ dir='$(DESTDIR)$(pluginsdir)'; $(am__uninstall_files_from_dir)
+install-dist_chartsDATA: $(dist_charts_DATA)
+ @$(NORMAL_INSTALL)
+ @list='$(dist_charts_DATA)'; test -n "$(chartsdir)" || list=; \
+ if test -n "$$list"; then \
+ echo " $(MKDIR_P) '$(DESTDIR)$(chartsdir)'"; \
+ $(MKDIR_P) "$(DESTDIR)$(chartsdir)" || exit 1; \
+ fi; \
+ for p in $$list; do \
+ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
+ echo "$$d$$p"; \
+ done | $(am__base_list) | \
+ while read files; do \
+ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(chartsdir)'"; \
+ $(INSTALL_DATA) $$files "$(DESTDIR)$(chartsdir)" || exit $$?; \
+ done
+
+uninstall-dist_chartsDATA:
+ @$(NORMAL_UNINSTALL)
+ @list='$(dist_charts_DATA)'; test -n "$(chartsdir)" || list=; \
+ files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
+ dir='$(DESTDIR)$(chartsdir)'; $(am__uninstall_files_from_dir)
+install-dist_chartsconfigDATA: $(dist_chartsconfig_DATA)
+ @$(NORMAL_INSTALL)
+ @list='$(dist_chartsconfig_DATA)'; test -n "$(chartsconfigdir)" || list=; \
+ if test -n "$$list"; then \
+ echo " $(MKDIR_P) '$(DESTDIR)$(chartsconfigdir)'"; \
+ $(MKDIR_P) "$(DESTDIR)$(chartsconfigdir)" || exit 1; \
+ fi; \
+ for p in $$list; do \
+ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
+ echo "$$d$$p"; \
+ done | $(am__base_list) | \
+ while read files; do \
+ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(chartsconfigdir)'"; \
+ $(INSTALL_DATA) $$files "$(DESTDIR)$(chartsconfigdir)" || exit $$?; \
+ done
+
+uninstall-dist_chartsconfigDATA:
+ @$(NORMAL_UNINSTALL)
+ @list='$(dist_chartsconfig_DATA)'; test -n "$(chartsconfigdir)" || list=; \
+ files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
+ dir='$(DESTDIR)$(chartsconfigdir)'; $(am__uninstall_files_from_dir)
+install-dist_libconfigDATA: $(dist_libconfig_DATA)
+ @$(NORMAL_INSTALL)
+ @list='$(dist_libconfig_DATA)'; test -n "$(libconfigdir)" || list=; \
+ if test -n "$$list"; then \
+ echo " $(MKDIR_P) '$(DESTDIR)$(libconfigdir)'"; \
+ $(MKDIR_P) "$(DESTDIR)$(libconfigdir)" || exit 1; \
+ fi; \
+ for p in $$list; do \
+ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
+ echo "$$d$$p"; \
+ done | $(am__base_list) | \
+ while read files; do \
+ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(libconfigdir)'"; \
+ $(INSTALL_DATA) $$files "$(DESTDIR)$(libconfigdir)" || exit $$?; \
+ done
+
+uninstall-dist_libconfigDATA:
+ @$(NORMAL_UNINSTALL)
+ @list='$(dist_libconfig_DATA)'; test -n "$(libconfigdir)" || list=; \
+ files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
+ dir='$(DESTDIR)$(libconfigdir)'; $(am__uninstall_files_from_dir)
+install-dist_userchartsconfigDATA: $(dist_userchartsconfig_DATA)
+ @$(NORMAL_INSTALL)
+ @list='$(dist_userchartsconfig_DATA)'; test -n "$(userchartsconfigdir)" || list=; \
+ if test -n "$$list"; then \
+ echo " $(MKDIR_P) '$(DESTDIR)$(userchartsconfigdir)'"; \
+ $(MKDIR_P) "$(DESTDIR)$(userchartsconfigdir)" || exit 1; \
+ fi; \
+ for p in $$list; do \
+ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
+ echo "$$d$$p"; \
+ done | $(am__base_list) | \
+ while read files; do \
+ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(userchartsconfigdir)'"; \
+ $(INSTALL_DATA) $$files "$(DESTDIR)$(userchartsconfigdir)" || exit $$?; \
+ done
+
+uninstall-dist_userchartsconfigDATA:
+ @$(NORMAL_UNINSTALL)
+ @list='$(dist_userchartsconfig_DATA)'; test -n "$(userchartsconfigdir)" || list=; \
+ files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
+ dir='$(DESTDIR)$(userchartsconfigdir)'; $(am__uninstall_files_from_dir)
+tags TAGS:
+
+ctags CTAGS:
+
+cscope cscopelist:
+
+
+distdir: $(DISTFILES)
+ @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+ list='$(DISTFILES)'; \
+ dist_files=`for file in $$list; do echo $$file; done | \
+ sed -e "s|^$$srcdirstrip/||;t" \
+ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \
+ case $$dist_files in \
+ */*) $(MKDIR_P) `echo "$$dist_files" | \
+ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \
+ sort -u` ;; \
+ esac; \
+ for file in $$dist_files; do \
+ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \
+ if test -d $$d/$$file; then \
+ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \
+ if test -d "$(distdir)/$$file"; then \
+ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+ fi; \
+ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \
+ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \
+ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+ fi; \
+ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \
+ else \
+ test -f "$(distdir)/$$file" \
+ || cp -p $$d/$$file "$(distdir)/$$file" \
+ || exit 1; \
+ fi; \
+ done
+check-am: all-am
+check: check-am
+all-am: Makefile $(SCRIPTS) $(DATA)
+installdirs:
+ for dir in "$(DESTDIR)$(chartsdir)" "$(DESTDIR)$(pluginsdir)" "$(DESTDIR)$(chartsdir)" "$(DESTDIR)$(chartsconfigdir)" "$(DESTDIR)$(libconfigdir)" "$(DESTDIR)$(userchartsconfigdir)"; do \
+ test -z "$$dir" || $(MKDIR_P) "$$dir"; \
+ done
+install: install-am
+install-exec: install-exec-am
+install-data: install-data-am
+uninstall: uninstall-am
+
+install-am: all-am
+ @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
+
+installcheck: installcheck-am
+install-strip:
+ if test -z '$(STRIP)'; then \
+ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+ install; \
+ else \
+ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+ "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \
+ fi
+mostlyclean-generic:
+
+clean-generic:
+ -test -z "$(CLEANFILES)" || rm -f $(CLEANFILES)
+
+distclean-generic:
+ -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
+ -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES)
+
+maintainer-clean-generic:
+ @echo "This command is intended for maintainers to use"
+ @echo "it deletes files that may require special tools to rebuild."
+ -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES)
+clean: clean-am
+
+clean-am: clean-generic mostlyclean-am
+
+distclean: distclean-am
+ -rm -f Makefile
+distclean-am: clean-am distclean-generic
+
+dvi: dvi-am
+
+dvi-am:
+
+html: html-am
+
+html-am:
+
+info: info-am
+
+info-am:
+
+install-data-am: install-dist_chartsDATA install-dist_chartsSCRIPTS \
+ install-dist_chartsconfigDATA install-dist_libconfigDATA \
+ install-dist_pluginsSCRIPTS install-dist_userchartsconfigDATA
+
+install-dvi: install-dvi-am
+
+install-dvi-am:
+
+install-exec-am:
+
+install-html: install-html-am
+
+install-html-am:
+
+install-info: install-info-am
+
+install-info-am:
+
+install-man:
+
+install-pdf: install-pdf-am
+
+install-pdf-am:
+
+install-ps: install-ps-am
+
+install-ps-am:
+
+installcheck-am:
+
+maintainer-clean: maintainer-clean-am
+ -rm -f Makefile
+maintainer-clean-am: distclean-am maintainer-clean-generic
+
+mostlyclean: mostlyclean-am
+
+mostlyclean-am: mostlyclean-generic
+
+pdf: pdf-am
+
+pdf-am:
+
+ps: ps-am
+
+ps-am:
+
+uninstall-am: uninstall-dist_chartsDATA uninstall-dist_chartsSCRIPTS \
+ uninstall-dist_chartsconfigDATA uninstall-dist_libconfigDATA \
+ uninstall-dist_pluginsSCRIPTS \
+ uninstall-dist_userchartsconfigDATA
+
+.MAKE: install-am install-strip
+
+.PHONY: all all-am check check-am clean clean-generic cscopelist-am \
+ ctags-am distclean distclean-generic distdir dvi dvi-am html \
+ html-am info info-am install install-am install-data \
+ install-data-am install-dist_chartsDATA \
+ install-dist_chartsSCRIPTS install-dist_chartsconfigDATA \
+ install-dist_libconfigDATA install-dist_pluginsSCRIPTS \
+ install-dist_userchartsconfigDATA install-dvi install-dvi-am \
+ install-exec install-exec-am install-html install-html-am \
+ install-info install-info-am install-man install-pdf \
+ install-pdf-am install-ps install-ps-am install-strip \
+ installcheck installcheck-am installdirs maintainer-clean \
+ maintainer-clean-generic mostlyclean mostlyclean-generic pdf \
+ pdf-am ps ps-am tags-am uninstall uninstall-am \
+ uninstall-dist_chartsDATA uninstall-dist_chartsSCRIPTS \
+ uninstall-dist_chartsconfigDATA uninstall-dist_libconfigDATA \
+ uninstall-dist_pluginsSCRIPTS \
+ uninstall-dist_userchartsconfigDATA
+
+.in:
+ if sed \
+ -e 's#[@]localstatedir_POST@#$(localstatedir)#g' \
+ -e 's#[@]sbindir_POST@#$(sbindir)#g' \
+ -e 's#[@]sysconfdir_POST@#$(sysconfdir)#g' \
+ -e 's#[@]pythondir_POST@#$(pythondir)#g' \
+ -e 's#[@]configdir_POST@#$(configdir)#g' \
+ -e 's#[@]libconfigdir_POST@#$(libconfigdir)#g' \
+ -e 's#[@]cachedir_POST@#$(cachedir)#g' \
+ $< > $@.tmp; then \
+ mv "$@.tmp" "$@"; \
+ else \
+ rm -f "$@.tmp"; \
+ false; \
+ fi
+
+# Tell versions [3.59,3.63) of GNU make to not export all variables.
+# Otherwise a system limit (for SysV at least) may be exceeded.
+.NOEXPORT:
diff --git a/collectors/charts.d.plugin/README.md b/collectors/charts.d.plugin/README.md
new file mode 100644
index 000000000..b224bffe3
--- /dev/null
+++ b/collectors/charts.d.plugin/README.md
@@ -0,0 +1,193 @@
+# charts.d.plugin
+
+`charts.d.plugin` is a netdata external plugin. It is an **orchestrator** for data collection modules written in `BASH` v4+.
+
+1. It runs as an independent process (`ps fax` shows it)
+2. It is started and stopped automatically by netdata
+3. It communicates with netdata via a unidirectional pipe (sending data to the netdata daemon)
+4. It supports any number of data collection **modules**
+
+`charts.d.plugin` has been designed so that the actual data collection script stays permanently in
+memory, collecting data with as little overhead as possible
+(i.e. it initializes once, then repeatedly collects values with minimal overhead).
+
+`charts.d.plugin` looks for scripts in `/usr/lib/netdata/charts.d`.
+The scripts should have the filename suffix: `.chart.sh`.
+
+## Configuration
+
+`charts.d.plugin` itself can be configured using the configuration file `/etc/netdata/charts.d.conf`
+(to edit it on your system run `/etc/netdata/edit-config charts.d.conf`). This file is also a BASH script.
+
+In this file, you can place statements like this:
+
+```
+enable_all_charts="yes"
+X="yes"
+Y="no"
+```
+
+where `X` and `Y` are the names of individual charts.d collector scripts.
+When set to `yes`, charts.d will evaluate the collector script (see below).
+When set to `no`, charts.d will ignore the collector script.
+
+The variable `enable_all_charts` sets the default enable/disable state for all charts.
+
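+For example, to disable everything by default and enable only the bundled `nginx` and `mysql` modules (an illustrative selection), you could use:
+
+```
+enable_all_charts="no"
+nginx="yes"
+mysql="yes"
+```
+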
+## A charts.d module
+
+A `charts.d.plugin` module is a BASH script defining a few functions.
+
+For a module called `X`, the following criteria must be met:
+
+1. The module script must be called `X.chart.sh` and placed in `/usr/libexec/netdata/charts.d`.
+
+2. If the module needs a configuration, it should be called `X.conf` and placed in `/etc/netdata/charts.d`.
+ The configuration file `X.conf` is also a BASH script itself.
+ To edit the default files supplied by netdata run `/etc/netdata/edit-config charts.d/X.conf`,
+ where `X` is the name of the module.
+
+3. All functions and global variables defined in the script and its configuration must begin with `X_`.
+
+4. The following functions must be defined:
+
+   - `X_check()` - returns 0 or 1 depending on whether the module is able to run or not
+     (following the standard Linux command line return codes: 0 = OK, the collector can operate; 1 = FAILED,
+     the collector cannot be used).
+
+ - `X_create()` - creates the netdata charts, following the standard netdata plugin guides as described in
+ **[External Plugins](../plugins.d/)** (commands `CHART` and `DIMENSION`).
+ The return value does matter: 0 = OK, 1 = FAILED.
+
+ - `X_update()` - collects the values for the defined charts, following the standard netdata plugin guides
+ as described in **[External Plugins](../plugins.d/)** (commands `BEGIN`, `SET`, `END`).
+ The return value also matters: 0 = OK, 1 = FAILED.
+
+5. The following global variables are available to be set:
+   - `X_update_every` - the data collection frequency for the module script, in seconds.
+
+The module script may use more functions or variables, but all of them must begin with `X_`. A minimal module skeleton is sketched below.
+
+The standard netdata plugin variables are also available (check **[External Plugins](../plugins.d/)**).
+
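+Putting these requirements together, here is a minimal, hypothetical module skeleton (the chart name, priority and the random dimension are purely illustrative, modeled loosely on the bundled `example` module):
+
+```sh
+# X.chart.sh - a minimal module skeleton; "X" is a placeholder module name
+
+# the data collection frequency; if empty, charts.d uses its default
+X_update_every=
+
+# the chart priority on the dashboard (illustrative value)
+X_priority=90000
+
+X_check() {
+    # return 0 if the module can collect data, 1 to disable it
+    require_cmd date || return 1
+    return 0
+}
+
+X_create() {
+    # create the chart and its dimension (CHART and DIMENSION commands)
+    cat <<EOF
+CHART X.random '' "A random number" "number" random X.random line ${X_priority} ${X_update_every}
+DIMENSION value '' absolute 1 1
+EOF
+    return 0
+}
+
+X_update() {
+    # $1 is the number of microseconds since the last run - append it to BEGIN
+    echo "BEGIN X.random $1"
+    echo "SET value = ${RANDOM}"
+    echo "END"
+    return 0
+}
+```
+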
+### X_check()
+
+The purpose of the BASH function `X_check()` is to check if the module can collect data (or check its config).
+
+For example, if the module is about monitoring a local mysql database, the `X_check()` function may attempt to
+connect to a local mysql database to find out if it can read the values it needs.
+
+`X_check()` is run only once for the lifetime of the module.
+
+### X_create()
+
+The purpose of the BASH function `X_create()` is to create the charts and dimensions using the standard netdata
+plugin guides (**[External Plugins](../plugins.d/)**).
+
+`X_create()` will be called just once and only after `X_check()` was successful.
+You can, however, call it yourself when there is a need for it (for example, to add a new dimension to an existing chart).
+
+A non-zero return value will disable the collector.
+
+### X_update()
+
+`X_update()` will be called repeatedly every `X_update_every` seconds, to collect new values and send them to netdata,
+following the netdata plugin guides (**[External Plugins](../plugins.d/)**).
+
+The function will be called with one parameter: microseconds since the last time it was run. This value should be
+appended to the `BEGIN` statement of every chart updated by the collector script.
+
+A non-zero return value will disable the collector.
+
+### Useful functions charts.d provides
+
+Module scripts can use the following charts.d functions:
+
+#### require_cmd command
+
+`require_cmd()` will check if a command is available in the running system.
+
+For example, your `X_check()` function may use it like this:
+
+```sh
+mysql_check() {
+ require_cmd mysql || return 1
+ return 0
+}
+```
+
+Using the above, if the command `mysql` is not available in the system, the `mysql` module will be disabled.
+
+#### fixid "string"
+
+`fixid()` will get a string and return a properly formatted id for a chart or dimension.
+
+This is an expensive function that should not be used in `X_update()`.
+You can keep the generated id in a BASH associative array to have the values available in `X_update()`, like this:
+
+```sh
+declare -A X_ids=()
+X_create() {
+ local name="a very bad name for id"
+
+ X_ids[$name]="$(fixid "$name")"
+}
+
+X_update() {
+ local microseconds="$1"
+
+ ...
+ local name="a very bad name for id"
+ ...
+
+ echo "BEGIN ${X_ids[$name]} $microseconds"
+ ...
+}
+```
+
+### Debugging your collectors
+
+You can run `charts.d.plugin` by hand with something like this:
+
+```sh
+# become user netdata
+sudo su -s /bin/sh netdata
+
+# run the plugin in debug mode
+/usr/libexec/netdata/plugins.d/charts.d.plugin debug 1 X Y Z
+```
+
+Charts.d will run in `debug` mode, with an update frequency of `1`, evaluating only the collector scripts
+`X`, `Y` and `Z`. You can define zero or more module scripts. If none is defined, charts.d will evaluate all
+module scripts available.
+
+Keep in mind that if your configs are not in `/etc/netdata`, you should do the following before running
+`charts.d.plugin`:
+
+```sh
+export NETDATA_USER_CONFIG_DIR="/path/to/etc/netdata"
+```
+
+Also, remember that netdata runs `charts.d.plugin` as user `netdata` (or any other user netdata is configured to run as).
+
+
+## Running multiple instances of charts.d.plugin
+
+`charts.d.plugin` calls the `X_update()` function of each module sequentially, one after another. This means that a delay in collector `X`
+will also delay the collection of `Y` and `Z`.
+
+You can run multiple instances of `charts.d.plugin` to overcome this problem.
+
+This is what you need to do:
+
+1. Decide a new name for the new charts.d instance, for example `charts2.d`.
+
+2. Create/edit the files `/etc/netdata/charts.d.conf` and `/etc/netdata/charts2.d.conf` and enable/disable the
+   modules you want each instance to run. Remember to set `enable_all_charts="no"` in both of them, and enable
+   the individual modules for each.
+
+3. Link `/usr/libexec/netdata/plugins.d/charts.d.plugin` to `/usr/libexec/netdata/plugins.d/charts2.d.plugin`.
+   Netdata will spawn a new charts.d process (see the example commands below).
+
+Execute the above in this order, since netdata will (by default) attempt to start new plugins soon after they are
+created in `/usr/libexec/netdata/plugins.d/`.
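+
+For illustration, assuming the default installation paths, steps 2 and 3 could be performed like this:
+
+```sh
+# 2. create /etc/netdata/charts2.d.conf with your editor and set:
+#    enable_all_charts="no"
+#    <module>="yes"        # for each module this instance should run
+
+# 3. link the plugin under the new name; netdata will spawn the new instance
+sudo ln -s /usr/libexec/netdata/plugins.d/charts.d.plugin \
+           /usr/libexec/netdata/plugins.d/charts2.d.plugin
+```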
+
diff --git a/collectors/charts.d.plugin/ap/Makefile.inc b/collectors/charts.d.plugin/ap/Makefile.inc
new file mode 100644
index 000000000..a2dd375ac
--- /dev/null
+++ b/collectors/charts.d.plugin/ap/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_charts_DATA += ap/ap.chart.sh
+dist_chartsconfig_DATA += ap/ap.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += ap/README.md ap/Makefile.inc
+
diff --git a/collectors/charts.d.plugin/ap/README.md b/collectors/charts.d.plugin/ap/README.md
new file mode 100644
index 000000000..eb4e80707
--- /dev/null
+++ b/collectors/charts.d.plugin/ap/README.md
@@ -0,0 +1,84 @@
+# Access Point Plugin (ap)
+
+The `ap` collector visualizes data related to access points.
+
+## Example netdata charts
+
+![image](https://cloud.githubusercontent.com/assets/2662304/12377654/9f566e88-bd2d-11e5-855a-e0ba96b8fd98.png)
+
+## How it works
+
+It does the following:
+
+1. Runs `iw dev` searching for interfaces that have `type AP`.
+
+ From the same output it collects the SSIDs each AP supports by looking for lines `ssid NAME`.
+
+ Example:
+```sh
+# iw dev
+phy#0
+ Interface wlan0
+ ifindex 3
+ wdev 0x1
+ addr 7c:dd:90:77:34:2a
+ ssid TSAOUSIS
+ type AP
+ channel 7 (2442 MHz), width: 20 MHz, center1: 2442 MHz
+```
+
+
+2. For each interface found, it runs `iw INTERFACE station dump`.
+
+   From the output it collects:
+
+ - rx/tx bytes
+ - rx/tx packets
+ - tx retries
+ - tx failed
+ - signal strength
+ - rx/tx bitrate
+ - expected throughput
+
+ Example:
+
+```sh
+# iw wlan0 station dump
+Station 40:b8:37:5a:ed:5e (on wlan0)
+ inactive time: 910 ms
+ rx bytes: 15588897
+ rx packets: 127772
+ tx bytes: 52257763
+ tx packets: 95802
+ tx retries: 2162
+ tx failed: 28
+ signal: -43 dBm
+ signal avg: -43 dBm
+ tx bitrate: 65.0 MBit/s MCS 7
+ rx bitrate: 1.0 MBit/s
+ expected throughput: 32.125Mbps
+ authorized: yes
+ authenticated: yes
+ preamble: long
+ WMM/WME: yes
+ MFP: no
+ TDLS peer: no
+```
+
+3. For each interface found, it creates 6 charts:
+
+ - Number of Connected clients
+ - Bandwidth for all clients
+ - Packets for all clients
+ - Transmit Issues for all clients
+ - Average Signal among all clients
+ - Average Bitrate (including average expected throughput) among all clients
+
+## Configuration
+
+The only option you can set is `ap_update_every=NUMBER` in `/etc/netdata/charts.d/ap.conf`, which defines the data collection frequency.
+To edit this file on your system run `/etc/netdata/edit-config charts.d/ap.conf`.
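+
+For example, to collect AP data every 10 seconds (an illustrative value), the file would simply contain:
+
+```sh
+# /etc/netdata/charts.d/ap.conf
+ap_update_every=10
+```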
+
+## Auto-detection
+
+The plugin is able to auto-detect whether you are running access points on your Linux box.
diff --git a/collectors/charts.d.plugin/ap/ap.chart.sh b/collectors/charts.d.plugin/ap/ap.chart.sh
new file mode 100644
index 000000000..ccc36120c
--- /dev/null
+++ b/collectors/charts.d.plugin/ap/ap.chart.sh
@@ -0,0 +1,182 @@
+# shellcheck shell=bash
+# no need for shebang - this file is loaded from charts.d.plugin
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# netdata
+# real-time performance and health monitoring, done right!
+# (C) 2016 Costa Tsaousis <costa@tsaousis.gr>
+#
+
+# _update_every is a special variable - it holds the number of seconds
+# between the calls of the _update() function
+ap_update_every=
+ap_priority=6900
+
+declare -A ap_devs=()
+
+# _check is called once, to find out if this chart should be enabled or not
+ap_check() {
+ require_cmd iw || return 1
+ local ev
+ ev=$(run iw dev | awk '
+ BEGIN {
+ i = "";
+ ssid = "";
+ ap = 0;
+ }
+ /^[ \t]+Interface / {
+ if( ap == 1 ) {
+ print "ap_devs[" i "]=\"" ssid "\""
+ }
+
+ i = $2;
+ ssid = "";
+ ap = 0;
+ }
+ /^[ \t]+ssid / { ssid = $2; }
+ /^[ \t]+type AP$/ { ap = 1; }
+ END {
+ if( ap == 1 ) {
+ print "ap_devs[" i "]=\"" ssid "\""
+ }
+ }
+ ')
+ eval "${ev}"
+
+ # this should return:
+ # - 0 to enable the chart
+ # - 1 to disable the chart
+
+ [ ${#ap_devs[@]} -gt 0 ] && return 0
+ error "no devices found in AP mode, with 'iw dev'"
+ return 1
+}
+
+# _create is called once, to create the charts
+ap_create() {
+ local ssid dev
+
+ for dev in "${!ap_devs[@]}"
+ do
+ ssid="${ap_devs[${dev}]}"
+
+ # create the chart with 3 dimensions
+ cat <<EOF
+CHART ap_clients.${dev} '' "Connected clients to ${ssid} on ${dev}" "clients" ${dev} ap.clients line $((ap_priority + 1)) $ap_update_every
+DIMENSION clients '' absolute 1 1
+
+CHART ap_bandwidth.${dev} '' "Bandwidth for ${ssid} on ${dev}" "kilobits/s" ${dev} ap.net area $((ap_priority + 2)) $ap_update_every
+DIMENSION received '' incremental 8 1024
+DIMENSION sent '' incremental -8 1024
+
+CHART ap_packets.${dev} '' "Packets for ${ssid} on ${dev}" "packets/s" ${dev} ap.packets line $((ap_priority + 3)) $ap_update_every
+DIMENSION received '' incremental 1 1
+DIMENSION sent '' incremental -1 1
+
+CHART ap_issues.${dev} '' "Transmit Issues for ${ssid} on ${dev}" "issues/s" ${dev} ap.issues line $((ap_priority + 4)) $ap_update_every
+DIMENSION retries 'tx retries' incremental 1 1
+DIMENSION failures 'tx failures' incremental -1 1
+
+CHART ap_signal.${dev} '' "Average Signal for ${ssid} on ${dev}" "dBm" ${dev} ap.signal line $((ap_priority + 5)) $ap_update_every
+DIMENSION signal 'average signal' absolute 1 1000
+
+CHART ap_bitrate.${dev} '' "Bitrate for ${ssid} on ${dev}" "Mbps" ${dev} ap.bitrate line $((ap_priority + 6)) $ap_update_every
+DIMENSION receive '' absolute 1 1000
+DIMENSION transmit '' absolute -1 1000
+DIMENSION expected 'expected throughput' absolute 1 1000
+EOF
+ done
+
+ return 0
+}
+
+# _update is called continuously, to collect the values
+ap_update() {
+ # the first argument to this function is the microseconds since last update
+	# pass this parameter to the BEGIN statement (see below).
+
+ # do all the work to collect / calculate the values
+ # for each dimension
+ # remember: KEEP IT SIMPLE AND SHORT
+
+ for dev in "${!ap_devs[@]}"
+ do
+ echo
+ echo "DEVICE ${dev}"
+ iw "${dev}" station dump
+ done | awk "
+ function zero_data() {
+ dev = \"\";
+ c = 0;
+ rb = 0;
+ tb = 0;
+ rp = 0;
+ tp = 0;
+ tr = 0;
+ tf = 0;
+ tt = 0;
+ rt = 0;
+ s = 0;
+ g = 0;
+ e = 0;
+ }
+ function print_device() {
+ if(dev != \"\" && length(dev) > 0) {
+ print \"BEGIN ap_clients.\" dev;
+ print \"SET clients = \" c;
+ print \"END\";
+ print \"BEGIN ap_bandwidth.\" dev;
+ print \"SET received = \" rb;
+ print \"SET sent = \" tb;
+ print \"END\";
+ print \"BEGIN ap_packets.\" dev;
+ print \"SET received = \" rp;
+ print \"SET sent = \" tp;
+ print \"END\";
+ print \"BEGIN ap_issues.\" dev;
+ print \"SET retries = \" tr;
+ print \"SET failures = \" tf;
+ print \"END\";
+
+ if( c == 0 ) c = 1;
+ print \"BEGIN ap_signal.\" dev;
+ print \"SET signal = \" int(s / c);
+ print \"END\";
+ print \"BEGIN ap_bitrate.\" dev;
+ print \"SET receive = \" int(rt / c);
+ print \"SET transmit = \" int(tt / c);
+ print \"SET expected = \" int(e / c);
+ print \"END\";
+ }
+ zero_data();
+ }
+ BEGIN {
+ zero_data();
+ }
+ /^DEVICE / {
+ print_device();
+ dev = \$2;
+ }
+ /^Station/ { c++; }
+ /^[ \\t]+rx bytes:/ { rb += \$3; }
+ /^[ \\t]+tx bytes:/ { tb += \$3; }
+ /^[ \\t]+rx packets:/ { rp += \$3; }
+ /^[ \\t]+tx packets:/ { tp += \$3; }
+ /^[ \\t]+tx retries:/ { tr += \$3; }
+ /^[ \\t]+tx failed:/ { tf += \$3; }
+ /^[ \\t]+signal:/ { x = \$2; s += x * 1000; }
+ /^[ \\t]+rx bitrate:/ { x = \$3; rt += x * 1000; }
+ /^[ \\t]+tx bitrate:/ { x = \$3; tt += x * 1000; }
+ /^[ \\t]+expected throughput:(.*)Mbps/ {
+ x=\$3;
+ sub(/Mbps/, \"\", x);
+ e += x * 1000;
+ }
+ END {
+ print_device();
+ }
+ "
+
+ return 0
+}
+
diff --git a/collectors/charts.d.plugin/ap/ap.conf b/collectors/charts.d.plugin/ap/ap.conf
new file mode 100644
index 000000000..38fc157ce
--- /dev/null
+++ b/collectors/charts.d.plugin/ap/ap.conf
@@ -0,0 +1,23 @@
+# no need for shebang - this file is loaded from charts.d.plugin
+
+# netdata
+# real-time performance and health monitoring, done right!
+# (C) 2018 Costa Tsaousis <costa@tsaousis.gr>
+# GPL v3+
+
+# nothing fancy to configure.
+# this module will run
+# iw dev - to find wireless devices in AP mode
+# iw ${dev} station dump - to get connected clients
+# based on the above, it generates several charts
+
+# the data collection frequency
+# if unset, will inherit the netdata update frequency
+#ap_update_every=
+
+# the charts priority on the dashboard
+#ap_priority=6900
+
+# the number of retries to do in case of failure
+# before disabling the module
+#ap_retries=10
diff --git a/collectors/charts.d.plugin/apache/Makefile.inc b/collectors/charts.d.plugin/apache/Makefile.inc
new file mode 100644
index 000000000..4b360eae0
--- /dev/null
+++ b/collectors/charts.d.plugin/apache/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_charts_DATA += apache/apache.chart.sh
+dist_chartsconfig_DATA += apache/apache.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += apache/README.md apache/Makefile.inc
+
diff --git a/collectors/charts.d.plugin/apache/README.md b/collectors/charts.d.plugin/apache/README.md
new file mode 100644
index 000000000..890cee984
--- /dev/null
+++ b/collectors/charts.d.plugin/apache/README.md
@@ -0,0 +1,127 @@
+> THIS MODULE IS OBSOLETE.
+> USE THE PYTHON ONE - IT SUPPORTS MULTIPLE JOBS AND IT IS MORE EFFICIENT
+
+---
+
+# Apache Plugin (apache)
+
+The `apache` collector visualizes key performance data for an apache web server.
+
+## Example netdata charts
+
+For apache 2.2:
+
+![image](https://cloud.githubusercontent.com/assets/2662304/12530273/421c4d14-c1e2-11e5-9fb6-ca6d6dd3b1dd.png)
+
+For apache 2.4:
+
+![image](https://cloud.githubusercontent.com/assets/2662304/12530376/29ec26de-c1e6-11e5-9af1-e48aaf781795.png)
+
+## How it works
+
+It runs `curl "http://apache.host/server-status?auto"` to fetch the current status of apache.
+
+It has been tested with apache 2.2 and apache 2.4. The latter also provides connection information (in total and broken down by status).
+
+Apache 2.2 response:
+
+```sh
+$ curl "http://127.0.0.1/server-status?auto"
+Total Accesses: 80057
+Total kBytes: 223017
+CPULoad: .018287
+Uptime: 64472
+ReqPerSec: 1.24173
+BytesPerSec: 3542.15
+BytesPerReq: 2852.59
+BusyWorkers: 1
+IdleWorkers: 49
+Scoreboard: _________________________......................................._W_______________________.......................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................
+```
+
+Apache 2.4 response:
+
+```sh
+$ curl "http://127.0.0.1/server-status?auto"
+127.0.0.1
+ServerVersion: Apache/2.4.18 (Unix)
+ServerMPM: event
+Server Built: Dec 14 2015 08:05:54
+CurrentTime: Saturday, 23-Jan-2016 14:42:06 EET
+RestartTime: Saturday, 23-Jan-2016 04:57:13 EET
+ParentServerConfigGeneration: 2
+ParentServerMPMGeneration: 1
+ServerUptimeSeconds: 35092
+ServerUptime: 9 hours 44 minutes 52 seconds
+Load1: 0.32
+Load5: 0.32
+Load15: 0.27
+Total Accesses: 32403
+Total kBytes: 34464
+CPUUser: 30.37
+CPUSystem: 29.55
+CPUChildrenUser: 0
+CPUChildrenSystem: 0
+CPULoad: .170751
+Uptime: 35092
+ReqPerSec: .923373
+BytesPerSec: 1005.67
+BytesPerReq: 1089.13
+BusyWorkers: 1
+IdleWorkers: 99
+ConnsTotal: 0
+ConnsAsyncWriting: 0
+ConnsAsyncKeepAlive: 0
+ConnsAsyncClosing: 0
+Scoreboard: __________________________________________________________________________________________W_________............................................................................................................................................................................................................................................................................................................
+```
+
+From the apache status output it collects:
+
+ - total accesses (incremental value, rendered as requests/s)
+ - total bandwidth (incremental value, rendered as bandwidth/s)
+ - requests per second (this appears to be calculated by apache as an average for its lifetime, while the one calculated by netdata using the total accesses counter is real-time)
+ - bytes per second (average for the lifetime of the apache server)
+ - bytes per request (average for the lifetime of the apache server)
+ - workers by status (`busy` and `idle`)
+ - total connections (currently active connections - offered by apache 2.4+)
+ - async connections per status (`keepalive`, `writing`, `closing` - offered by apache 2.4+)
+
+## Configuration
+
+The configuration is stored in `/etc/netdata/charts.d/apache.conf`.
+To edit this file on your system run `/etc/netdata/edit-config charts.d/apache.conf`.
+
+The internal default is:
+
+```sh
+# the URL your apache server is responding with mod_status information.
+apache_url="http://127.0.0.1:80/server-status?auto"
+
+# use this to set custom curl options you may need
+apache_curl_opts=
+
+# set this to a NUMBER to overwrite the update frequency
+# it is in seconds
+apache_update_every=
+```
+
+The default `apache_update_every` is configured in netdata.
+
+## Auto-detection
+
+If you have configured your apache server to offer server-status information on localhost clients, the defaults should work fine.
+
+## Apache Configuration
+
+Apache configuration differs between distributions. Please check your distribution's documentation for information on enabling apache's `mod_status` module.
+
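+For example, on Debian/Ubuntu style packaging (an assumption; other distributions differ), enabling the module usually amounts to:
+
+```sh
+sudo a2enmod status
+sudo systemctl restart apache2
+```
+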
+If you are able to run this command successfully by hand:
+
+```sh
+curl "http://127.0.0.1:80/server-status?auto"
+```
+
+netdata will be able to do it too.
+
+Notice: You may need to have the default `000-default.conf` site enabled in order for the status module to work.
diff --git a/collectors/charts.d.plugin/apache/apache.chart.sh b/collectors/charts.d.plugin/apache/apache.chart.sh
new file mode 100644
index 000000000..95876432f
--- /dev/null
+++ b/collectors/charts.d.plugin/apache/apache.chart.sh
@@ -0,0 +1,258 @@
+# shellcheck shell=bash
+# no need for shebang - this file is loaded from charts.d.plugin
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# netdata
+# real-time performance and health monitoring, done right!
+# (C) 2016 Costa Tsaousis <costa@tsaousis.gr>
+#
+
+# the URL to download apache status info
+apache_url="http://127.0.0.1:80/server-status?auto"
+apache_curl_opts=
+
+# _update_every is a special variable - it holds the number of seconds
+# between the calls of the _update() function
+apache_update_every=
+
+apache_priority=60000
+
+# convert apache floating point values
+# to integer using this multiplier
+# this only affects precision - the values
+# will be in the proper units
+apache_decimal_detail=1000000
+
+declare -a apache_response=()
+apache_accesses=0
+apache_kbytes=0
+apache_reqpersec=0
+apache_bytespersec=0
+apache_bytesperreq=0
+apache_busyworkers=0
+apache_idleworkers=0
+apache_connstotal=0
+apache_connsasyncwriting=0
+apache_connsasynckeepalive=0
+apache_connsasyncclosing=0
+
+apache_keys_detected=0
+apache_has_conns=0
+apache_key_accesses=
+apache_key_kbytes=
+apache_key_reqpersec=
+apache_key_bytespersec=
+apache_key_bytesperreq=
+apache_key_busyworkers=
+apache_key_idleworkers=
+apache_key_scoreboard=
+apache_key_connstotal=
+apache_key_connsasyncwriting=
+apache_key_connsasynckeepalive=
+apache_key_connsasyncclosing=
+apache_detect() {
+ local i=0
+ for x in "${@}"
+ do
+ case "${x}" in
+ 'Total Accesses') apache_key_accesses=$((i + 1)) ;;
+ 'Total kBytes') apache_key_kbytes=$((i + 1)) ;;
+ 'ReqPerSec') apache_key_reqpersec=$((i + 1)) ;;
+ 'BytesPerSec') apache_key_bytespersec=$((i + 1)) ;;
+ 'BytesPerReq') apache_key_bytesperreq=$((i + 1)) ;;
+ 'BusyWorkers') apache_key_busyworkers=$((i + 1)) ;;
+ 'IdleWorkers') apache_key_idleworkers=$((i + 1));;
+ 'ConnsTotal') apache_key_connstotal=$((i + 1)) ;;
+ 'ConnsAsyncWriting') apache_key_connsasyncwriting=$((i + 1)) ;;
+ 'ConnsAsyncKeepAlive') apache_key_connsasynckeepalive=$((i + 1)) ;;
+ 'ConnsAsyncClosing') apache_key_connsasyncclosing=$((i + 1)) ;;
+ 'Scoreboard') apache_key_scoreboard=$((i)) ;;
+ esac
+
+ i=$((i + 1))
+ done
+
+	# we will not check for the Conns*
+ # keys, since these are apache 2.4 specific
+ [ -z "${apache_key_accesses}" ] && error "missing 'Total Accesses' from apache server: ${*}" && return 1
+ [ -z "${apache_key_kbytes}" ] && error "missing 'Total kBytes' from apache server: ${*}" && return 1
+ [ -z "${apache_key_reqpersec}" ] && error "missing 'ReqPerSec' from apache server: ${*}" && return 1
+ [ -z "${apache_key_bytespersec}" ] && error "missing 'BytesPerSec' from apache server: ${*}" && return 1
+ [ -z "${apache_key_bytesperreq}" ] && error "missing 'BytesPerReq' from apache server: ${*}" && return 1
+ [ -z "${apache_key_busyworkers}" ] && error "missing 'BusyWorkers' from apache server: ${*}" && return 1
+ [ -z "${apache_key_idleworkers}" ] && error "missing 'IdleWorkers' from apache server: ${*}" && return 1
+ [ -z "${apache_key_scoreboard}" ] && error "missing 'Scoreboard' from apache server: ${*}" && return 1
+
+ if [ ! -z "${apache_key_connstotal}" ] && \
+ [ ! -z "${apache_key_connsasyncwriting}" ] && \
+ [ ! -z "${apache_key_connsasynckeepalive}" ] && \
+ [ ! -z "${apache_key_connsasyncclosing}" ]
+ then
+ apache_has_conns=1
+ else
+ apache_has_conns=0
+ fi
+
+ return 0
+}
+
+apache_get() {
+ local oIFS="${IFS}" ret
+ # shellcheck disable=2207
+ IFS=$':\n' apache_response=($(run curl -Ss ${apache_curl_opts} "${apache_url}"))
+ ret=$?
+ IFS="${oIFS}"
+
+ if [ $ret -ne 0 ] || [ "${#apache_response[@]}" -eq 0 ]
+ then
+ return 1
+ fi
+
+ # the last line on the apache output is "Scoreboard"
+ # we use this label to detect that the output has a new word count
+ if [ ${apache_keys_detected} -eq 0 ] || [ "${apache_response[${apache_key_scoreboard}]}" != "Scoreboard" ]
+ then
+ apache_detect "${apache_response[@]}" || return 1
+ apache_keys_detected=1
+ fi
+
+ apache_accesses="${apache_response[${apache_key_accesses}]}"
+ apache_kbytes="${apache_response[${apache_key_kbytes}]}"
+
+ float2int "${apache_response[${apache_key_reqpersec}]}" ${apache_decimal_detail}
+ apache_reqpersec=${FLOAT2INT_RESULT}
+
+ float2int "${apache_response[${apache_key_bytespersec}]}" ${apache_decimal_detail}
+ apache_bytespersec=${FLOAT2INT_RESULT}
+
+ float2int "${apache_response[${apache_key_bytesperreq}]}" ${apache_decimal_detail}
+ apache_bytesperreq=${FLOAT2INT_RESULT}
+
+ apache_busyworkers="${apache_response[${apache_key_busyworkers}]}"
+ apache_idleworkers="${apache_response[${apache_key_idleworkers}]}"
+
+ if [ -z "${apache_accesses}" ] || \
+ [ -z "${apache_kbytes}" ] || \
+ [ -z "${apache_reqpersec}" ] || \
+ [ -z "${apache_bytespersec}" ] || \
+ [ -z "${apache_bytesperreq}" ] || \
+ [ -z "${apache_busyworkers}" ]
+ [ -z "${apache_idleworkers}" ]
+ then
+ error "empty values got from apache server: ${apache_response[*]}"
+ return 1
+ fi
+
+ if [ ${apache_has_conns} -eq 1 ]
+ then
+ apache_connstotal="${apache_response[${apache_key_connstotal}]}"
+ apache_connsasyncwriting="${apache_response[${apache_key_connsasyncwriting}]}"
+ apache_connsasynckeepalive="${apache_response[${apache_key_connsasynckeepalive}]}"
+ apache_connsasyncclosing="${apache_response[${apache_key_connsasyncclosing}]}"
+ fi
+
+ return 0
+}
+
+# _check is called once, to find out if this chart should be enabled or not
+apache_check() {
+
+ apache_get
+ # shellcheck disable=2181
+ if [ $? -ne 0 ]
+ then
+ # shellcheck disable=2154
+ error "cannot find stub_status on URL '${apache_url}'. Please set apache_url='http://apache.server:80/server-status?auto' in $confd/apache.conf"
+ return 1
+ fi
+
+ # this should return:
+ # - 0 to enable the chart
+ # - 1 to disable the chart
+
+ return 0
+}
+
+# _create is called once, to create the charts
+apache_create() {
+ cat <<EOF
+CHART apache_local.bytesperreq '' "apache Lifetime Avg. Response Size" "bytes/request" statistics apache.bytesperreq area $((apache_priority + 8)) $apache_update_every
+DIMENSION size '' absolute 1 ${apache_decimal_detail}
+CHART apache_local.workers '' "apache Workers" "workers" workers apache.workers stacked $((apache_priority + 5)) $apache_update_every
+DIMENSION idle '' absolute 1 1
+DIMENSION busy '' absolute 1 1
+CHART apache_local.reqpersec '' "apache Lifetime Avg. Requests/s" "requests/s" statistics apache.reqpersec line $((apache_priority + 6)) $apache_update_every
+DIMENSION requests '' absolute 1 ${apache_decimal_detail}
+CHART apache_local.bytespersec '' "apache Lifetime Avg. Bandwidth/s" "kilobits/s" statistics apache.bytespersec area $((apache_priority + 7)) $apache_update_every
+DIMENSION sent '' absolute 8 $((apache_decimal_detail * 1000))
+CHART apache_local.requests '' "apache Requests" "requests/s" requests apache.requests line $((apache_priority + 1)) $apache_update_every
+DIMENSION requests '' incremental 1 1
+CHART apache_local.net '' "apache Bandwidth" "kilobits/s" bandwidth apache.net area $((apache_priority + 3)) $apache_update_every
+DIMENSION sent '' incremental 8 1
+EOF
+
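+ # note (descriptive only): each CHART line above follows the netdata plugin
+ # format "CHART type.id name title units family context charttype priority
+ # update_every", and each DIMENSION line "DIMENSION id name algorithm
+ # multiplier divisor"; the bandwidth dimensions use a multiplier of 8 to
+ # turn bytes (or kBytes) into bits, matching the kilobits/s units above
+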
+ if [ ${apache_has_conns} -eq 1 ]
+ then
+ cat <<EOF2
+CHART apache_local.connections '' "apache Connections" "connections" connections apache.connections line $((apache_priority + 2)) $apache_update_every
+DIMENSION connections '' absolute 1 1
+CHART apache_local.conns_async '' "apache Async Connections" "connections" connections apache.conns_async stacked $((apache_priority + 4)) $apache_update_every
+DIMENSION keepalive '' absolute 1 1
+DIMENSION closing '' absolute 1 1
+DIMENSION writing '' absolute 1 1
+EOF2
+ fi
+
+ return 0
+}
+
+# _update is called continuously, to collect the values
+apache_update() {
+ # the first argument to this function is the microseconds since last update
+ # pass this parameter to the BEGIN statement (see below).
+
+ # do all the work to collect / calculate the values
+ # for each dimension
+ # remember: KEEP IT SIMPLE AND SHORT
+
+ apache_get || return 1
+
+ # write the result of the work.
+ cat <<VALUESEOF
+BEGIN apache_local.requests $1
+SET requests = $((apache_accesses))
+END
+BEGIN apache_local.net $1
+SET sent = $((apache_kbytes))
+END
+BEGIN apache_local.reqpersec $1
+SET requests = $((apache_reqpersec))
+END
+BEGIN apache_local.bytespersec $1
+SET sent = $((apache_bytespersec))
+END
+BEGIN apache_local.bytesperreq $1
+SET size = $((apache_bytesperreq))
+END
+BEGIN apache_local.workers $1
+SET idle = $((apache_idleworkers))
+SET busy = $((apache_busyworkers))
+END
+VALUESEOF
+
+ if [ ${apache_has_conns} -eq 1 ]
+ then
+ cat <<VALUESEOF2
+BEGIN apache_local.connections $1
+SET connections = $((apache_connstotal))
+END
+BEGIN apache_local.conns_async $1
+SET keepalive = $((apache_connsasynckeepalive))
+SET closing = $((apache_connsasyncclosing))
+SET writing = $((apache_connsasyncwriting))
+END
+VALUESEOF2
+ fi
+
+ return 0
+}
diff --git a/collectors/charts.d.plugin/apache/apache.conf b/collectors/charts.d.plugin/apache/apache.conf
new file mode 100644
index 000000000..50914cf32
--- /dev/null
+++ b/collectors/charts.d.plugin/apache/apache.conf
@@ -0,0 +1,30 @@
+# no need for shebang - this file is loaded from charts.d.plugin
+
+# netdata
+# real-time performance and health monitoring, done right!
+# (C) 2018 Costa Tsaousis <costa@tsaousis.gr>
+# GPL v3+
+
+# THIS PLUGIN IS DEPRECATED
+# USE THE PYTHON.D ONE
+
+# the URL to download apache status info
+#apache_url="http://127.0.0.1:80/server-status?auto"
+#apache_curl_opts=
+
+# convert apache floating point values
+# to integer using this multiplier
+# this only affects precision - the values
+# will be in the proper units
+#apache_decimal_detail=1000000
+
+# the data collection frequency
+# if unset, will inherit the netdata update frequency
+#apache_update_every=
+
+# the charts priority on the dashboard
+#apache_priority=60000
+
+# the number of retries to do in case of failure
+# before disabling the module
+#apache_retries=10
diff --git a/collectors/charts.d.plugin/apcupsd/Makefile.inc b/collectors/charts.d.plugin/apcupsd/Makefile.inc
new file mode 100644
index 000000000..19cb9cad7
--- /dev/null
+++ b/collectors/charts.d.plugin/apcupsd/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_charts_DATA += apcupsd/apcupsd.chart.sh
+dist_chartsconfig_DATA += apcupsd/apcupsd.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += apcupsd/README.md apcupsd/Makefile.inc
+
diff --git a/collectors/charts.d.plugin/apcupsd/README.md b/collectors/charts.d.plugin/apcupsd/README.md
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/collectors/charts.d.plugin/apcupsd/README.md
diff --git a/collectors/charts.d.plugin/apcupsd/apcupsd.chart.sh b/collectors/charts.d.plugin/apcupsd/apcupsd.chart.sh
new file mode 100644
index 000000000..e26ef566a
--- /dev/null
+++ b/collectors/charts.d.plugin/apcupsd/apcupsd.chart.sh
@@ -0,0 +1,201 @@
+# shellcheck shell=bash
+# no need for shebang - this file is loaded from charts.d.plugin
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# netdata
+# real-time performance and health monitoring, done right!
+# (C) 2016 Costa Tsaousis <costa@tsaousis.gr>
+#
+
+apcupsd_ip=
+apcupsd_port=
+
+declare -A apcupsd_sources=(
+ ["local"]="127.0.0.1:3551"
+)
+
+# how frequently to collect UPS data
+apcupsd_update_every=10
+
+apcupsd_timeout=3
+
+# the priority of apcupsd related to other charts
+apcupsd_priority=90000
+
+apcupsd_get() {
+ run -t $apcupsd_timeout apcaccess status "$1"
+}
+
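+# Illustrative only: apcaccess prints one "KEY : value [unit]" line per
+# field, roughly like this (sample values; the available fields depend on
+# the UPS model and apcupsd version):
+#
+#   STATUS   : ONLINE
+#   BCHARGE  : 100.0 Percent
+#   BATTV    : 27.1 Volts
+#   LINEV    : 230.0 Volts
+#   LOADPCT  : 9.0 Percent
+#   TIMELEFT : 43.9 Minutes
+#
+# apcupsd_check() below reads the 3rd field of the STATUS line, and
+# apcupsd_update() multiplies each 3rd field by 100 in awk while the
+# DIMENSION lines divide by 100, preserving two decimal places.
+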
+apcupsd_check() {
+
+ # this should return:
+ # - 0 to enable the chart
+ # - 1 to disable the chart
+
+ require_cmd apcaccess || return 1
+
+ # backwards compatibility
+ if [ "${apcupsd_ip}:${apcupsd_port}" != ":" ]
+ then
+ apcupsd_sources["local"]="${apcupsd_ip}:${apcupsd_port}"
+ fi
+
+ local host working=0 failed=0
+ for host in "${!apcupsd_sources[@]}"
+ do
+ run apcupsd_get "${apcupsd_sources[${host}]}" >/dev/null
+ # shellcheck disable=2181
+ if [ $? -ne 0 ]
+ then
+ error "cannot get information for apcupsd server ${host} on ${apcupsd_sources[${host}]}."
+ failed=$((failed + 1))
+ elif [ "$(apcupsd_get "${apcupsd_sources[${host}]}" | awk '/^STATUS.*/{ print $3 }')" != "ONLINE" ]
+ then
+ error "APC UPS ${host} on ${apcupsd_sources[${host}]} is not online."
+ failed=$((failed + 1))
+ else
+ working=$((working + 1))
+ fi
+ done
+
+ if [ ${working} -eq 0 ]
+ then
+ error "No APC UPSes found available."
+ return 1
+ fi
+
+ return 0
+}
+
+apcupsd_create() {
+ local host src
+ for host in "${!apcupsd_sources[@]}"
+ do
+ src=${apcupsd_sources[${host}]}
+
+ # create the charts
+ cat <<EOF
+CHART apcupsd_${host}.charge '' "UPS Charge for ${host} on ${src}" "percentage" ups apcupsd.charge area $((apcupsd_priority + 1)) $apcupsd_update_every
+DIMENSION battery_charge charge absolute 1 100
+
+CHART apcupsd_${host}.battery_voltage '' "UPS Battery Voltage for ${host} on ${src}" "Volts" ups apcupsd.battery.voltage line $((apcupsd_priority + 3)) $apcupsd_update_every
+DIMENSION battery_voltage voltage absolute 1 100
+DIMENSION battery_voltage_nominal nominal absolute 1 100
+
+CHART apcupsd_${host}.input_voltage '' "UPS Input Voltage for ${host} on ${src}" "Volts" input apcupsd.input.voltage line $((apcupsd_priority + 4)) $apcupsd_update_every
+DIMENSION input_voltage voltage absolute 1 100
+DIMENSION input_voltage_min min absolute 1 100
+DIMENSION input_voltage_max max absolute 1 100
+
+CHART apcupsd_${host}.input_frequency '' "UPS Input Frequency for ${host} on ${src}" "Hz" input apcupsd.input.frequency line $((apcupsd_priority + 5)) $apcupsd_update_every
+DIMENSION input_frequency frequency absolute 1 100
+
+CHART apcupsd_${host}.output_voltage '' "UPS Output Voltage for ${host} on ${src}" "Volts" output apcupsd.output.voltage line $((apcupsd_priority + 6)) $apcupsd_update_every
+DIMENSION output_voltage voltage absolute 1 100
+DIMENSION output_voltage_nominal nominal absolute 1 100
+
+CHART apcupsd_${host}.load '' "UPS Load for ${host} on ${src}" "percentage" ups apcupsd.load area $((apcupsd_priority)) $apcupsd_update_every
+DIMENSION load load absolute 1 100
+
+CHART apcupsd_${host}.temp '' "UPS Temperature for ${host} on ${src}" "Celsius" ups apcupsd.temperature line $((apcupsd_priority + 7)) $apcupsd_update_every
+DIMENSION temp temp absolute 1 100
+
+CHART apcupsd_${host}.time '' "UPS Time Remaining for ${host} on ${src}" "Minutes" ups apcupsd.time area $((apcupsd_priority + 2)) $apcupsd_update_every
+DIMENSION time time absolute 1 100
+
+EOF
+ done
+ return 0
+}
+
+
+apcupsd_update() {
+ # the first argument to this function is the microseconds since last update
+ # pass this parameter to the BEGIN statement (see below).
+
+ # do all the work to collect / calculate the values
+ # for each dimension
+ # remember: KEEP IT SIMPLE AND SHORT
+
+ local host working=0 failed=0
+ for host in "${!apcupsd_sources[@]}"
+ do
+ apcupsd_get "${apcupsd_sources[${host}]}" | awk "
+
+BEGIN {
+ battery_charge = 0;
+ battery_voltage = 0;
+ battery_voltage_nominal = 0;
+ input_voltage = 0;
+ input_voltage_min = 0;
+ input_voltage_max = 0;
+ input_frequency = 0;
+ output_voltage = 0;
+ output_voltage_nominal = 0;
+ load = 0;
+ temp = 0;
+ time = 0;
+}
+/^BCHARGE.*/ { battery_charge = \$3 * 100 };
+/^BATTV.*/ { battery_voltage = \$3 * 100 };
+/^NOMBATTV.*/ { battery_voltage_nominal = \$3 * 100 };
+/^LINEV.*/ { input_voltage = \$3 * 100 };
+/^MINLINEV.*/ { input_voltage_min = \$3 * 100 };
+/^MAXLINEV.*/ { input_voltage_max = \$3 * 100 };
+/^LINEFREQ.*/ { input_frequency = \$3 * 100 };
+/^OUTPUTV.*/ { output_voltage = \$3 * 100 };
+/^NOMOUTV.*/ { output_voltage_nominal = \$3 * 100 };
+/^LOADPCT.*/ { load = \$3 * 100 };
+/^ITEMP.*/ { temp = \$3 * 100 };
+/^TIMELEFT.*/ { time = \$3 * 100 };
+END {
+ print \"BEGIN apcupsd_${host}.charge $1\";
+ print \"SET battery_charge = \" battery_charge;
+ print \"END\"
+
+ print \"BEGIN apcupsd_${host}.battery_voltage $1\";
+ print \"SET battery_voltage = \" battery_voltage;
+ print \"SET battery_voltage_nominal = \" battery_voltage_nominal;
+ print \"END\"
+
+ print \"BEGIN apcupsd_${host}.input_voltage $1\";
+ print \"SET input_voltage = \" input_voltage;
+ print \"SET input_voltage_min = \" input_voltage_min;
+ print \"SET input_voltage_max = \" input_voltage_max;
+ print \"END\"
+
+ print \"BEGIN apcupsd_${host}.input_frequency $1\";
+ print \"SET input_frequency = \" input_frequency;
+ print \"END\"
+
+ print \"BEGIN apcupsd_${host}.output_voltage $1\";
+ print \"SET output_voltage = \" output_voltage;
+ print \"SET output_voltage_nominal = \" output_voltage_nominal;
+ print \"END\"
+
+ print \"BEGIN apcupsd_${host}.load $1\";
+ print \"SET load = \" load;
+ print \"END\"
+
+ print \"BEGIN apcupsd_${host}.temp $1\";
+ print \"SET temp = \" temp;
+ print \"END\"
+
+ print \"BEGIN apcupsd_${host}.time $1\";
+ print \"SET time = \" time;
+ print \"END\"
+}"
+ # shellcheck disable=SC2181
+ if [ $? -ne 0 ]
+ then
+ failed=$((failed + 1))
+ error "failed to get values for APC UPS ${host} on ${apcupsd_sources[${host}]}" && return 1
+ else
+ working=$((working + 1))
+ fi
+ done
+
+ [ $working -eq 0 ] && error "failed to get values from all APC UPSes" && return 1
+
+ return 0
+}
diff --git a/collectors/charts.d.plugin/apcupsd/apcupsd.conf b/collectors/charts.d.plugin/apcupsd/apcupsd.conf
new file mode 100644
index 000000000..679c0d61b
--- /dev/null
+++ b/collectors/charts.d.plugin/apcupsd/apcupsd.conf
@@ -0,0 +1,25 @@
+# no need for shebang - this file is loaded from charts.d.plugin
+
+# netdata
+# real-time performance and health monitoring, done right!
+# (C) 2018 Costa Tsaousis <costa@tsaousis.gr>
+# GPL v3+
+
+# add all your APC UPSes in this array - uncomment it too
+#declare -A apcupsd_sources=(
+# ["local"]="127.0.0.1:3551"
+#)
+
+# how long to wait for apcupsd to respond
+#apcupsd_timeout=3
+
+# the data collection frequency
+# if unset, will inherit the netdata update frequency
+#apcupsd_update_every=10
+
+# the charts priority on the dashboard
+#apcupsd_priority=90000
+
+# the number of retries to do in case of failure
+# before disabling the module
+#apcupsd_retries=10
diff --git a/collectors/charts.d.plugin/charts.d.conf b/collectors/charts.d.plugin/charts.d.conf
new file mode 100644
index 000000000..acb2a6fae
--- /dev/null
+++ b/collectors/charts.d.plugin/charts.d.conf
@@ -0,0 +1,63 @@
+# This is the configuration for charts.d.plugin
+
+# Each of its collectors can read configuration either from this file
+# or a NAME.conf file (where NAME is the collector name).
+# The collector specific file has higher precedence.
+
+# This file is a shell script too.
+
+# -----------------------------------------------------------------------------
+
+# number of seconds to run without restart
+# after this time, charts.d.plugin will exit
+# netdata will restart it, but a small gap
+# will appear in the charts.d.plugin charts.
+#restart_timeout=$[3600 * 4]
+
+# when making iterations, charts.d can loop more frequently
+# to prevent plugins missing iterations.
+# this is a percentage relative to update_every to align its
+# iterations.
+# The minimum is 10%, the maximum 100%.
+# So, if update_every is 1 second and time_divisor is 50,
+# charts.d will iterate every 500ms.
+# Charts will be called to collect data only if the time
+# passed since the last time they collected data is equal to
+# or above their update_every.
+#time_divisor=50
+
+# -----------------------------------------------------------------------------
+
+# the default enable/disable for all charts.d collectors
+# the default is "yes"
+# enable_all_charts="yes"
+
+# BY DEFAULT ENABLED MODULES
+# ap=yes
+# nut=yes
+# opensips=yes
+
+# -----------------------------------------------------------------------------
+# THESE NEED TO BE SET TO "force" TO BE ENABLED
+
+# Nothing useful.
+# Just an example charts.d plugin you can use as a template.
+# example=force
+
+# OLD MODULES THAT ARE NOW SERVED BY python.d.plugin
+# apache=force
+# cpufreq=force
+# exim=force
+# hddtemp=force
+# mysql=force
+# nginx=force
+# phpfpm=force
+# postfix=force
+# sensors=force
+# squid=force
+# tomcat=force
+
+# OLD MODULES THAT ARE NOW SERVED BY NETDATA DAEMON
+# cpu_apps=force
+# mem_apps=force
+# load_average=force
diff --git a/collectors/charts.d.plugin/charts.d.dryrun-helper.sh b/collectors/charts.d.plugin/charts.d.dryrun-helper.sh
new file mode 100755
index 000000000..67496c1bd
--- /dev/null
+++ b/collectors/charts.d.plugin/charts.d.dryrun-helper.sh
@@ -0,0 +1,78 @@
+#!/usr/bin/env bash
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# shellcheck disable=SC2181
+
+# will stop the script for any error
+set -e
+
+me="$0"
+name="$1"
+chart="$2"
+conf="$3"
+
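+# Illustrative usage (mirroring the commented-out call in charts.d.plugin;
+# the paths are placeholders):
+#   ./charts.d.dryrun-helper.sh apache /path/to/apache.chart.sh /path/to/apache.conf
+# Exit code 0 means sourcing the chart script (and its config) introduced
+# no global variables outside the "$name" prefix; non-zero means it did
+# (or the files could not be loaded).
+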
+can_diff=1
+
+tmp1="$(mktemp)"
+tmp2="$(mktemp)"
+
+myset() {
+ set | grep -v "^_=" | grep -v "^PIPESTATUS=" | grep -v "^BASH_LINENO="
+}
+
+# save 2 'set'
+myset >"$tmp1"
+myset >"$tmp2"
+
+# make sure they don't differ
+diff "$tmp1" "$tmp2" >/dev/null 2>&1
+if [ $? -ne 0 ]
+then
+ # they differ, we cannot do the check
+ echo >&2 "$me: cannot check with diff."
+ can_diff=0
+fi
+
+# do it again, now including the script
+myset >"$tmp1"
+
+# include the plugin and its config
+if [ -f "$conf" ]
+then
+ # shellcheck source=/dev/null
+ . "$conf"
+ if [ $? -ne 0 ]
+ then
+ echo >&2 "$me: cannot load config file $conf"
+ rm "$tmp1" "$tmp2"
+ exit 1
+ fi
+fi
+
+# shellcheck source=/dev/null
+. "$chart"
+if [ $? -ne 0 ]
+then
+ echo >&2 "$me: cannot load chart file $chart"
+ rm "$tmp1" "$tmp2"
+ exit 1
+fi
+
+# remove all variables starting with the plugin name
+myset | grep -v "^$name" >"$tmp2"
+
+if [ $can_diff -eq 1 ]
+then
+ # check if they are different
+ # make sure they don't differ
+ diff "$tmp1" "$tmp2" >&2
+ if [ $? -ne 0 ]
+ then
+ # they differ
+ rm "$tmp1" "$tmp2"
+ exit 1
+ fi
+fi
+
+rm "$tmp1" "$tmp2"
+exit 0
diff --git a/collectors/charts.d.plugin/charts.d.plugin b/collectors/charts.d.plugin/charts.d.plugin
new file mode 100644
index 000000000..1c6e8c5c9
--- /dev/null
+++ b/collectors/charts.d.plugin/charts.d.plugin
@@ -0,0 +1,743 @@
+#!/usr/bin/env bash
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# netdata
+# real-time performance and health monitoring, done right!
+# (C) 2017 Costa Tsaousis <costa@tsaousis.gr>
+# GPL v3+
+#
+# charts.d.plugin allows easy development of BASH plugins
+#
+# if you need to run parallel charts.d processes, link this file to a different name
+# in the same directory, with a .plugin suffix and netdata will start both of them,
+# each will have a different config file and modules configuration directory.
+#
+
+export PATH="${PATH}:/sbin:/usr/sbin:/usr/local/bin:/usr/local/sbin"
+
+PROGRAM_FILE="$0"
+PROGRAM_NAME="$(basename $0)"
+PROGRAM_NAME="${PROGRAM_NAME/.plugin}"
+MODULE_NAME="main"
+
+# -----------------------------------------------------------------------------
+# create temp dir
+
+debug=0
+TMP_DIR=
+chartsd_cleanup() {
+ trap '' EXIT QUIT HUP INT TERM
+
+ if [ ! -z "$TMP_DIR" -a -d "$TMP_DIR" ]
+ then
+ [ $debug -eq 1 ] && echo >&2 "$PROGRAM_NAME: cleaning up temporary directory $TMP_DIR ..."
+ rm -rf "$TMP_DIR"
+ fi
+ exit 0
+}
+trap chartsd_cleanup EXIT QUIT HUP INT TERM
+
+if [ $UID = "0" ]
+then
+ TMP_DIR="$( mktemp -d /var/run/netdata-${PROGRAM_NAME}-XXXXXXXXXX )"
+else
+ TMP_DIR="$( mktemp -d /tmp/.netdata-${PROGRAM_NAME}-XXXXXXXXXX )"
+fi
+
+logdate() {
+ date "+%Y-%m-%d %H:%M:%S"
+}
+
+log() {
+ local status="${1}"
+ shift
+
+ echo >&2 "$(logdate): ${PROGRAM_NAME}: ${status}: ${MODULE_NAME}: ${*}"
+
+}
+
+warning() {
+ log WARNING "${@}"
+}
+
+error() {
+ log ERROR "${@}"
+}
+
+info() {
+ log INFO "${@}"
+}
+
+fatal() {
+ log FATAL "${@}"
+ echo "DISABLE"
+ exit 1
+}
+
+debug() {
+ [ $debug -eq 1 ] && log DEBUG "${@}"
+}
+
+# -----------------------------------------------------------------------------
+# check a few commands
+
+require_cmd() {
+ local x=$(which "${1}" 2>/dev/null || command -v "${1}" 2>/dev/null)
+ if [ -z "${x}" -o ! -x "${x}" ]
+ then
+ warning "command '${1}' is not found in ${PATH}."
+ eval "${1^^}_CMD=\"\""
+ return 1
+ fi
+
+ eval "${1^^}_CMD=\"${x}\""
+ return 0
+}
+
+require_cmd date || exit 1
+require_cmd sed || exit 1
+require_cmd basename || exit 1
+require_cmd dirname || exit 1
+require_cmd cat || exit 1
+require_cmd grep || exit 1
+require_cmd egrep || exit 1
+require_cmd mktemp || exit 1
+require_cmd awk || exit 1
+require_cmd timeout || exit 1
+require_cmd curl || exit 1
+
+# -----------------------------------------------------------------------------
+
+[ $(( ${BASH_VERSINFO[0]} )) -lt 4 ] && fatal "BASH version 4 or later is required, but found version: ${BASH_VERSION}. Please upgrade."
+
+info "started from '$PROGRAM_FILE' with options: $*"
+
+# -----------------------------------------------------------------------------
+# internal defaults
+# netdata exposes a few environment variables for us
+
+[ -z "${NETDATA_PLUGINS_DIR}" ] && NETDATA_PLUGINS_DIR="$(dirname "${0}")"
+[ -z "${NETDATA_USER_CONFIG_DIR}" ] && NETDATA_USER_CONFIG_DIR="/usr/local/etc/netdata"
+[ -z "${NETDATA_STOCK_CONFIG_DIR}" ] && NETDATA_STOCK_CONFIG_DIR="/usr/local/lib/netdata/conf.d"
+
+pluginsd="${NETDATA_PLUGINS_DIR}"
+stockconfd="${NETDATA_STOCK_CONFIG_DIR}/${PROGRAM_NAME}"
+userconfd="${NETDATA_USER_CONFIG_DIR}/${PROGRAM_NAME}"
+olduserconfd="${NETDATA_USER_CONFIG_DIR}"
+chartsd="$pluginsd/../charts.d"
+
+minimum_update_frequency="${NETDATA_UPDATE_EVERY-1}"
+update_every=${minimum_update_frequency} # this will be overwritten by the command line
+
+# workaround for non-BASH shells
+charts_create="_create"
+charts_update="_update"
+charts_check="_check"
+charts_undescore="_"
+
+# when making iterations, charts.d can loop more frequently
+# to prevent plugins missing iterations.
+# this is a percentage relative to update_every to align its
+# iterations.
+# The minimum is 10%, the maximum 100%.
+# So, if update_every is 1 second and time_divisor is 50,
+# charts.d will iterate every 500ms.
+# Charts will be called to collect data only if the time
+# passed since the last time they collected data is equal to
+# or above their update_every.
+time_divisor=50
+
+# number of seconds to run without restart
+# after this time, charts.d.plugin will exit
+# netdata will restart it
+restart_timeout=$((3600 * 4))
+
+# check if the charts.d plugins are using global variables
+# they should not.
+# The dry run checker does not currently support BASH v4 arrays,
+# so it is disabled.
+dryrunner=0
+
+# check for timeout command
+check_for_timeout=1
+
+# the default enable/disable value for all charts
+enable_all_charts="yes"
+
+# -----------------------------------------------------------------------------
+# parse parameters
+
+check=0
+chart_only=
+while [ ! -z "$1" ]
+do
+ if [ "$1" = "check" ]
+ then
+ check=1
+ shift
+ continue
+ fi
+
+ if [ "$1" = "debug" -o "$1" = "all" ]
+ then
+ debug=1
+ shift
+ continue
+ fi
+
+ if [ -f "$chartsd/$1.chart.sh" ]
+ then
+ debug=1
+ chart_only="$( echo $1.chart.sh | sed "s/\.chart\.sh$//g" )"
+ shift
+ continue
+ fi
+
+ if [ -f "$chartsd/$1" ]
+ then
+ debug=1
+ chart_only="$( echo $1 | sed "s/\.chart\.sh$//g" )"
+ shift
+ continue
+ fi
+
+ # number check
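+ # arithmetic evaluation reproduces the argument only for plain integers
+ # (e.g. "15" -> 15, but "apache" -> 0), so this accepts the update
+ # frequency netdata passes as the first command line parameter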
+ n="$1"
+ x=$(( n ))
+ if [ "$x" = "$n" ]
+ then
+ shift
+ update_every=$x
+ [ $update_every -lt $minimum_update_frequency ] && update_every=$minimum_update_frequency
+ continue
+ fi
+
+ fatal "Cannot understand parameter $1. Aborting."
+done
+
+
+# -----------------------------------------------------------------------------
+# loop control
+
+# default sleep function
+LOOPSLEEPMS_HIGHRES=0
+now_ms=
+current_time_ms_default() {
+ now_ms="$(date +'%s')000"
+}
+current_time_ms="current_time_ms_default"
+current_time_ms_accuracy=1
+mysleep="sleep"
+
+# if found and included, this file overwrites loopsleepms()
+# and current_time_ms() with a high resolution timer function
+# for precise looping.
+source "$pluginsd/loopsleepms.sh.inc"
+[ $? -ne 0 ] && error "Failed to load '$pluginsd/loopsleepms.sh.inc'."
+
+# -----------------------------------------------------------------------------
+# load my configuration
+
+for myconfig in "${NETDATA_STOCK_CONFIG_DIR}/${PROGRAM_NAME}.conf" "${NETDATA_USER_CONFIG_DIR}/${PROGRAM_NAME}.conf"
+do
+ if [ -f "$myconfig" ]
+ then
+ source "$myconfig"
+ if [ $? -ne 0 ]
+ then
+ error "Config file '$myconfig' loaded with errors."
+ else
+ info "Configuration file '$myconfig' loaded."
+ fi
+ else
+ warning "Configuration file '$myconfig' not found."
+ fi
+done
+
+# make sure time_divisor is right
+time_divisor=$((time_divisor))
+[ $time_divisor -lt 10 ] && time_divisor=10
+[ $time_divisor -gt 100 ] && time_divisor=100
+
+
+# we check for the timeout command, after we load our
+# configuration, so that the user may overwrite the
+# timeout command we use, providing a function that
+# can emulate the timeout command we need:
+# > timeout SECONDS command ...
+if [ $check_for_timeout -eq 1 ]
+ then
+ require_cmd timeout || exit 1
+fi
+
+# -----------------------------------------------------------------------------
+# internal checks
+
+# netdata passes the requested update frequency as the first argument
+update_every=$(( update_every + 1 - 1)) # makes sure it is a number
+test $update_every -eq 0 && update_every=1 # if it is zero, make it 1
+
+# check the charts.d directory
+[ ! -d "$chartsd" ] && fatal "cannot find charts directory '$chartsd'"
+
+# -----------------------------------------------------------------------------
+# library functions
+
+fixid() {
+ echo "$*" |\
+ tr -c "[A-Z][a-z][0-9]" "_" |\
+ sed -e "s|^_\+||g" -e "s|_\+$||g" -e "s|_\+|_|g" |\
+ tr "[A-Z]" "[a-z]"
+}
+
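+# illustrative: fixid "My Chart/Name!" prints "my_chart_name"
+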
+run() {
+ local ret pid="${BASHPID}" t
+
+ if [ "z${1}" = "z-t" -a "${2}" != "0" ]
+ then
+ t="${2}"
+ shift 2
+ timeout ${t} "${@}" 2>"${TMP_DIR}/run.${pid}"
+ ret=$?
+ else
+ "${@}" 2>"${TMP_DIR}/run.${pid}"
+ ret=$?
+ fi
+
+ if [ ${ret} -ne 0 ]
+ then
+ {
+ printf "$(logdate): ${PROGRAM_NAME}: ${status}: ${MODULE_NAME}: command '"
+ printf "%q " "${@}"
+ printf "' failed with code ${ret}:\n --- BEGIN TRACE ---\n"
+ cat "${TMP_DIR}/run.${pid}"
+ printf " --- END TRACE ---\n"
+ } >&2
+ fi
+ rm "${TMP_DIR}/run.${pid}"
+
+ return ${ret}
+}
+
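+# Illustrative usage of run():
+#   run -t 5 curl -Ss "http://127.0.0.1/server-status?auto"
+# executes the command under "timeout 5" and dumps its stderr to the log
+# only when it exits with a non-zero code
+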
+# convert any floating point number
+# to integer, give a multiplier
+# the result is stored in ${FLOAT2INT_RESULT}
+# so that no fork is necessary
+# the multiplier must be a power of 10
+float2int() {
+ local f m="$2" a b l v=($1)
+ f=${v[0]}
+
+ # the length of the multiplier - 1
+ l=$(( ${#m} - 1 ))
+
+ # check if the number is in scientific notation
+ if [[ ${f} =~ ^[[:space:]]*(-)?[0-9.]+(e|E)(\+|-)[0-9]+ ]]
+ then
+ # convert it to decimal
+ # unfortunately, this fork cannot be avoided
+ # if you know of a way to avoid it, please let me know
+ f=$(printf "%0.${l}f" ${f})
+ fi
+
+ # split the floating point number
+ # in integer (a) and decimal (b)
+ a=${f/.*/}
+ b=${f/*./}
+
+ # if the integer part is missing
+ # set it to zero
+ [ -z "${a}" ] && a="0"
+
+ # strip leading zeros from the integer part
+ # base 10 conversion
+ a=$((10#$a))
+
+ # check the length of the decimal part
+ # against the length of the multiplier
+ if [ ${#b} -gt ${l} ]
+ then
+ # too many digits - take the most significant
+ b=${b:0:${l}}
+
+ elif [ ${#b} -lt ${l} ]
+ then
+ # too few digits - pad with zero on the right
+ local z="00000000000000000000000" r=$((l - ${#b}))
+ b="${b}${z:0:${r}}"
+ fi
+
+ # strip leading zeros from the decimal part
+ # base 10 conversion
+ b=$((10#$b))
+
+ # store the result
+ FLOAT2INT_RESULT=$(( (a * m) + b ))
+}
+
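+# Illustrative examples (assuming a power-of-10 multiplier, as required above):
+#   float2int "3.14" 1000    # FLOAT2INT_RESULT becomes 3140
+#   float2int "1.5"  1000    # FLOAT2INT_RESULT becomes 1500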
+
+# -----------------------------------------------------------------------------
+# charts check functions
+
+all_charts() {
+ cd "$chartsd"
+ [ $? -ne 0 ] && error "cannot cd to $chartsd" && return 1
+
+ ls *.chart.sh | sed "s/\.chart\.sh$//g"
+}
+
+declare -A charts_enable_keyword=(
+ ['apache']="force"
+ ['cpu_apps']="force"
+ ['cpufreq']="force"
+ ['example']="force"
+ ['exim']="force"
+ ['hddtemp']="force"
+ ['load_average']="force"
+ ['mem_apps']="force"
+ ['mysql']="force"
+ ['nginx']="force"
+ ['phpfpm']="force"
+ ['postfix']="force"
+ ['sensors']="force"
+ ['squid']="force"
+ ['tomcat']="force"
+ )
+
+all_enabled_charts() {
+ local charts= enabled= required=
+
+ # find all enabled charts
+
+ for chart in $( all_charts )
+ do
+ MODULE_NAME="${chart}"
+
+ eval "enabled=\$$chart"
+ if [ -z "${enabled}" ]
+ then
+ enabled="${enable_all_charts}"
+ fi
+
+ required="${charts_enable_keyword[${chart}]}"
+ [ -z "${required}" ] && required="yes"
+
+ if [ ! "${enabled}" = "${required}" ]
+ then
+ info "is disabled. Add a line with $chart=$required in '${NETDATA_USER_CONFIG_DIR}/${PROGRAM_NAME}.conf' to enable it (or remove the line that disables it)."
+ else
+ debug "is enabled for auto-detection."
+ local charts="$charts $chart"
+ fi
+ done
+ MODULE_NAME="main"
+
+ local charts2=
+ for chart in $charts
+ do
+ MODULE_NAME="${chart}"
+
+ # check the enabled charts
+ local check="$( cat "$chartsd/$chart.chart.sh" | sed "s/^ \+//g" | grep "^$chart$charts_check()" )"
+ if [ -z "$check" ]
+ then
+ error "module '$chart' does not seem to have a $chart$charts_check() function. Disabling it."
+ continue
+ fi
+
+ local create="$( cat "$chartsd/$chart.chart.sh" | sed "s/^ \+//g" | grep "^$chart$charts_create()" )"
+ if [ -z "$create" ]
+ then
+ error "module '$chart' does not seem to have a $chart$charts_create() function. Disabling it."
+ continue
+ fi
+
+ local update="$( cat "$chartsd/$chart.chart.sh" | sed "s/^ \+//g" | grep "^$chart$charts_update()" )"
+ if [ -z "$update" ]
+ then
+ error "module '$chart' does not seem to have a $chart$charts_update() function. Disabling it."
+ continue
+ fi
+
+ # check its config
+ #if [ -f "$userconfd/$chart.conf" ]
+ #then
+ # if [ ! -z "$( cat "$userconfd/$chart.conf" | sed "s/^ \+//g" | grep -v "^$" | grep -v "^#" | grep -v "^$chart$charts_undescore" )" ]
+ # then
+ # error "module's $chart config $userconfd/$chart.conf should only have lines starting with $chart$charts_undescore . Disabling it."
+ # continue
+ # fi
+ #fi
+
+ #if [ $dryrunner -eq 1 ]
+ # then
+ # "$pluginsd/charts.d.dryrun-helper.sh" "$chart" "$chartsd/$chart.chart.sh" "$userconfd/$chart.conf" >/dev/null
+ # if [ $? -ne 0 ]
+ # then
+ # error "module's $chart did not pass the dry run check. This means it uses global variables not starting with $chart. Disabling it."
+ # continue
+ # fi
+ #fi
+
+ local charts2="$charts2 $chart"
+ done
+ MODULE_NAME="main"
+
+ echo $charts2
+ debug "enabled charts: $charts2"
+}
+
+# -----------------------------------------------------------------------------
+# load the charts
+
+suffix_retries="_retries"
+suffix_update_every="_update_every"
+active_charts=
+for chart in $( all_enabled_charts )
+do
+ MODULE_NAME="${chart}"
+
+ debug "loading module: '$chartsd/$chart.chart.sh'"
+
+ source "$chartsd/$chart.chart.sh"
+ [ $? -ne 0 ] && warning "Module '$chartsd/$chart.chart.sh' loaded with errors."
+
+ # first load the stock config
+ if [ -f "$stockconfd/$chart.conf" ]
+ then
+ debug "loading module configuration: '$stockconfd/$chart.conf'"
+ source "$stockconfd/$chart.conf"
+ [ $? -ne 0 ] && warning "Config file '$stockconfd/$chart.conf' loaded with errors."
+ else
+ debug "not found module configuration: '$stockconfd/$chart.conf'"
+ fi
+
+ # then load the user config (it overwrites the stock)
+ if [ -f "$userconfd/$chart.conf" ]
+ then
+ debug "loading module configuration: '$userconfd/$chart.conf'"
+ source "$userconfd/$chart.conf"
+ [ $? -ne 0 ] && warning "Config file '$userconfd/$chart.conf' loaded with errors."
+ else
+ debug "not found module configuration: '$userconfd/$chart.conf'"
+
+ if [ -f "$olduserconfd/$chart.conf" ]
+ then
+ # support for very old netdata that had the charts.d module configs in /etc/netdata
+ info "loading module configuration from obsolete location: '$olduserconfd/$chart.conf'"
+ source "$olduserconfd/$chart.conf"
+ [ $? -ne 0 ] && warning "Config file '$olduserconfd/$chart.conf' loaded with errors."
+ fi
+ fi
+
+ eval "dt=\$$chart$suffix_update_every"
+ dt=$(( dt + 1 - 1 )) # make sure it is a number
+ if [ $dt -lt $update_every ]
+ then
+ eval "$chart$suffix_update_every=$update_every"
+ fi
+
+ $chart$charts_check
+ if [ $? -eq 0 ]
+ then
+ debug "module '$chart' activated"
+ active_charts="$active_charts $chart"
+ else
+ error "module's '$chart' check() function reports failure."
+ fi
+done
+MODULE_NAME="main"
+debug "activated modules: $active_charts"
+
+
+# -----------------------------------------------------------------------------
+# check overwrites
+
+# enable work time reporting
+debug_time=
+test $debug -eq 1 && debug_time=tellwork
+
+# if we only need a specific chart, remove all the others
+if [ ! -z "${chart_only}" ]
+then
+ debug "requested to run only for: '${chart_only}'"
+ check_charts=
+ for chart in $active_charts
+ do
+ if [ "$chart" = "$chart_only" ]
+ then
+ check_charts="$chart"
+ break
+ fi
+ done
+ active_charts="$check_charts"
+fi
+debug "activated charts: $active_charts"
+
+# stop if we just need a pre-check
+if [ $check -eq 1 ]
+then
+ info "CHECK RESULT"
+ info "Will run the charts: $active_charts"
+ exit 0
+fi
+
+# -----------------------------------------------------------------------------
+
+cd "${TMP_DIR}" || exit 1
+
+# -----------------------------------------------------------------------------
+# create charts
+
+run_charts=
+for chart in $active_charts
+do
+ MODULE_NAME="${chart}"
+
+ debug "calling '$chart$charts_create()'..."
+ $chart$charts_create
+ if [ $? -eq 0 ]
+ then
+ run_charts="$run_charts $chart"
+ debug "'$chart' initialized."
+ else
+ error "module's '$chart' function '$chart$charts_create()' reports failure."
+ fi
+done
+MODULE_NAME="main"
+debug "run_charts='$run_charts'"
+
+
+# -----------------------------------------------------------------------------
+# update dimensions
+
+[ -z "$run_charts" ] && fatal "No charts to collect data from."
+
+declare -A charts_last_update=() charts_update_every=() charts_retries=() charts_next_update=() charts_run_counter=() charts_serial_failures=()
+global_update() {
+ local exit_at \
+ c=0 dt ret last_ms exec_start_ms exec_end_ms \
+ chart now_charts=() next_charts=($run_charts) \
+ next_ms x seconds millis
+
+ # return the current time in ms in $now_ms
+ ${current_time_ms}
+
+ exit_at=$(( now_ms + (restart_timeout * 1000) ))
+
+ for chart in $run_charts
+ do
+ eval "charts_update_every[$chart]=\$$chart$suffix_update_every"
+ test -z "${charts_update_every[$chart]}" && charts_update_every[$chart]=$update_every
+
+ eval "charts_retries[$chart]=\$$chart$suffix_retries"
+ test -z "${charts_retries[$chart]}" && charts_retries[$chart]=10
+
+ charts_last_update[$chart]=$((now_ms - (now_ms % (charts_update_every[$chart] * 1000) ) ))
+ charts_next_update[$chart]=$(( charts_last_update[$chart] + (charts_update_every[$chart] * 1000) ))
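+ # e.g. with update_every=10s and now_ms=1700000003000, last_update snaps
+ # back to 1700000000000 and next_update becomes 1700000010000, so all
+ # charts with the same update_every stay aligned to the same boundaries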
+ charts_run_counter[$chart]=0
+ charts_serial_failures[$chart]=0
+
+ echo "CHART netdata.plugin_chartsd_$chart '' 'Execution time for $chart plugin' 'milliseconds / run' charts.d netdata.plugin_charts area 145000 ${charts_update_every[$chart]}"
+ echo "DIMENSION run_time 'run time' absolute 1 1"
+ done
+
+ # the main loop
+ while [ "${#next_charts[@]}" -gt 0 ]
+ do
+ c=$((c + 1))
+ now_charts=("${next_charts[@]}")
+ next_charts=()
+
+ # return the current time in ms in $now_ms
+ ${current_time_ms}
+
+ for chart in "${now_charts[@]}"
+ do
+ MODULE_NAME="${chart}"
+
+ if [ ${now_ms} -ge ${charts_next_update[$chart]} ]
+ then
+ last_ms=${charts_last_update[$chart]}
+ dt=$(( (now_ms - last_ms) ))
+
+ charts_last_update[$chart]=${now_ms}
+
+ while [ ${charts_next_update[$chart]} -lt ${now_ms} ]
+ do
+ charts_next_update[$chart]=$(( charts_next_update[$chart] + (charts_update_every[$chart] * 1000) ))
+ done
+
+ # the first call should not give a duration
+ # so that netdata calibrates to current time
+ dt=$(( dt * 1000 ))
+ charts_run_counter[$chart]=$(( charts_run_counter[$chart] + 1 ))
+ if [ ${charts_run_counter[$chart]} -eq 1 ]
+ then
+ dt=
+ fi
+
+ exec_start_ms=$now_ms
+ $chart$charts_update $dt
+ ret=$?
+
+ # return the current time in ms in $now_ms
+ ${current_time_ms}; exec_end_ms=$now_ms
+
+ echo "BEGIN netdata.plugin_chartsd_$chart $dt"
+ echo "SET run_time = $(( exec_end_ms - exec_start_ms ))"
+ echo "END"
+
+ if [ $ret -eq 0 ]
+ then
+ charts_serial_failures[$chart]=0
+ next_charts+=($chart)
+ else
+ charts_serial_failures[$chart]=$(( charts_serial_failures[$chart] + 1 ))
+
+ if [ ${charts_serial_failures[$chart]} -gt ${charts_retries[$chart]} ]
+ then
+ error "module's '$chart' update() function reported failure ${charts_serial_failures[$chart]} times. Disabling it."
+ else
+ error "module's '$chart' update() function reports failure. Will keep trying for a while."
+ next_charts+=($chart)
+ fi
+ fi
+ else
+ next_charts+=($chart)
+ fi
+ done
+ MODULE_NAME="${chart}"
+
+ # wait the time you are required to
+ next_ms=$((now_ms + (update_every * 1000 * 100) ))
+ for x in "${charts_next_update[@]}"; do [ ${x} -lt ${next_ms} ] && next_ms=${x}; done
+ next_ms=$((next_ms - now_ms))
+
+ if [ ${LOOPSLEEPMS_HIGHRES} -eq 1 -a ${next_ms} -gt 0 ]
+ then
+ next_ms=$(( next_ms + current_time_ms_accuracy ))
+ seconds=$(( next_ms / 1000 ))
+ millis=$(( next_ms % 1000 ))
+ if [ ${millis} -lt 10 ]
+ then
+ millis="00${millis}"
+ elif [ ${millis} -lt 100 ]
+ then
+ millis="0${millis}"
+ fi
+
+ debug "sleeping for ${seconds}.${millis} seconds."
+ ${mysleep} ${seconds}.${millis}
+ else
+ debug "sleeping for ${update_every} seconds."
+ ${mysleep} $update_every
+ fi
+
+ test ${now_ms} -ge ${exit_at} && exit 0
+ done
+
+ fatal "nothing left to do, exiting..."
+}
+
+global_update
diff --git a/collectors/charts.d.plugin/charts.d.plugin.in b/collectors/charts.d.plugin/charts.d.plugin.in
new file mode 100755
index 000000000..3477894d8
--- /dev/null
+++ b/collectors/charts.d.plugin/charts.d.plugin.in
@@ -0,0 +1,743 @@
+#!/usr/bin/env bash
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# netdata
+# real-time performance and health monitoring, done right!
+# (C) 2017 Costa Tsaousis <costa@tsaousis.gr>
+# GPL v3+
+#
+# charts.d.plugin allows easy development of BASH plugins
+#
+# if you need to run parallel charts.d processes, link this file to a different name
+# in the same directory, with a .plugin suffix and netdata will start both of them,
+# each will have a different config file and modules configuration directory.
+#
+
+export PATH="${PATH}:/sbin:/usr/sbin:/usr/local/bin:/usr/local/sbin"
+
+PROGRAM_FILE="$0"
+PROGRAM_NAME="$(basename $0)"
+PROGRAM_NAME="${PROGRAM_NAME/.plugin}"
+MODULE_NAME="main"
+
+# -----------------------------------------------------------------------------
+# create temp dir
+
+debug=0
+TMP_DIR=
+chartsd_cleanup() {
+ trap '' EXIT QUIT HUP INT TERM
+
+ if [ ! -z "$TMP_DIR" -a -d "$TMP_DIR" ]
+ then
+ [ $debug -eq 1 ] && echo >&2 "$PROGRAM_NAME: cleaning up temporary directory $TMP_DIR ..."
+ rm -rf "$TMP_DIR"
+ fi
+ exit 0
+}
+trap chartsd_cleanup EXIT QUIT HUP INT TERM
+
+if [ $UID = "0" ]
+then
+ TMP_DIR="$( mktemp -d /var/run/netdata-${PROGRAM_NAME}-XXXXXXXXXX )"
+else
+ TMP_DIR="$( mktemp -d /tmp/.netdata-${PROGRAM_NAME}-XXXXXXXXXX )"
+fi
+
+logdate() {
+ date "+%Y-%m-%d %H:%M:%S"
+}
+
+log() {
+ local status="${1}"
+ shift
+
+ echo >&2 "$(logdate): ${PROGRAM_NAME}: ${status}: ${MODULE_NAME}: ${*}"
+
+}
+
+warning() {
+ log WARNING "${@}"
+}
+
+error() {
+ log ERROR "${@}"
+}
+
+info() {
+ log INFO "${@}"
+}
+
+fatal() {
+ log FATAL "${@}"
+ echo "DISABLE"
+ exit 1
+}
+
+debug() {
+ [ $debug -eq 1 ] && log DEBUG "${@}"
+}
+
+# -----------------------------------------------------------------------------
+# check a few commands
+
+require_cmd() {
+ local x=$(which "${1}" 2>/dev/null || command -v "${1}" 2>/dev/null)
+ if [ -z "${x}" -o ! -x "${x}" ]
+ then
+ warning "command '${1}' is not found in ${PATH}."
+ eval "${1^^}_CMD=\"\""
+ return 1
+ fi
+
+ eval "${1^^}_CMD=\"${x}\""
+ return 0
+}
+
+require_cmd date || exit 1
+require_cmd sed || exit 1
+require_cmd basename || exit 1
+require_cmd dirname || exit 1
+require_cmd cat || exit 1
+require_cmd grep || exit 1
+require_cmd egrep || exit 1
+require_cmd mktemp || exit 1
+require_cmd awk || exit 1
+require_cmd timeout || exit 1
+require_cmd curl || exit 1
+
+# -----------------------------------------------------------------------------
+
+[ $(( ${BASH_VERSINFO[0]} )) -lt 4 ] && fatal "BASH version 4 or later is required, but found version: ${BASH_VERSION}. Please upgrade."
+
+info "started from '$PROGRAM_FILE' with options: $*"
+
+# -----------------------------------------------------------------------------
+# internal defaults
+# netdata exposes a few environment variables for us
+
+[ -z "${NETDATA_PLUGINS_DIR}" ] && NETDATA_PLUGINS_DIR="$(dirname "${0}")"
+[ -z "${NETDATA_USER_CONFIG_DIR}" ] && NETDATA_USER_CONFIG_DIR="@configdir_POST@"
+[ -z "${NETDATA_STOCK_CONFIG_DIR}" ] && NETDATA_STOCK_CONFIG_DIR="@libconfigdir_POST@"
+
+pluginsd="${NETDATA_PLUGINS_DIR}"
+stockconfd="${NETDATA_STOCK_CONFIG_DIR}/${PROGRAM_NAME}"
+userconfd="${NETDATA_USER_CONFIG_DIR}/${PROGRAM_NAME}"
+olduserconfd="${NETDATA_USER_CONFIG_DIR}"
+chartsd="$pluginsd/../charts.d"
+
+minimum_update_frequency="${NETDATA_UPDATE_EVERY-1}"
+update_every=${minimum_update_frequency} # this will be overwritten by the command line
+
+# workaround for non-BASH shells
+charts_create="_create"
+charts_update="_update"
+charts_check="_check"
+charts_undescore="_"
+
+# when making iterations, charts.d can loop more frequently
+# to prevent plugins missing iterations.
+# this is a percentage relative to update_every to align its
+# iterations.
+# The minimum is 10%, the maximum 100%.
+# So, if update_every is 1 second and time_divisor is 50,
+# charts.d will iterate every 500ms.
+# Charts will be called to collect data only if the time
+# passed since the last time they collected data is equal to
+# or above their update_every.
+time_divisor=50
+
+# number of seconds to run without restart
+# after this time, charts.d.plugin will exit
+# netdata will restart it
+restart_timeout=$((3600 * 4))
+
+# check if the charts.d plugins are using global variables
+# they should not.
+# The dry run checker does not currently support BASH v4 arrays,
+# so it is disabled.
+dryrunner=0
+
+# check for timeout command
+check_for_timeout=1
+
+# the default enable/disable value for all charts
+enable_all_charts="yes"
+
+# -----------------------------------------------------------------------------
+# parse parameters
+
+check=0
+chart_only=
+while [ ! -z "$1" ]
+do
+ if [ "$1" = "check" ]
+ then
+ check=1
+ shift
+ continue
+ fi
+
+ if [ "$1" = "debug" -o "$1" = "all" ]
+ then
+ debug=1
+ shift
+ continue
+ fi
+
+ if [ -f "$chartsd/$1.chart.sh" ]
+ then
+ debug=1
+ chart_only="$( echo $1.chart.sh | sed "s/\.chart\.sh$//g" )"
+ shift
+ continue
+ fi
+
+ if [ -f "$chartsd/$1" ]
+ then
+ debug=1
+ chart_only="$( echo $1 | sed "s/\.chart\.sh$//g" )"
+ shift
+ continue
+ fi
+
+ # number check
+ n="$1"
+ x=$(( n ))
+ if [ "$x" = "$n" ]
+ then
+ shift
+ update_every=$x
+ [ $update_every -lt $minimum_update_frequency ] && update_every=$minimum_update_frequency
+ continue
+ fi
+
+ fatal "Cannot understand parameter $1. Aborting."
+done
+
+
+# -----------------------------------------------------------------------------
+# loop control
+
+# default sleep function
+LOOPSLEEPMS_HIGHRES=0
+now_ms=
+current_time_ms_default() {
+ now_ms="$(date +'%s')000"
+}
+current_time_ms="current_time_ms_default"
+current_time_ms_accuracy=1
+mysleep="sleep"
+
+# if found and included, this file overwrites loopsleepms()
+# and current_time_ms() with a high resolution timer function
+# for precise looping.
+source "$pluginsd/loopsleepms.sh.inc"
+[ $? -ne 0 ] && error "Failed to load '$pluginsd/loopsleepms.sh.inc'."
+
+# -----------------------------------------------------------------------------
+# load my configuration
+
+for myconfig in "${NETDATA_STOCK_CONFIG_DIR}/${PROGRAM_NAME}.conf" "${NETDATA_USER_CONFIG_DIR}/${PROGRAM_NAME}.conf"
+do
+ if [ -f "$myconfig" ]
+ then
+ source "$myconfig"
+ if [ $? -ne 0 ]
+ then
+ error "Config file '$myconfig' loaded with errors."
+ else
+ info "Configuration file '$myconfig' loaded."
+ fi
+ else
+ warning "Configuration file '$myconfig' not found."
+ fi
+done
+
+# make sure time_divisor is right
+time_divisor=$((time_divisor))
+[ $time_divisor -lt 10 ] && time_divisor=10
+[ $time_divisor -gt 100 ] && time_divisor=100
+
+
+# we check for the timeout command, after we load our
+# configuration, so that the user may overwrite the
+# timeout command we use, providing a function that
+# can emulate the timeout command we need:
+# > timeout SECONDS command ...
+if [ $check_for_timeout -eq 1 ]
+ then
+ require_cmd timeout || exit 1
+fi
+
+# -----------------------------------------------------------------------------
+# internal checks
+
+# netdata passes the requested update frequency as the first argument
+update_every=$(( update_every + 1 - 1)) # makes sure it is a number
+test $update_every -eq 0 && update_every=1 # if it is zero, make it 1
+
+# check the charts.d directory
+[ ! -d "$chartsd" ] && fatal "cannot find charts directory '$chartsd'"
+
+# -----------------------------------------------------------------------------
+# library functions
+
+fixid() {
+ echo "$*" |\
+ tr -c "[A-Z][a-z][0-9]" "_" |\
+ sed -e "s|^_\+||g" -e "s|_\+$||g" -e "s|_\+|_|g" |\
+ tr "[A-Z]" "[a-z]"
+}
+
+run() {
+ local ret pid="${BASHPID}" t
+
+ if [ "z${1}" = "z-t" -a "${2}" != "0" ]
+ then
+ t="${2}"
+ shift 2
+ timeout ${t} "${@}" 2>"${TMP_DIR}/run.${pid}"
+ ret=$?
+ else
+ "${@}" 2>"${TMP_DIR}/run.${pid}"
+ ret=$?
+ fi
+
+ if [ ${ret} -ne 0 ]
+ then
+ {
+ printf "$(logdate): ${PROGRAM_NAME}: ${status}: ${MODULE_NAME}: command '"
+ printf "%q " "${@}"
+ printf "' failed with code ${ret}:\n --- BEGIN TRACE ---\n"
+ cat "${TMP_DIR}/run.${pid}"
+ printf " --- END TRACE ---\n"
+ } >&2
+ fi
+ rm "${TMP_DIR}/run.${pid}"
+
+ return ${ret}
+}
+
+# convert any floating point number
+# to integer, give a multiplier
+# the result is stored in ${FLOAT2INT_RESULT}
+# so that no fork is necessary
+# the multiplier must be a power of 10
+float2int() {
+ local f m="$2" a b l v=($1)
+ f=${v[0]}
+
+ # the length of the multiplier - 1
+ l=$(( ${#m} - 1 ))
+
+ # check if the number is in scientific notation
+ if [[ ${f} =~ ^[[:space:]]*(-)?[0-9.]+(e|E)(\+|-)[0-9]+ ]]
+ then
+ # convert it to decimal
+ # unfortunately, this fork cannot be avoided
+ # if you know of a way to avoid it, please let me know
+ f=$(printf "%0.${l}f" ${f})
+ fi
+
+ # split the floating point number
+ # in integer (a) and decimal (b)
+ a=${f/.*/}
+ b=${f/*./}
+
+ # if the integer part is missing
+ # set it to zero
+ [ -z "${a}" ] && a="0"
+
+ # strip leading zeros from the integer part
+ # base 10 conversion
+ a=$((10#$a))
+
+ # check the length of the decimal part
+ # against the length of the multiplier
+ if [ ${#b} -gt ${l} ]
+ then
+ # too many digits - take the most significant
+ b=${b:0:${l}}
+
+ elif [ ${#b} -lt ${l} ]
+ then
+ # too few digits - pad with zero on the right
+ local z="00000000000000000000000" r=$((l - ${#b}))
+ b="${b}${z:0:${r}}"
+ fi
+
+ # strip leading zeros from the decimal part
+ # base 10 conversion
+ b=$((10#$b))
+
+ # store the result
+ FLOAT2INT_RESULT=$(( (a * m) + b ))
+}
+
+
+# -----------------------------------------------------------------------------
+# charts check functions
+
+all_charts() {
+ cd "$chartsd"
+ [ $? -ne 0 ] && error "cannot cd to $chartsd" && return 1
+
+ ls *.chart.sh | sed "s/\.chart\.sh$//g"
+}
+
+declare -A charts_enable_keyword=(
+ ['apache']="force"
+ ['cpu_apps']="force"
+ ['cpufreq']="force"
+ ['example']="force"
+ ['exim']="force"
+ ['hddtemp']="force"
+ ['load_average']="force"
+ ['mem_apps']="force"
+ ['mysql']="force"
+ ['nginx']="force"
+ ['phpfpm']="force"
+ ['postfix']="force"
+ ['sensors']="force"
+ ['squid']="force"
+ ['tomcat']="force"
+ )
+
+all_enabled_charts() {
+ local charts= enabled= required=
+
+ # find all enabled charts
+
+ for chart in $( all_charts )
+ do
+ MODULE_NAME="${chart}"
+
+ eval "enabled=\$$chart"
+ if [ -z "${enabled}" ]
+ then
+ enabled="${enable_all_charts}"
+ fi
+
+ required="${charts_enable_keyword[${chart}]}"
+ [ -z "${required}" ] && required="yes"
+
+ if [ ! "${enabled}" = "${required}" ]
+ then
+ info "is disabled. Add a line with $chart=$required in '${NETDATA_USER_CONFIG_DIR}/${PROGRAM_NAME}.conf' to enable it (or remove the line that disables it)."
+ else
+ debug "is enabled for auto-detection."
+ local charts="$charts $chart"
+ fi
+ done
+ MODULE_NAME="main"
+
+ local charts2=
+ for chart in $charts
+ do
+ MODULE_NAME="${chart}"
+
+ # check the enabled charts
+ local check="$( cat "$chartsd/$chart.chart.sh" | sed "s/^ \+//g" | grep "^$chart$charts_check()" )"
+ if [ -z "$check" ]
+ then
+ error "module '$chart' does not seem to have a $chart$charts_check() function. Disabling it."
+ continue
+ fi
+
+ local create="$( cat "$chartsd/$chart.chart.sh" | sed "s/^ \+//g" | grep "^$chart$charts_create()" )"
+ if [ -z "$create" ]
+ then
+ error "module '$chart' does not seem to have a $chart$charts_create() function. Disabling it."
+ continue
+ fi
+
+ local update="$( cat "$chartsd/$chart.chart.sh" | sed "s/^ \+//g" | grep "^$chart$charts_update()" )"
+ if [ -z "$update" ]
+ then
+ error "module '$chart' does not seem to have a $chart$charts_update() function. Disabling it."
+ continue
+ fi
+
+ # check its config
+ #if [ -f "$userconfd/$chart.conf" ]
+ #then
+ # if [ ! -z "$( cat "$userconfd/$chart.conf" | sed "s/^ \+//g" | grep -v "^$" | grep -v "^#" | grep -v "^$chart$charts_undescore" )" ]
+ # then
+ # error "module's $chart config $userconfd/$chart.conf should only have lines starting with $chart$charts_undescore . Disabling it."
+ # continue
+ # fi
+ #fi
+
+ #if [ $dryrunner -eq 1 ]
+ # then
+ # "$pluginsd/charts.d.dryrun-helper.sh" "$chart" "$chartsd/$chart.chart.sh" "$userconfd/$chart.conf" >/dev/null
+ # if [ $? -ne 0 ]
+ # then
+ # error "module's $chart did not pass the dry run check. This means it uses global variables not starting with $chart. Disabling it."
+ # continue
+ # fi
+ #fi
+
+ local charts2="$charts2 $chart"
+ done
+ MODULE_NAME="main"
+
+ echo $charts2
+ debug "enabled charts: $charts2"
+}
+
+# -----------------------------------------------------------------------------
+# load the charts
+
+suffix_retries="_retries"
+suffix_update_every="_update_every"
+active_charts=
+for chart in $( all_enabled_charts )
+do
+ MODULE_NAME="${chart}"
+
+ debug "loading module: '$chartsd/$chart.chart.sh'"
+
+ source "$chartsd/$chart.chart.sh"
+ [ $? -ne 0 ] && warning "Module '$chartsd/$chart.chart.sh' loaded with errors."
+
+ # first load the stock config
+ if [ -f "$stockconfd/$chart.conf" ]
+ then
+ debug "loading module configuration: '$stockconfd/$chart.conf'"
+ source "$stockconfd/$chart.conf"
+ [ $? -ne 0 ] && warning "Config file '$stockconfd/$chart.conf' loaded with errors."
+ else
+ debug "not found module configuration: '$stockconfd/$chart.conf'"
+ fi
+
+ # then load the user config (it overwrites the stock)
+ if [ -f "$userconfd/$chart.conf" ]
+ then
+ debug "loading module configuration: '$userconfd/$chart.conf'"
+ source "$userconfd/$chart.conf"
+ [ $? -ne 0 ] && warning "Config file '$userconfd/$chart.conf' loaded with errors."
+ else
+ debug "not found module configuration: '$userconfd/$chart.conf'"
+
+ if [ -f "$olduserconfd/$chart.conf" ]
+ then
+ # support for very old netdata that had the charts.d module configs in /etc/netdata
+ info "loading module configuration from obsolete location: '$olduserconfd/$chart.conf'"
+ source "$olduserconfd/$chart.conf"
+ [ $? -ne 0 ] && warning "Config file '$olduserconfd/$chart.conf' loaded with errors."
+ fi
+ fi
+
+ eval "dt=\$$chart$suffix_update_every"
+ dt=$(( dt + 1 - 1 )) # make sure it is a number
+ if [ $dt -lt $update_every ]
+ then
+ eval "$chart$suffix_update_every=$update_every"
+ fi
+
+ $chart$charts_check
+ if [ $? -eq 0 ]
+ then
+ debug "module '$chart' activated"
+ active_charts="$active_charts $chart"
+ else
+ error "module's '$chart' check() function reports failure."
+ fi
+done
+MODULE_NAME="main"
+debug "activated modules: $active_charts"
+
+
+# -----------------------------------------------------------------------------
+# check overwrites
+
+# enable work time reporting
+debug_time=
+test $debug -eq 1 && debug_time=tellwork
+
+# if we only need a specific chart, remove all the others
+if [ ! -z "${chart_only}" ]
+then
+ debug "requested to run only for: '${chart_only}'"
+ check_charts=
+ for chart in $active_charts
+ do
+ if [ "$chart" = "$chart_only" ]
+ then
+ check_charts="$chart"
+ break
+ fi
+ done
+ active_charts="$check_charts"
+fi
+debug "activated charts: $active_charts"
+
+# stop if we just need a pre-check
+if [ $check -eq 1 ]
+then
+ info "CHECK RESULT"
+ info "Will run the charts: $active_charts"
+ exit 0
+fi
+
+# -----------------------------------------------------------------------------
+
+cd "${TMP_DIR}" || exit 1
+
+# -----------------------------------------------------------------------------
+# create charts
+
+run_charts=
+for chart in $active_charts
+do
+ MODULE_NAME="${chart}"
+
+ debug "calling '$chart$charts_create()'..."
+ $chart$charts_create
+ if [ $? -eq 0 ]
+ then
+ run_charts="$run_charts $chart"
+ debug "'$chart' initialized."
+ else
+ error "module's '$chart' function '$chart$charts_create()' reports failure."
+ fi
+done
+MODULE_NAME="main"
+debug "run_charts='$run_charts'"
+
+
+# -----------------------------------------------------------------------------
+# update dimensions
+
+[ -z "$run_charts" ] && fatal "No charts to collect data from."
+
+declare -A charts_last_update=() charts_update_every=() charts_retries=() charts_next_update=() charts_run_counter=() charts_serial_failures=()
+global_update() {
+ local exit_at \
+ c=0 dt ret last_ms exec_start_ms exec_end_ms \
+ chart now_charts=() next_charts=($run_charts) \
+ next_ms x seconds millis
+
+ # return the current time in ms in $now_ms
+ ${current_time_ms}
+
+ exit_at=$(( now_ms + (restart_timeout * 1000) ))
+
+ for chart in $run_charts
+ do
+ eval "charts_update_every[$chart]=\$$chart$suffix_update_every"
+ test -z "${charts_update_every[$chart]}" && charts_update_every[$chart]=$update_every
+
+ eval "charts_retries[$chart]=\$$chart$suffix_retries"
+ test -z "${charts_retries[$chart]}" && charts_retries[$chart]=10
+
+ charts_last_update[$chart]=$((now_ms - (now_ms % (charts_update_every[$chart] * 1000) ) ))
+ charts_next_update[$chart]=$(( charts_last_update[$chart] + (charts_update_every[$chart] * 1000) ))
+ charts_run_counter[$chart]=0
+ charts_serial_failures[$chart]=0
+
+ echo "CHART netdata.plugin_chartsd_$chart '' 'Execution time for $chart plugin' 'milliseconds / run' charts.d netdata.plugin_charts area 145000 ${charts_update_every[$chart]}"
+ echo "DIMENSION run_time 'run time' absolute 1 1"
+ done
+
+ # the main loop
+ while [ "${#next_charts[@]}" -gt 0 ]
+ do
+ c=$((c + 1))
+ now_charts=("${next_charts[@]}")
+ next_charts=()
+
+ # return the current time in ms in $now_ms
+ ${current_time_ms}
+
+ for chart in "${now_charts[@]}"
+ do
+ MODULE_NAME="${chart}"
+
+ if [ ${now_ms} -ge ${charts_next_update[$chart]} ]
+ then
+ last_ms=${charts_last_update[$chart]}
+ dt=$(( (now_ms - last_ms) ))
+
+ charts_last_update[$chart]=${now_ms}
+
+ while [ ${charts_next_update[$chart]} -lt ${now_ms} ]
+ do
+ charts_next_update[$chart]=$(( charts_next_update[$chart] + (charts_update_every[$chart] * 1000) ))
+ done
+
+ # the first call should not give a duration
+ # so that netdata calibrates to current time
+ dt=$(( dt * 1000 ))
+ charts_run_counter[$chart]=$(( charts_run_counter[$chart] + 1 ))
+ if [ ${charts_run_counter[$chart]} -eq 1 ]
+ then
+ dt=
+ fi
+
+ exec_start_ms=$now_ms
+ $chart$charts_update $dt
+ ret=$?
+
+ # return the current time in ms in $now_ms
+ ${current_time_ms}; exec_end_ms=$now_ms
+
+ echo "BEGIN netdata.plugin_chartsd_$chart $dt"
+ echo "SET run_time = $(( exec_end_ms - exec_start_ms ))"
+ echo "END"
+
+ if [ $ret -eq 0 ]
+ then
+ charts_serial_failures[$chart]=0
+ next_charts+=($chart)
+ else
+ charts_serial_failures[$chart]=$(( charts_serial_failures[$chart] + 1 ))
+
+ if [ ${charts_serial_failures[$chart]} -gt ${charts_retries[$chart]} ]
+ then
+ error "module's '$chart' update() function reported failure ${charts_serial_failures[$chart]} times. Disabling it."
+ else
+ error "module's '$chart' update() function reports failure. Will keep trying for a while."
+ next_charts+=($chart)
+ fi
+ fi
+ else
+ next_charts+=($chart)
+ fi
+ done
+ MODULE_NAME="${chart}"
+
+ # wait the time you are required to
+ next_ms=$((now_ms + (update_every * 1000 * 100) ))
+ for x in "${charts_next_update[@]}"; do [ ${x} -lt ${next_ms} ] && next_ms=${x}; done
+ next_ms=$((next_ms - now_ms))
+
+ if [ ${LOOPSLEEPMS_HIGHRES} -eq 1 -a ${next_ms} -gt 0 ]
+ then
+ next_ms=$(( next_ms + current_time_ms_accuracy ))
+ seconds=$(( next_ms / 1000 ))
+ millis=$(( next_ms % 1000 ))
+ if [ ${millis} -lt 10 ]
+ then
+ millis="00${millis}"
+ elif [ ${millis} -lt 100 ]
+ then
+ millis="0${millis}"
+ fi
+
+ debug "sleeping for ${seconds}.${millis} seconds."
+ ${mysleep} ${seconds}.${millis}
+ else
+ debug "sleeping for ${update_every} seconds."
+ ${mysleep} $update_every
+ fi
+
+ test ${now_ms} -ge ${exit_at} && exit 0
+ done
+
+ fatal "nothing left to do, exiting..."
+}
+
+global_update
diff --git a/collectors/charts.d.plugin/cpu_apps/Makefile.inc b/collectors/charts.d.plugin/cpu_apps/Makefile.inc
new file mode 100644
index 000000000..a35f82837
--- /dev/null
+++ b/collectors/charts.d.plugin/cpu_apps/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_charts_DATA += cpu_apps/cpu_apps.chart.sh
+dist_chartsconfig_DATA += cpu_apps/cpu_apps.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += cpu_apps/README.md cpu_apps/Makefile.inc
+
diff --git a/collectors/charts.d.plugin/cpu_apps/README.md b/collectors/charts.d.plugin/cpu_apps/README.md
new file mode 100644
index 000000000..cd8adf0a2
--- /dev/null
+++ b/collectors/charts.d.plugin/cpu_apps/README.md
@@ -0,0 +1,2 @@
+> THIS MODULE IS OBSOLETE.
+> USE APPS.PLUGIN.
diff --git a/collectors/charts.d.plugin/cpu_apps/cpu_apps.chart.sh b/collectors/charts.d.plugin/cpu_apps/cpu_apps.chart.sh
new file mode 100644
index 000000000..869464afe
--- /dev/null
+++ b/collectors/charts.d.plugin/cpu_apps/cpu_apps.chart.sh
@@ -0,0 +1,72 @@
+# shellcheck shell=bash disable=SC2154,SC1072,SC1073,SC2009,SC2162,SC2006,SC2002,SC2086,SC1117
+# no need for shebang - this file is loaded from charts.d.plugin
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# netdata
+# real-time performance and health monitoring, done right!
+# (C) 2016 Costa Tsaousis <costa@tsaousis.gr>
+#
+# THIS PLUGIN IS OBSOLETE
+# USE apps.plugin INSTEAD
+
+# a space-separated list of commands to monitor
+cpu_apps_apps=
+
+# these are required for computing memory in bytes and cpu in seconds
+#cpu_apps_pagesize="`getconf PAGESIZE`"
+cpu_apps_clockticks="$(getconf CLK_TCK)"
+
+cpu_apps_update_every=60
+
+cpu_apps_check() {
+ # this should return:
+ # - 0 to enable the chart
+ # - 1 to disable the chart
+
+ if [ -z "$cpu_apps_apps" ]
+ then
+ error "manual configuration required: please set cpu_apps_apps='command1 command2 ...' in $confd/cpu_apps_apps.conf"
+ return 1
+ fi
+ return 0
+}
+
+cpu_apps_bc_finalze=
+
+cpu_apps_create() {
+
+ echo "CHART chartsd_apps.cpu '' 'Apps CPU' 'milliseconds / $cpu_apps_update_every sec' apps apps stacked 20001 $cpu_apps_update_every"
+
+ local x=
+ for x in $cpu_apps_apps
+ do
+ echo "DIMENSION $x $x incremental 1000 $cpu_apps_clockticks"
+
+ # this string is needed later in the update() function
+ # to finalize the instructions for the bc command
+ cpu_apps_bc_finalze="$cpu_apps_bc_finalze \"SET $x = \"; $x;"
+ done
+ return 0
+}
+
+cpu_apps_update() {
+ # do all the work to collect / calculate the values
+ # for each dimension
+ # remember: KEEP IT SIMPLE AND SHORT
+
+ echo "BEGIN chartsd_apps.cpu"
+ ps -o pid,comm -C "$cpu_apps_apps" |\
+ grep -v "COMMAND" |\
+ (
+ while read pid name
+ do
+ echo "$name+=`cat /proc/$pid/stat | cut -d ' ' -f 14-15`"
+ done
+ ) |\
+ ( sed -e "s/ \+/ /g" -e "s/ /+/g";
+ echo "$cpu_apps_bc_finalze"
+ ) | bc
+ echo "END"
+
+ return 0
+}
diff --git a/collectors/charts.d.plugin/cpu_apps/cpu_apps.conf b/collectors/charts.d.plugin/cpu_apps/cpu_apps.conf
new file mode 100644
index 000000000..850cd0c6f
--- /dev/null
+++ b/collectors/charts.d.plugin/cpu_apps/cpu_apps.conf
@@ -0,0 +1,19 @@
+# no need for shebang - this file is loaded from charts.d.plugin
+
+# netdata
+# real-time performance and health monitoring, done right!
+# (C) 2018 Costa Tsaousis <costa@tsaousis.gr>
+# GPL v3+
+
+# THIS PLUGIN IS DEPRECATED
+# apps.plugin can do better
+
+#cpu_apps_apps=
+
+# the data collection frequency
+# if unset, will inherit the netdata update frequency
+#cpu_apps_update_every=2
+
+# the number of retries to do in case of failure
+# before disabling the module
+#cpu_apps_retries=10
diff --git a/collectors/charts.d.plugin/cpufreq/Makefile.inc b/collectors/charts.d.plugin/cpufreq/Makefile.inc
new file mode 100644
index 000000000..682379133
--- /dev/null
+++ b/collectors/charts.d.plugin/cpufreq/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_charts_DATA += cpufreq/cpufreq.chart.sh
+dist_chartsconfig_DATA += cpufreq/cpufreq.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += cpufreq/README.md cpufreq/Makefile.inc
+
diff --git a/collectors/charts.d.plugin/cpufreq/README.md b/collectors/charts.d.plugin/cpufreq/README.md
new file mode 100644
index 000000000..d82951aac
--- /dev/null
+++ b/collectors/charts.d.plugin/cpufreq/README.md
@@ -0,0 +1,2 @@
+> THIS MODULE IS OBSOLETE.
+> USE THE PYTHON ONE - IT SUPPORTS MULTIPLE JOBS AND IT IS MORE EFFICIENT
diff --git a/collectors/charts.d.plugin/cpufreq/cpufreq.chart.sh b/collectors/charts.d.plugin/cpufreq/cpufreq.chart.sh
new file mode 100644
index 000000000..1fc6caabf
--- /dev/null
+++ b/collectors/charts.d.plugin/cpufreq/cpufreq.chart.sh
@@ -0,0 +1,90 @@
+# shellcheck shell=bash
+# no need for shebang - this file is loaded from charts.d.plugin
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# netdata
+# real-time performance and health monitoring, done right!
+# (C) 2016 Costa Tsaousis <costa@tsaousis.gr>
+#
+
+# if this chart is called X.chart.sh, then all functions and global variables
+# must start with X_
+
+cpufreq_sys_dir="${NETDATA_HOST_PREFIX}/sys/devices"
+cpufreq_sys_depth=10
+cpufreq_source_update=1
+
+# _update_every is a special variable - it holds the number of seconds
+# between the calls of the _update() function
+cpufreq_update_every=
+cpufreq_priority=10000
+
+cpufreq_find_all_files() {
+ find "$1" -maxdepth $cpufreq_sys_depth -name scaling_cur_freq 2>/dev/null
+}
+
+# _check is called once, to find out if this chart should be enabled or not
+cpufreq_check() {
+
+ # this should return:
+ # - 0 to enable the chart
+ # - 1 to disable the chart
+
+ [ -z "$( cpufreq_find_all_files "$cpufreq_sys_dir" )" ] && return 1
+ return 0
+}
+
+# _create is called once, to create the charts
+cpufreq_create() {
+ local dir file id i
+
+ # we create a script with the source of the
+ # cpufreq_update() function
+ # - the highest speed we can achieve -
+ [ $cpufreq_source_update -eq 1 ] && echo >"$TMP_DIR/cpufreq.sh" "cpufreq_update() {"
+
+ echo "CHART cpu.cpufreq '' 'CPU Clock' 'MHz' 'cpufreq' '' line $((cpufreq_priority + 1)) $cpufreq_update_every"
+ echo >>"$TMP_DIR/cpufreq.sh" "echo \"BEGIN cpu.cpufreq \$1\""
+
+ i=0
+ for file in $( cpufreq_find_all_files "$cpufreq_sys_dir" | sort -u )
+ do
+ i=$(( i + 1 ))
+ dir=$( dirname "$file" )
+ cpu=
+
+ [ -f "$dir/affected_cpus" ] && cpu=$( cat "$dir/affected_cpus" )
+ [ -z "$cpu" ] && cpu="$i.a"
+
+ id="$( fixid "cpu$cpu" )"
+
+ debug "file='$file', dir='$dir', cpu='$cpu', id='$id'"
+
+ echo "DIMENSION $id '$id' absolute 1 1000"
+ echo >>"$TMP_DIR/cpufreq.sh" "echo \"SET $id = \"\$(< $file )"
+ done
+ echo >>"$TMP_DIR/cpufreq.sh" "echo END"
+
+ [ $cpufreq_source_update -eq 1 ] && echo >>"$TMP_DIR/cpufreq.sh" "}"
+
+ # ok, load the function cpufreq_update() we created
+ # shellcheck disable=SC1090
+ [ $cpufreq_source_update -eq 1 ] && . "$TMP_DIR/cpufreq.sh"
+
+ return 0
+}
+
+# _update is called continuously, to collect the values
+cpufreq_update() {
+ # the first argument to this function is the microseconds since last update
+ # pass this parameter to the BEGIN statement (see below).
+
+ # do all the work to collect / calculate the values
+ # for each dimension
+ # remember: KEEP IT SIMPLE AND SHORT
+ # shellcheck disable=SC1090
+ [ $cpufreq_source_update -eq 0 ] && . "$TMP_DIR/cpufreq.sh" "$1"
+
+ return 0
+}
+
diff --git a/collectors/charts.d.plugin/cpufreq/cpufreq.conf b/collectors/charts.d.plugin/cpufreq/cpufreq.conf
new file mode 100644
index 000000000..7130555af
--- /dev/null
+++ b/collectors/charts.d.plugin/cpufreq/cpufreq.conf
@@ -0,0 +1,24 @@
+# no need for shebang - this file is loaded from charts.d.plugin
+
+# netdata
+# real-time performance and health monitoring, done right!
+# (C) 2018 Costa Tsaousis <costa@tsaousis.gr>
+# GPL v3+
+
+# THIS PLUGIN IS DEPRECATED
+# USE THE PYTHON.D ONE
+
+#cpufreq_sys_dir="/sys/devices"
+#cpufreq_sys_depth=10
+#cpufreq_source_update=1
+
+# the data collection frequency
+# if unset, will inherit the netdata update frequency
+#cpufreq_update_every=
+
+# the charts priority on the dashboard
+#cpufreq_priority=10000
+
+# the number of retries to do in case of failure
+# before disabling the module
+#cpufreq_retries=10
diff --git a/collectors/charts.d.plugin/example/Makefile.inc b/collectors/charts.d.plugin/example/Makefile.inc
new file mode 100644
index 000000000..e6838fbbe
--- /dev/null
+++ b/collectors/charts.d.plugin/example/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_charts_DATA += example/example.chart.sh
+dist_chartsconfig_DATA += example/example.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += example/README.md example/Makefile.inc
+
diff --git a/collectors/charts.d.plugin/example/README.md b/collectors/charts.d.plugin/example/README.md
new file mode 100644
index 000000000..bfd5e210a
--- /dev/null
+++ b/collectors/charts.d.plugin/example/README.md
@@ -0,0 +1,2 @@
+This is just an example charts.d data collector.
+
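+To try it, set the magic number in its config file, `example.conf` (the path below is the usual charts.d config location and may differ on your setup); the check function refuses to start without it:
+
+```sh
+# /etc/netdata/charts.d/example.conf
+example_magic_number=12345
+```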
diff --git a/collectors/charts.d.plugin/example/example.chart.sh b/collectors/charts.d.plugin/example/example.chart.sh
new file mode 100644
index 000000000..1562c597a
--- /dev/null
+++ b/collectors/charts.d.plugin/example/example.chart.sh
@@ -0,0 +1,126 @@
+# shellcheck shell=bash
+# no need for shebang - this file is loaded from charts.d.plugin
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# netdata
+# real-time performance and health monitoring, done right!
+# (C) 2016 Costa Tsaousis <costa@tsaousis.gr>
+#
+
+# if this chart is called X.chart.sh, then all functions and global variables
+# must start with X_
+
+# _update_every is a special variable - it holds the number of seconds
+# between the calls of the _update() function
+example_update_every=
+
+# the priority is used to sort the charts on the dashboard
+# 1 = the first chart
+example_priority=150000
+
+# to enable this chart, you have to set this to 12345
+# (just a demonstration for something that needs to be checked)
+example_magic_number=
+
+# global variables to store our collected data
+# remember: they need to start with the module name example_
+example_value1=
+example_value2=
+example_value3=
+example_value4=
+example_last=0
+example_count=0
+
+example_get() {
+ # do all the work to collect / calculate the values
+ # for each dimension
+ #
+ # Remember:
+ # 1. KEEP IT SIMPLE AND SHORT
+ # 2. AVOID FORKS (avoid piping commands)
+ # 3. AVOID CALLING TOO MANY EXTERNAL PROGRAMS
+ # 4. USE LOCAL VARIABLES (global variables may overlap with other modules)
+
+ example_value1=$RANDOM
+ example_value2=$RANDOM
+ example_value3=$RANDOM
+ example_value4=$((8192 + (RANDOM * 16383 / 32767) ))
+
+ if [ $example_count -gt 0 ]
+ then
+ example_count=$((example_count - 1))
+
+ [ $example_last -gt 16383 ] && example_value4=$((example_last + (RANDOM * ( (32767 - example_last) / 2) / 32767)))
+ [ $example_last -le 16383 ] && example_value4=$((example_last - (RANDOM * (example_last / 2) / 32767)))
+ else
+ example_count=$((1 + (RANDOM * 5 / 32767) ))
+
+ if [ $example_last -gt 16383 ] && [ $example_value4 -gt 16383 ]
+ then
+ example_value4=$((example_value4 - 16383))
+ fi
+ if [ $example_last -le 16383 ] && [ $example_value4 -lt 16383 ]
+ then
+ example_value4=$((example_value4 + 16383))
+ fi
+ fi
+ example_last=$example_value4
+
+ # this should return:
+ # - 0 to send the data to netdata
+ # - 1 to report a failure to collect the data
+
+ return 0
+}
+
+# _check is called once, to find out if this chart should be enabled or not
+example_check() {
+ # this should return:
+ # - 0 to enable the chart
+ # - 1 to disable the chart
+
+ # check something
+ [ "${example_magic_number}" != "12345" ] && error "manual configuration required: you have to set example_magic_number=$example_magic_number in example.conf to start example chart." && return 1
+
+ # check that we can collect data
+ example_get || return 1
+
+ return 0
+}
+
+# _create is called once, to create the charts
+example_create() {
+ # create the chart with 3 dimensions
+ cat <<EOF
+CHART example.random '' "Random Numbers Stacked Chart" "% of random numbers" random random stacked $((example_priority)) $example_update_every
+DIMENSION random1 '' percentage-of-absolute-row 1 1
+DIMENSION random2 '' percentage-of-absolute-row 1 1
+DIMENSION random3 '' percentage-of-absolute-row 1 1
+CHART example.random2 '' "A random number" "random number" random random area $((example_priority + 1)) $example_update_every
+DIMENSION random '' absolute 1 1
+EOF
+
+ return 0
+}
+
+# _update is called continuously, to collect the values
+example_update() {
+ # the first argument to this function is the microseconds since last update
+ # pass this parameter to the BEGIN statement (see below).
+
+ example_get || return 1
+
+ # write the result of the work.
+ cat <<VALUESEOF
+BEGIN example.random $1
+SET random1 = $example_value1
+SET random2 = $example_value2
+SET random3 = $example_value3
+END
+BEGIN example.random2 $1
+SET random = $example_value4
+END
+VALUESEOF
+
+ return 0
+}
diff --git a/collectors/charts.d.plugin/example/example.conf b/collectors/charts.d.plugin/example/example.conf
new file mode 100644
index 000000000..6232ca584
--- /dev/null
+++ b/collectors/charts.d.plugin/example/example.conf
@@ -0,0 +1,21 @@
+# no need for shebang - this file is loaded from charts.d.plugin
+
+# netdata
+# real-time performance and health monitoring, done right!
+# (C) 2018 Costa Tsaousis <costa@tsaousis.gr>
+# GPL v3+
+
+# to enable this chart, you have to set this to 12345
+# (just a demonstration for something that needs to be checked)
+#example_magic_number=12345
+
+# the data collection frequency
+# if unset, will inherit the netdata update frequency
+#example_update_every=
+
+# the charts priority on the dashboard
+#example_priority=150000
+
+# the number of retries to do in case of failure
+# before disabling the module
+#example_retries=10
diff --git a/collectors/charts.d.plugin/exim/Makefile.inc b/collectors/charts.d.plugin/exim/Makefile.inc
new file mode 100644
index 000000000..ca2112a80
--- /dev/null
+++ b/collectors/charts.d.plugin/exim/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_charts_DATA += exim/exim.chart.sh
+dist_chartsconfig_DATA += exim/exim.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += exim/README.md exim/Makefile.inc
+
diff --git a/collectors/charts.d.plugin/exim/README.md b/collectors/charts.d.plugin/exim/README.md
new file mode 100644
index 000000000..d82951aac
--- /dev/null
+++ b/collectors/charts.d.plugin/exim/README.md
@@ -0,0 +1,2 @@
+> THIS MODULE IS OBSOLETE.
+> USE THE PYTHON ONE - IT SUPPORTS MULTIPLE JOBS AND IT IS MORE EFFICIENT
diff --git a/collectors/charts.d.plugin/exim/exim.chart.sh b/collectors/charts.d.plugin/exim/exim.chart.sh
new file mode 100644
index 000000000..8099a7249
--- /dev/null
+++ b/collectors/charts.d.plugin/exim/exim.chart.sh
@@ -0,0 +1,48 @@
+# shellcheck shell=bash
+# no need for shebang - this file is loaded from charts.d.plugin
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# netdata
+# real-time performance and health monitoring, done right!
+# (C) 2016 Costa Tsaousis <costa@tsaousis.gr>
+#
+# Contributed by @jsveiga with PR #480
+
+# the exim command to run
+exim_command=
+
+# how frequently to collect queue size
+exim_update_every=5
+
+exim_priority=60000
+
+exim_check() {
+ if [ -z "${exim_command}" ]
+ then
+ require_cmd exim || return 1
+ exim_command="${EXIM_CMD}"
+ fi
+
+ if [ "$(${exim_command} -bpc 2>&1 | grep -c denied)" -ne 0 ]
+ then
+ error "permission denied - please set 'queue_list_requires_admin = false' in your exim options file"
+ return 1
+ fi
+
+ return 0
+}
+
+exim_create() {
+ cat <<EOF
+CHART exim_local.qemails '' "Exim Queue Emails" "emails" queue exim.queued.emails line $((exim_priority + 1)) $exim_update_every
+DIMENSION emails '' absolute 1 1
+EOF
+ return 0
+}
+
+exim_update() {
+ echo "BEGIN exim_local.qemails $1"
+ echo "SET emails = $(run "${exim_command}" -bpc)"
+ echo "END"
+ return 0
+}
diff --git a/collectors/charts.d.plugin/exim/exim.conf b/collectors/charts.d.plugin/exim/exim.conf
new file mode 100644
index 000000000..f96ac4dbb
--- /dev/null
+++ b/collectors/charts.d.plugin/exim/exim.conf
@@ -0,0 +1,24 @@
+# no need for shebang - this file is loaded from charts.d.plugin
+
+# netdata
+# real-time performance and health monitoring, done right!
+# (C) 2018 Costa Tsaousis <costa@tsaousis.gr>
+# GPL v3+
+
+# THIS PLUGIN IS DEPRECATED
+# USE THE PYTHON.D ONE
+
+# the exim command to run
+# if empty, it will use the one found in the system path
+#exim_command=
+
+# the data collection frequency
+# if unset, will inherit the netdata update frequency
+#exim_update_every=5
+
+# the charts priority on the dashboard
+#exim_priority=60000
+
+# the number of retries to do in case of failure
+# before disabling the module
+#exim_retries=10
diff --git a/collectors/charts.d.plugin/hddtemp/Makefile.inc b/collectors/charts.d.plugin/hddtemp/Makefile.inc
new file mode 100644
index 000000000..2bd29e5b1
--- /dev/null
+++ b/collectors/charts.d.plugin/hddtemp/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_charts_DATA += hddtemp/hddtemp.chart.sh
+dist_chartsconfig_DATA += hddtemp/hddtemp.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += hddtemp/README.md hddtemp/Makefile.inc
+
diff --git a/collectors/charts.d.plugin/hddtemp/README.md b/collectors/charts.d.plugin/hddtemp/README.md
new file mode 100644
index 000000000..98f18900c
--- /dev/null
+++ b/collectors/charts.d.plugin/hddtemp/README.md
@@ -0,0 +1,28 @@
+> THIS MODULE IS OBSOLETE.
+> USE THE PYTHON ONE - IT SUPPORTS MULTIPLE JOBS AND IT IS MORE EFFICIENT
+
+# hddtemp
+
+The plugin will collect temperatures from disks.
+
+It will create one chart with all active disks:
+
+1. **temperature in Celsius**
+
+### configuration
+
+hddtemp needs to be running in daemonized mode (see the example after the configuration block below).
+
+```sh
+# host with daemonized hddtemp
+hddtemp_host="localhost"
+
+# port on which hddtemp is showing data
+hddtemp_port="7634"
+
+# array of included disks
+# the default is to include all
+hddtemp_disks=()
+```
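+
+For reference, a minimal sketch of starting hddtemp as a daemon (the device list, address and port below are examples; adjust them to your disks and setup):
+
+```sh
+# daemonize, listen on localhost:7634 and monitor these disks
+hddtemp -d -l 127.0.0.1 -p 7634 /dev/sda /dev/sdb
+```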
+
+---
diff --git a/collectors/charts.d.plugin/hddtemp/hddtemp.chart.sh b/collectors/charts.d.plugin/hddtemp/hddtemp.chart.sh
new file mode 100644
index 000000000..e90310981
--- /dev/null
+++ b/collectors/charts.d.plugin/hddtemp/hddtemp.chart.sh
@@ -0,0 +1,77 @@
+# shellcheck shell=bash
+# no need for shebang - this file is loaded from charts.d.plugin
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# netdata
+# real-time performance and health monitoring, done right!
+# (C) 2016 Costa Tsaousis <costa@tsaousis.gr>
+#
+# contributed by @paulfantom with PR #511
+
+# if this chart is called X.chart.sh, then all functions and global variables
+# must start with X_
+hddtemp_host="localhost"
+hddtemp_port="7634"
+declare -A hddtemp_disks=()
+
+# _update_every is a special variable - it holds the number of seconds
+# between the calls of the _update() function
+hddtemp_update_every=3
+hddtemp_priority=90000
+
+# _check is called once, to find out if this chart should be enabled or not
+hddtemp_check() {
+ require_cmd nc || return 1
+ run nc $hddtemp_host $hddtemp_port && return 0 || return 1
+}
+
+# _create is called once, to create the charts
+hddtemp_create() {
+ if [ ${#hddtemp_disks[@]} -eq 0 ]; then
+ local all
+ all=$(nc $hddtemp_host $hddtemp_port )
+ unset hddtemp_disks
+ # shellcheck disable=SC2190,SC2207
+ hddtemp_disks=( $(grep -Po '/dev/[^|]+' <<< "$all" | cut -c 6-) )
+ fi
+# local disk_names
+# disk_names=(`sed -e 's/||/\n/g;s/^|//' <<< "$all" | cut -d '|' -f2 | tr ' ' '_'`)
+
+ echo "CHART hddtemp.temperature 'disks_temp' 'temperature' 'Celsius' 'Disks temperature' 'hddtemp.temp' line $((hddtemp_priority)) $hddtemp_update_every"
+ for i in $(seq 0 $((${#hddtemp_disks[@]}-1))); do
+# echo "DIMENSION ${hddtemp_disks[i]} ${disk_names[i]} absolute 1 1"
+ echo "DIMENSION ${hddtemp_disks[$i]} '' absolute 1 1"
+ done
+ return 0
+}
+
+# _update is called continuously, to collect the values
+#hddtemp_last=0
+#hddtemp_count=0
+hddtemp_update() {
+# local all=( `nc $hddtemp_host $hddtemp_port | sed -e 's/||/\n/g;s/^|//' | cut -d '|' -f3` )
+# local all=( `nc $hddtemp_host $hddtemp_port | awk 'BEGIN { FS="|" };{i=4; while (i <= NF) {print $i+0;i+=5;};}'` )
+ OLD_IFS=$IFS
+ set -f
+ # shellcheck disable=SC2207
+ IFS="|" all=( $(nc $hddtemp_host $hddtemp_port 2>/dev/null) )
+ set +f
+ IFS=$OLD_IFS
+
+ # check if there is some data
+ if [ -z "${all[3]}" ]; then
+ return 1
+ fi
+
+ # write the result of the work.
+ echo "BEGIN hddtemp.temperature $1"
+ end=${#hddtemp_disks[@]}
+ for ((i=0; i<end; i++)); do
+ # temperature - this will turn SLP to zero
+ t=$(( ${all[ $((i * 5 + 3)) ]} ))
+ echo "SET ${hddtemp_disks[$i]} = $t"
+ done
+ echo "END"
+
+ return 0
+}
diff --git a/collectors/charts.d.plugin/hddtemp/hddtemp.conf b/collectors/charts.d.plugin/hddtemp/hddtemp.conf
new file mode 100644
index 000000000..b6037b40e
--- /dev/null
+++ b/collectors/charts.d.plugin/hddtemp/hddtemp.conf
@@ -0,0 +1,23 @@
+# no need for shebang - this file is loaded from charts.d.plugin
+
+# netdata
+# real-time performance and health monitoring, done right!
+# (C) 2018 Costa Tsaousis <costa@tsaousis.gr>
+# GPL v3+
+
+# THIS PLUGIN IS DEPRECATED
+# USE THE PYTHON.D ONE
+
+#hddtemp_host="localhost"
+#hddtemp_port="7634"
+
+# the data collection frequency
+# if unset, will inherit the netdata update frequency
+#hddtemp_update_every=3
+
+# the charts priority on the dashboard
+#hddtemp_priority=90000
+
+# the number of retries to do in case of failure
+# before disabling the module
+#hddtemp_retries=10
diff --git a/collectors/charts.d.plugin/libreswan/Makefile.inc b/collectors/charts.d.plugin/libreswan/Makefile.inc
new file mode 100644
index 000000000..af767d0dd
--- /dev/null
+++ b/collectors/charts.d.plugin/libreswan/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_charts_DATA += libreswan/libreswan.chart.sh
+dist_chartsconfig_DATA += libreswan/libreswan.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += libreswan/README.md libreswan/Makefile.inc
+
diff --git a/collectors/charts.d.plugin/libreswan/README.md b/collectors/charts.d.plugin/libreswan/README.md
new file mode 100644
index 000000000..41026cf72
--- /dev/null
+++ b/collectors/charts.d.plugin/libreswan/README.md
@@ -0,0 +1,42 @@
+# libreswan
+
+The plugin collects bytes-in, bytes-out and uptime for all established libreswan IPsec tunnels.
+
+The following charts are created, **per tunnel**:
+
+1. **Uptime**
+
+ * the uptime of the tunnel
+
+2. **Traffic**
+
+ * bytes in
+ * bytes out
+
+### configuration
+
+Its config file is `/etc/netdata/charts.d/libreswan.conf`.
+
+The plugin executes 2 commands to collect all the information it needs:
+
+```sh
+ipsec whack --status
+ipsec whack --trafficstatus
+```
+
+The first command is used to extract the currently established tunnels, their IDs and their names.
+The second command is used to extract the current uptime and traffic.
+
+In most setups, user `netdata` will not be able to query libreswan directly, so the `ipsec` commands will be denied.
+The plugin therefore attempts to run `ipsec` as `sudo ipsec ...` to get access to the libreswan statistics.
+
+To allow user `netdata` to execute `sudo ipsec ...`, create the file `/etc/sudoers.d/netdata` with this content:
+
+```
+netdata ALL = (root) NOPASSWD: /sbin/ipsec whack --status
+netdata ALL = (root) NOPASSWD: /sbin/ipsec whack --trafficstatus
+```
+
+Make sure the path `/sbin/ipsec` matches your setup (execute `which ipsec` to find the right path).
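+
+To verify that the sudoers entry works for the `netdata` user, you can run something like the following (a quick check; it assumes the paths used above):
+
+```sh
+sudo -u netdata sudo -n /sbin/ipsec whack --status
+```
+
+If this prints the tunnel status without asking for a password, the plugin will be able to collect its data.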
+
+---
diff --git a/collectors/charts.d.plugin/libreswan/libreswan.chart.sh b/collectors/charts.d.plugin/libreswan/libreswan.chart.sh
new file mode 100644
index 000000000..6e29f8473
--- /dev/null
+++ b/collectors/charts.d.plugin/libreswan/libreswan.chart.sh
@@ -0,0 +1,176 @@
+# shellcheck shell=bash disable=SC1117
+# no need for shebang - this file is loaded from charts.d.plugin
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# netdata
+# real-time performance and health monitoring, done right!
+# (C) 2018 Costa Tsaousis <costa@tsaousis.gr>
+#
+
+# _update_every is a special variable - it holds the number of seconds
+# between the calls of the _update() function
+libreswan_update_every=1
+
+# the priority is used to sort the charts on the dashboard
+# 1 = the first chart
+libreswan_priority=90000
+
+# set to 1, to run ipsec with sudo
+libreswan_sudo=1
+
+# global variables to store our collected data
+
+# [TUNNELID] = TUNNELNAME
+# here we track the *latest* established tunnels
+# as detected by: ipsec whack --status
+declare -A libreswan_connected_tunnels=()
+
+# [TUNNELID] = VALUE
+# here we track values of all established tunnels (not only the latest)
+# as detected by: ipsec whack --trafficstatus
+declare -A libreswan_traffic_in=()
+declare -A libreswan_traffic_out=()
+declare -A libreswan_established_add_time=()
+
+# [TUNNELNAME] = CHARTID
+# here we remember CHARTIDs of all tunnels
+# we need this to avoid converting tunnel names to chart IDs on every iteration
+declare -A libreswan_tunnel_charts=()
+
+# run the ipsec command
+libreswan_ipsec() {
+ if [ ${libreswan_sudo} -ne 0 ]
+ then
+ sudo -n "${IPSEC_CMD}" "${@}"
+ return $?
+ else
+ "${IPSEC_CMD}" "${@}"
+ return $?
+ fi
+}
+
+# fetch latest values - fill the arrays
+libreswan_get() {
+ # do all the work to collect / calculate the values
+ # for each dimension
+
+ # empty the variables
+ libreswan_traffic_in=()
+ libreswan_traffic_out=()
+ libreswan_established_add_time=()
+ libreswan_connected_tunnels=()
+
+ # convert the ipsec command output to a shell script
+ # and source it to get the values
+ # shellcheck disable=SC1090
+ source <(
+ {
+ libreswan_ipsec whack --status;
+ libreswan_ipsec whack --trafficstatus;
+ } | sed -n \
+ -e "s|[0-9]\+ #\([0-9]\+\): \"\(.*\)\".*IPsec SA established.*newest IPSEC.*|libreswan_connected_tunnels[\"\1\"]=\"\2\"|p" \
+ -e "s|[0-9]\+ #\([0-9]\+\): \"\(.*\)\",.* add_time=\([0-9]\+\),.* inBytes=\([0-9]\+\),.* outBytes=\([0-9]\+\).*|libreswan_traffic_in[\"\1\"]=\"\4\"; libreswan_traffic_out[\"\1\"]=\"\5\"; libreswan_established_add_time[\"\1\"]=\"\3\";|p"
+ ) || return 1
+
+ # check we got some data
+ [ ${#libreswan_connected_tunnels[@]} -eq 0 ] && return 1
+
+ return 0
+}
+
+# _check is called once, to find out if this chart should be enabled or not
+libreswan_check() {
+ # this should return:
+ # - 0 to enable the chart
+ # - 1 to disable the chart
+
+ require_cmd ipsec || return 1
+
+ # make sure it is libreswan
+ # shellcheck disable=SC2143
+ if [ -z "$(ipsec --version | grep -i libreswan)" ]
+ then
+ error "ipsec command is not Libreswan. Disabling Libreswan plugin."
+ return 1
+ fi
+
+ # check that we can collect data
+ libreswan_get || return 1
+
+ return 0
+}
+
+# create the charts for an ipsec tunnel
+libreswan_create_one() {
+ local n="${1}" name
+
+ name="${libreswan_connected_tunnels[${n}]}"
+
+ [ ! -z "${libreswan_tunnel_charts[${name}]}" ] && return 0
+
+ libreswan_tunnel_charts[${name}]="$(fixid "${name}")"
+
+ cat <<EOF
+CHART libreswan.${libreswan_tunnel_charts[${name}]}_net '${name}_net' "LibreSWAN Tunnel ${name} Traffic" "kilobits/s" "${name}" libreswan.net area $((libreswan_priority)) $libreswan_update_every
+DIMENSION in '' incremental 8 1000
+DIMENSION out '' incremental -8 1000
+CHART libreswan.${libreswan_tunnel_charts[${name}]}_uptime '${name}_uptime' "LibreSWAN Tunnel ${name} Uptime" "seconds" "${name}" libreswan.uptime line $((libreswan_priority + 1)) $libreswan_update_every
+DIMENSION uptime '' absolute 1 1
+EOF
+
+ return 0
+
+}
+
+# _create is called once, to create the charts
+libreswan_create() {
+ local n
+ for n in "${!libreswan_connected_tunnels[@]}"
+ do
+ libreswan_create_one "${n}"
+ done
+ return 0
+}
+
+libreswan_now=$(date +%s)
+
+# send the values to netdata for an ipsec tunnel
+libreswan_update_one() {
+ local n="${1}" microseconds="${2}" name id uptime
+
+ name="${libreswan_connected_tunnels[${n}]}"
+ id="${libreswan_tunnel_charts[${name}]}"
+
+ [ -z "${id}" ] && libreswan_create_one "${name}"
+
+ uptime=$(( libreswan_now - libreswan_established_add_time[${n}] ))
+ [ ${uptime} -lt 0 ] && uptime=0
+
+ # write the result of the work.
+ cat <<VALUESEOF
+BEGIN libreswan.${id}_net ${microseconds}
+SET in = ${libreswan_traffic_in[${n}]}
+SET out = ${libreswan_traffic_out[${n}]}
+END
+BEGIN libreswan.${id}_uptime ${microseconds}
+SET uptime = ${uptime}
+END
+VALUESEOF
+}
+
+# _update is called continuously, to collect the values
+libreswan_update() {
+ # the first argument to this function is the microseconds since last update
+ # pass this parameter to the BEGIN statement (see below).
+
+ libreswan_get || return 1
+ libreswan_now=$(date +%s)
+
+ local n
+ for n in "${!libreswan_connected_tunnels[@]}"
+ do
+ libreswan_update_one "${n}" "${@}"
+ done
+
+ return 0
+}
diff --git a/collectors/charts.d.plugin/libreswan/libreswan.conf b/collectors/charts.d.plugin/libreswan/libreswan.conf
new file mode 100644
index 000000000..9b3ee77b7
--- /dev/null
+++ b/collectors/charts.d.plugin/libreswan/libreswan.conf
@@ -0,0 +1,29 @@
+# no need for shebang - this file is loaded from charts.d.plugin
+
+# netdata
+# real-time performance and health monitoring, done right!
+# (C) 2018 Costa Tsaousis <costa@tsaousis.gr>
+# GPL v3+
+#
+
+# the data collection frequency
+# if unset, will inherit the netdata update frequency
+#libreswan_update_every=1
+
+# the charts priority on the dashboard
+#libreswan_priority=90000
+
+# the number of retries to do in case of failure
+# before disabling the module
+#libreswan_retries=10
+
+# set to 1, to run ipsec with sudo (the default)
+# set to 0, to run ipsec without sudo
+#libreswan_sudo=1
+
+# TO ALLOW NETDATA RUN ipsec AS ROOT
+# CREATE THE FILE: /etc/sudoers.d/netdata
+# WITH THESE 2 LINES (uncommented of course):
+#
+# netdata ALL = (root) NOPASSWD: /sbin/ipsec whack --status
+# netdata ALL = (root) NOPASSWD: /sbin/ipsec whack --trafficstatus
diff --git a/collectors/charts.d.plugin/load_average/Makefile.inc b/collectors/charts.d.plugin/load_average/Makefile.inc
new file mode 100644
index 000000000..e5a481bf4
--- /dev/null
+++ b/collectors/charts.d.plugin/load_average/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_charts_DATA += load_average/load_average.chart.sh
+dist_chartsconfig_DATA += load_average/load_average.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += load_average/README.md load_average/Makefile.inc
+
diff --git a/collectors/charts.d.plugin/load_average/README.md b/collectors/charts.d.plugin/load_average/README.md
new file mode 100644
index 000000000..39d3b8189
--- /dev/null
+++ b/collectors/charts.d.plugin/load_average/README.md
@@ -0,0 +1,2 @@
+> THIS MODULE IS OBSOLETE.
+> THE NETDATA DAEMON COLLECTS LOAD AVERAGE BY ITSELF
diff --git a/collectors/charts.d.plugin/load_average/load_average.chart.sh b/collectors/charts.d.plugin/load_average/load_average.chart.sh
new file mode 100644
index 000000000..b30cb850f
--- /dev/null
+++ b/collectors/charts.d.plugin/load_average/load_average.chart.sh
@@ -0,0 +1,71 @@
+# shellcheck shell=bash disable=SC2154,SC1072,SC1073,SC2009,SC2162,SC2006,SC2002,SC2086,SC1117
+# no need for shebang - this file is loaded from charts.d.plugin
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# netdata
+# real-time performance and health monitoring, done right!
+# (C) 2016 Costa Tsaousis <costa@tsaousis.gr>
+#
+
+load_average_update_every=5
+load_priority=100
+
+# this is an example charts.d collector
+# it is disabled by default.
+# there is no point to enable it, since netdata already
+# collects this information using its internal plugins.
+load_average_enabled=0
+
+load_average_check() {
+ # this should return:
+ # - 0 to enable the chart
+ # - 1 to disable the chart
+
+ if [ ${load_average_update_every} -lt 5 ]
+ then
+ # there is no meaning for shorter than 5 seconds
+ # the kernel changes this value every 5 seconds
+ load_average_update_every=5
+ fi
+
+ [ ${load_average_enabled} -eq 0 ] && return 1
+ return 0
+}
+
+load_average_create() {
+ # create a chart with 3 dimensions
+cat <<EOF
+CHART system.load '' "System Load Average" "load" load system.load line $((load_priority + 1)) $load_average_update_every
+DIMENSION load1 '1 min' absolute 1 100
+DIMENSION load5 '5 mins' absolute 1 100
+DIMENSION load15 '15 mins' absolute 1 100
+EOF
+
+ return 0
+}
+
+load_average_update() {
+ # do all the work to collect / calculate the values
+ # for each dimension
+ # remember: KEEP IT SIMPLE AND SHORT
+
+ # here we parse the system average load
+ # it is decimal (with 2 decimal digits), so we remove the dot and
+ # at the definition we have divisor = 100, to have the graph show the right value
+ loadavg="`cat /proc/loadavg | sed -e "s/\.//g"`"
+ load1=`echo $loadavg | cut -d ' ' -f 1`
+ load5=`echo $loadavg | cut -d ' ' -f 2`
+ load15=`echo $loadavg | cut -d ' ' -f 3`
+
+ # write the result of the work.
+ cat <<VALUESEOF
+BEGIN system.load
+SET load1 = $load1
+SET load5 = $load5
+SET load15 = $load15
+END
+VALUESEOF
+
+ return 0
+}
+
diff --git a/collectors/charts.d.plugin/load_average/load_average.conf b/collectors/charts.d.plugin/load_average/load_average.conf
new file mode 100644
index 000000000..68979275f
--- /dev/null
+++ b/collectors/charts.d.plugin/load_average/load_average.conf
@@ -0,0 +1,22 @@
+# no need for shebang - this file is loaded from charts.d.plugin
+
+# netdata
+# real-time performance and health monitoring, done right!
+# (C) 2018 Costa Tsaousis <costa@tsaousis.gr>
+# GPL v3+
+
+# THIS PLUGIN IS DEPRECATED
+# netdata can collect this metric already
+
+#load_average_enabled=0
+
+# the data collection frequency
+# if unset, will inherit the netdata update frequency
+#load_average_update_every=5
+
+# the charts priority on the dashboard
+#load_average_priority=100
+
+# the number of retries to do in case of failure
+# before disabling the module
+#load_average_retries=10
diff --git a/collectors/charts.d.plugin/loopsleepms.sh.inc b/collectors/charts.d.plugin/loopsleepms.sh.inc
new file mode 100644
index 000000000..bdc032b99
--- /dev/null
+++ b/collectors/charts.d.plugin/loopsleepms.sh.inc
@@ -0,0 +1,237 @@
+# no need for shebang - this file is included from other scripts
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+LOOPSLEEP_DATE="$(which date 2>/dev/null || command -v date 2>/dev/null)"
+if [ -z "$LOOPSLEEP_DATE" ]
+ then
+ echo >&2 "$0: ERROR: Cannot find the command 'date' in the system path."
+ exit 1
+fi
+
+# -----------------------------------------------------------------------------
+# use the date command as a high resolution timer
+
+now_ms=
+LOOPSLEEPMS_HIGHRES=1
+test "$($LOOPSLEEP_DATE +%N)" = "%N" && LOOPSLEEPMS_HIGHRES=0
+test -z "$($LOOPSLEEP_DATE +%N)" && LOOPSLEEPMS_HIGHRES=0
+current_time_ms_from_date() {
+ if [ $LOOPSLEEPMS_HIGHRES -eq 0 ]
+ then
+ now_ms="$($LOOPSLEEP_DATE +'%s')000"
+ else
+ now_ms="$(( $( $LOOPSLEEP_DATE +'%s * 1000 + %-N / 1000000' ) ))"
+ fi
+}
+
+# -----------------------------------------------------------------------------
+# use /proc/uptime as a high resolution timer
+
+current_time_ms_from_date
+current_time_ms_from_uptime_started="${now_ms}"
+current_time_ms_from_uptime_last="${now_ms}"
+current_time_ms_from_uptime_first=0
+current_time_ms_from_uptime() {
+ local up rest arr=() n
+
+ read up rest </proc/uptime
+ if [ $? -ne 0 ]
+ then
+ echo >&2 "$0: Cannot read /proc/uptime - falling back to current_time_ms_from_date()."
+ current_time_ms="current_time_ms_from_date"
+ current_time_ms_from_date
+ current_time_ms_accuracy=1
+ return
+ fi
+
+ arr=(${up//./ })
+
+ if [ ${#arr[1]} -lt 1 ]
+ then
+ n="${arr[0]}000"
+ elif [ ${#arr[1]} -lt 2 ]
+ then
+ n="${arr[0]}${arr[1]}00"
+ elif [ ${#arr[1]} -lt 3 ]
+ then
+ n="${arr[0]}${arr[1]}0"
+ else
+ n="${arr[0]}${arr[1]}"
+ fi
+
+ now_ms=$((current_time_ms_from_uptime_started - current_time_ms_from_uptime_first + n))
+
+ if [ "${now_ms}" -lt "${current_time_ms_from_uptime_last}" ]
+ then
+ echo >&2 "$0: Cannot use current_time_ms_from_uptime() - new time ${now_ms} is older than the last ${current_time_ms_from_uptime_last} - falling back to current_time_ms_from_date()."
+ current_time_ms="current_time_ms_from_date"
+ current_time_ms_from_date
+ current_time_ms_accuracy=1
+ fi
+
+ current_time_ms_from_uptime_last="${now_ms}"
+}
+current_time_ms_from_uptime
+current_time_ms_from_uptime_first="$((now_ms - current_time_ms_from_uptime_started))"
+current_time_ms_from_uptime_last="${current_time_ms_from_uptime_first}"
+current_time_ms="current_time_ms_from_uptime"
+current_time_ms_accuracy=10
+if [ "${current_time_ms_from_uptime_first}" -eq 0 ]
+ then
+ echo >&2 "$0: Invalid setup for current_time_ms_from_uptime() - falling back to current_time_ms_from_date()."
+ current_time_ms="current_time_ms_from_date"
+ current_time_ms_accuracy=1
+fi
+
+# -----------------------------------------------------------------------------
+# use read with timeout for sleep
+
+mysleep=""
+
+mysleep_fifo="${NETDATA_CACHE_DIR-/tmp}/.netdata_bash_sleep_timer_fifo"
+[ -f "${mysleep_fifo}" ] && rm "${mysleep_fifo}"
+[ ! -p "${mysleep_fifo}" ] && mkfifo "${mysleep_fifo}"
+[ -p "${mysleep_fifo}" ] && mysleep="mysleep_read"
+
+mysleep_read() {
+ read -t "${1}" <>"${mysleep_fifo}"
+ ret=$?
+ if [ $ret -le 128 ]
+ then
+ echo >&2 "$0: Cannot use read for sleeping (return code ${ret})."
+ mysleep="sleep"
+ ${mysleep} "${1}"
+ fi
+}
+
+# -----------------------------------------------------------------------------
+# use bash loadable module for sleep
+
+mysleep_builtin() {
+ builtin sleep "${1}"
+ ret=$?
+ if [ $ret -ne 0 ]
+ then
+ echo >&2 "$0: Cannot use builtin sleep for sleeping (return code ${ret})."
+ mysleep="sleep"
+ ${mysleep} "${1}"
+ fi
+}
+
+if [ -z "${mysleep}" -a "$((BASH_VERSINFO[0] +0))" -ge 3 -a "${NETDATA_BASH_LOADABLES}" != "DISABLE" ]
+ then
+ # enable modules only for bash version 3+
+
+ for bash_modules_path in ${BASH_LOADABLES_PATH//:/ } "$(pkg-config bash --variable=loadablesdir 2>/dev/null)" "/usr/lib/bash" "/lib/bash" "/lib64/bash" "/usr/local/lib/bash" "/usr/local/lib64/bash"
+ do
+ [ -z "${bash_modules_path}" -o ! -d "${bash_modules_path}" ] && continue
+
+ # check for sleep
+ for bash_module_sleep in "sleep" "sleep.so"
+ do
+ if [ -f "${bash_modules_path}/${bash_module_sleep}" ]
+ then
+ if enable -f "${bash_modules_path}/${bash_module_sleep}" sleep 2>/dev/null
+ then
+ mysleep="mysleep_builtin"
+ # echo >&2 "$0: Using bash loadable ${bash_modules_path}/${bash_module_sleep} for sleep"
+ break
+ fi
+ fi
+
+ done
+
+ [ ! -z "${mysleep}" ] && break
+ done
+fi
+
+# -----------------------------------------------------------------------------
+# fallback to external sleep
+
+[ -z "${mysleep}" ] && mysleep="sleep"
+
+
+# -----------------------------------------------------------------------------
+# this function is used to sleep for a fraction of a second
+# it calculates the time that elapsed since it was last called
+# and aligns the sleep time so that you get exactly the
+# loop interval you need.
+
+LOOPSLEEPMS_LASTRUN=0
+LOOPSLEEPMS_NEXTRUN=0
+LOOPSLEEPMS_LASTSLEEP=0
+LOOPSLEEPMS_LASTWORK=0
+
+loopsleepms() {
+ local tellwork=0 t="${1}" div s m now mstosleep
+
+ if [ "${t}" = "tellwork" ]
+ then
+ tellwork=1
+ shift
+ t="${1}"
+ fi
+
+ # $t = the time in seconds to wait
+
+ # if high resolution is not supported
+ # just sleep the time requested, in seconds
+ if [ ${LOOPSLEEPMS_HIGHRES} -eq 0 ]
+ then
+ sleep ${t}
+ return
+ fi
+
+ # get the current time, in ms in ${now_ms}
+ ${current_time_ms}
+
+ # calculate ms since last run
+ [ ${LOOPSLEEPMS_LASTRUN} -gt 0 ] && \
+ LOOPSLEEPMS_LASTWORK=$((now_ms - LOOPSLEEPMS_LASTRUN - LOOPSLEEPMS_LASTSLEEP + current_time_ms_accuracy))
+ # echo "# last loop's work took $LOOPSLEEPMS_LASTWORK ms"
+
+ # remember this run
+ LOOPSLEEPMS_LASTRUN=${now_ms}
+
+ # calculate the next run
+ LOOPSLEEPMS_NEXTRUN=$(( ( now_ms - ( now_ms % ( t * 1000 ) ) ) + ( t * 1000 ) ))
+
+ # calculate ms to sleep
+ mstosleep=$(( LOOPSLEEPMS_NEXTRUN - now_ms + current_time_ms_accuracy ))
+ # echo "# mstosleep is $mstosleep ms"
+
+ # if we are too slow, sleep some time
+ test ${mstosleep} -lt 200 && mstosleep=200
+
+ s=$(( mstosleep / 1000 ))
+ m=$(( mstosleep - (s * 1000) ))
+ [ "${m}" -lt 100 ] && m="0${m}"
+ [ "${m}" -lt 10 ] && m="0${m}"
+
+ test $tellwork -eq 1 && echo >&2 " >>> PERFORMANCE >>> WORK TOOK ${LOOPSLEEPMS_LASTWORK} ms ( $((LOOPSLEEPMS_LASTWORK * 100 / 1000)).$((LOOPSLEEPMS_LASTWORK % 10))% cpu ) >>> SLEEPING ${mstosleep} ms"
+
+ # echo "# sleeping ${s}.${m}"
+ # echo
+ ${mysleep} ${s}.${m}
+
+ # keep the values we need
+ # for our next run
+ LOOPSLEEPMS_LASTSLEEP=$mstosleep
+}
+
+# test it
+#while [ 1 ]
+#do
+# r=$(( (RANDOM * 2000 / 32767) ))
+# s=$((r / 1000))
+# m=$((r - (s * 1000)))
+# [ "${m}" -lt 100 ] && m="0${m}"
+# [ "${m}" -lt 10 ] && m="0${m}"
+# echo "${r} = ${s}.${m}"
+#
+# # the work
+# ${mysleep} ${s}.${m}
+#
+# # the alignment loop
+# loopsleepms tellwork 1
+#done
diff --git a/collectors/charts.d.plugin/mem_apps/Makefile.inc b/collectors/charts.d.plugin/mem_apps/Makefile.inc
new file mode 100644
index 000000000..ea546fb69
--- /dev/null
+++ b/collectors/charts.d.plugin/mem_apps/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_charts_DATA += mem_apps/mem_apps.chart.sh
+dist_chartsconfig_DATA += mem_apps/mem_apps.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += mem_apps/README.md mem_apps/Makefile.inc
+
diff --git a/collectors/charts.d.plugin/mem_apps/README.md b/collectors/charts.d.plugin/mem_apps/README.md
new file mode 100644
index 000000000..cd8adf0a2
--- /dev/null
+++ b/collectors/charts.d.plugin/mem_apps/README.md
@@ -0,0 +1,2 @@
+> THIS MODULE IS OBSOLETE.
+> USE APPS.PLUGIN.
diff --git a/collectors/charts.d.plugin/mem_apps/mem_apps.chart.sh b/collectors/charts.d.plugin/mem_apps/mem_apps.chart.sh
new file mode 100644
index 000000000..a13dc71f1
--- /dev/null
+++ b/collectors/charts.d.plugin/mem_apps/mem_apps.chart.sh
@@ -0,0 +1,63 @@
+# shellcheck shell=bash disable=SC2154,SC1072,SC1073,SC2009,SC2162,SC2006,SC2002,SC2086,SC1117
+# no need for shebang - this file is loaded from charts.d.plugin
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# netdata
+# real-time performance and health monitoring, done right!
+# (C) 2016 Costa Tsaousis <costa@tsaousis.gr>
+#
+
+mem_apps_apps=
+
+# these are required for computing memory in bytes and cpu in seconds
+#mem_apps_pagesize="`getconf PAGESIZE`"
+#mem_apps_clockticks="`getconf CLK_TCK`"
+
+mem_apps_update_every=
+
+mem_apps_check() {
+ # this should return:
+ # - 0 to enable the chart
+ # - 1 to disable the chart
+
+ if [ -z "$mem_apps_apps" ]
+ then
+ error "manual configuration required: please set mem_apps_apps='command1 command2 ...' in $confd/mem_apps_apps.conf"
+ return 1
+ fi
+ return 0
+}
+
+mem_apps_bc_finalze=
+
+mem_apps_create() {
+
+ echo "CHART chartsd_apps.mem '' 'Apps Memory' MB apps apps.mem stacked 20000 $mem_apps_update_every"
+
+ local x=
+ for x in $mem_apps_apps
+ do
+ echo "DIMENSION $x $x absolute 1 1024"
+
+ # this string is needed later in the update() function
+ # to finalize the instructions for the bc command
+ mem_apps_bc_finalze="$mem_apps_bc_finalze \"SET $x = \"; $x;"
+ done
+ return 0
+}
+
+mem_apps_update() {
+ # do all the work to collect / calculate the values
+ # for each dimension
+ # remember: KEEP IT SIMPLE AND SHORT
+
+ echo "BEGIN chartsd_apps.mem"
+ ps -o comm,rss -C "$mem_apps_apps" |\
+ grep -v "^COMMAND" |\
+ ( sed -e "s/ \+/ /g" -e "s/ /+=/g";
+ echo "$mem_apps_bc_finalze"
+ ) | bc
+ echo "END"
+
+ return 0
+}
diff --git a/collectors/charts.d.plugin/mem_apps/mem_apps.conf b/collectors/charts.d.plugin/mem_apps/mem_apps.conf
new file mode 100644
index 000000000..75d24dc3e
--- /dev/null
+++ b/collectors/charts.d.plugin/mem_apps/mem_apps.conf
@@ -0,0 +1,19 @@
+# no need for shebang - this file is loaded from charts.d.plugin
+
+# netdata
+# real-time performance and health monitoring, done right!
+# (C) 2018 Costa Tsaousis <costa@tsaousis.gr>
+# GPL v3+
+
+# THIS PLUGIN IS DEPRECATED
+# apps.plugin can do better
+
+#mem_apps_apps=
+
+# the data collection frequency
+# if unset, will inherit the netdata update frequency
+#mem_apps_update_every=2
+
+# the number of retries to do in case of failure
+# before disabling the module
+#mem_apps_retries=10
diff --git a/collectors/charts.d.plugin/mysql/Makefile.inc b/collectors/charts.d.plugin/mysql/Makefile.inc
new file mode 100644
index 000000000..ca02fd078
--- /dev/null
+++ b/collectors/charts.d.plugin/mysql/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_charts_DATA += mysql/mysql.chart.sh
+dist_chartsconfig_DATA += mysql/mysql.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += mysql/README.md mysql/Makefile.inc
+
diff --git a/collectors/charts.d.plugin/mysql/README.md b/collectors/charts.d.plugin/mysql/README.md
new file mode 100644
index 000000000..6765b53ab
--- /dev/null
+++ b/collectors/charts.d.plugin/mysql/README.md
@@ -0,0 +1,81 @@
+> THIS MODULE IS OBSOLETE.
+> USE THE PYTHON ONE - IT SUPPORTS MULTIPLE JOBS AND IT IS MORE EFFICIENT
+
+# mysql
+
+The plugin will monitor one or more mysql servers.
+
+It will produce the following charts:
+
+1. **Bandwidth** in kbps
+ * in
+ * out
+
+2. **Queries** in queries/sec
+ * queries
+ * questions
+ * slow queries
+
+3. **Operations** in operations/sec
+ * opened tables
+ * flush
+ * commit
+ * delete
+ * prepare
+ * read first
+ * read key
+ * read next
+ * read prev
+ * read random
+ * read random next
+ * rollback
+ * save point
+ * update
+ * write
+
+4. **Table Locks** in locks/sec
+ * immediate
+ * waited
+
+5. **Select Issues** in issues/sec
+ * full join
+ * full range join
+ * range
+ * range check
+ * scan
+
+6. **Sort Issues** in issues/sec
+ * merge passes
+ * range
+ * scan
+
+### configuration
+
+You can configure multiple database servers.
+
+For each server, you can provide the following:
+
+1. a name, anything you like, but keep it short
+2. the mysql command to connect to the server
+3. the mysql command line options to be used for connecting to the server
+
+Here is an example for 2 servers:
+
+```sh
+mysql_opts[server1]="-h server1.example.com"
+mysql_opts[server2]="-h server2.example.com --connect_timeout 2"
+```
+
+The above will use the `mysql` command found in the system path.
+You can also provide a custom mysql command per server, like this:
+
+```sh
+mysql_cmds[server2]="/opt/mysql/bin/mysql"
+```
+
+The above sets the mysql command only for `server2`; `server1` will use the system default.
+
+If no configuration is given, the plugin will attempt to connect to the mysql server at localhost.
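+
+Since the plugin only runs `SHOW GLOBAL STATUS`, which needs no privilege beyond the ability to connect, a dedicated passwordless mysql user is usually enough. A minimal sketch (the user name and the `local` server name are assumptions; adjust to your setup):
+
+```sh
+# create a mysql user for netdata, e.g. as a mysql admin run:
+#   mysql -e "CREATE USER 'netdata'@'localhost';"
+
+# then point the plugin to it in mysql.conf
+# (the system `mysql` command is used by default):
+mysql_opts[local]="-u netdata"
+```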
+
+
+---
diff --git a/collectors/charts.d.plugin/mysql/mysql.chart.sh b/collectors/charts.d.plugin/mysql/mysql.chart.sh
new file mode 100644
index 000000000..37e8e2a7c
--- /dev/null
+++ b/collectors/charts.d.plugin/mysql/mysql.chart.sh
@@ -0,0 +1,528 @@
+# shellcheck shell=bash
+# no need for shebang - this file is loaded from charts.d.plugin
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# netdata
+# real-time performance and health monitoring, done right!
+# (C) 2016 Costa Tsaousis <costa@tsaousis.gr>
+#
+
+# http://dev.mysql.com/doc/refman/5.0/en/server-status-variables.html
+#
+# https://dev.mysql.com/doc/refman/5.1/en/show-status.html
+# SHOW STATUS provides server status information (see Section 5.1.6, “Server Status Variables”).
+# This statement does not require any privilege.
+# It requires only the ability to connect to the server.
+
+mysql_update_every=2
+mysql_priority=60000
+
+declare -A mysql_cmds=() mysql_opts=() mysql_ids=() mysql_data=()
+
+mysql_get() {
+ local arr
+ local oIFS="${IFS}"
+ mysql_data=()
+ IFS=$'\t'$'\n'
+ #arr=($(run "${@}" -e "SHOW GLOBAL STATUS WHERE value REGEXP '^[0-9]';" | egrep "^(Bytes|Slow_|Que|Handl|Table|Selec|Sort_|Creat|Conne|Abort|Binlo|Threa|Innod|Qcach|Key_|Open)" ))
+ #arr=($(run "${@}" -N -e "SHOW GLOBAL STATUS;" | egrep "^(Bytes|Slow_|Que|Handl|Table|Selec|Sort_|Creat|Conne|Abort|Binlo|Threa|Innod|Qcach|Key_|Open)[^ ]+\s[0-9]" ))
+ # shellcheck disable=SC2207
+ arr=($(run "${@}" -N -e "SHOW GLOBAL STATUS;" | grep -E "^(Bytes|Slow_|Que|Handl|Table|Selec|Sort_|Creat|Conne|Abort|Binlo|Threa|Innod|Qcach|Key_|Open)[^[:space:]]+[[:space:]]+[0-9]+" ))
+ IFS="${oIFS}"
+
+ [ "${#arr[@]}" -lt 3 ] && return 1
+ local end=${#arr[@]}
+ for ((i=2;i<end;i+=2)); do
+ mysql_data["${arr[$i]}"]=${arr[$i+1]}
+ done
+
+ [ -z "${mysql_data[Connections]}" ] && return 1
+
+ mysql_data[Thread_cache_misses]=0
+ [ $(( mysql_data[Connections] + 1 - 1 )) -gt 0 ] && mysql_data[Thread_cache_misses]=$(( mysql_data[Threads_created] * 10000 / mysql_data[Connections] ))
+
+ return 0
+}
+
+mysql_check() {
+ # this should return:
+ # - 0 to enable the chart
+ # - 1 to disable the chart
+
+ local x m mysql_cmd tryroot=0 unconfigured=0
+
+ if [ "${1}" = "tryroot" ]
+ then
+ tryroot=1
+ shift
+ fi
+
+ # shellcheck disable=SC2230
+ [ -z "${mysql_cmd}" ] && mysql_cmd="$(which mysql 2>/dev/null || command -v mysql 2>/dev/null)"
+
+ if [ ${#mysql_opts[@]} -eq 0 ]
+ then
+ unconfigured=1
+
+ mysql_cmds[local]="$mysql_cmd"
+
+ if [ $tryroot -eq 1 ]
+ then
+ # the user has not configured us for mysql access
+ # if the root user is passwordless in mysql, we can
+ # attempt to connect to mysql as root
+ mysql_opts[local]="-u root"
+ else
+ mysql_opts[local]=
+ fi
+ fi
+
+ # check once if the url works
+ for m in "${!mysql_opts[@]}"
+ do
+ [ -z "${mysql_cmds[$m]}" ] && mysql_cmds[$m]="$mysql_cmd"
+ if [ -z "${mysql_cmds[$m]}" ]
+ then
+ # shellcheck disable=SC2154
+ error "cannot get mysql command for '${m}'. Please set mysql_cmds[$m]='/path/to/mysql', in $confd/mysql.conf"
+ fi
+
+ mysql_get "${mysql_cmds[$m]}" ${mysql_opts[$m]}
+ # shellcheck disable=SC2181
+ if [ ! $? -eq 0 ]
+ then
+ error "cannot get global status for '$m'. Please set mysql_opts[$m]='options' to whatever needed to get connected to the mysql server, in $confd/mysql.conf"
+ unset "mysql_cmds[$m]"
+ unset "mysql_opts[$m]"
+ unset "mysql_ids[$m]"
+ continue
+ fi
+
+ mysql_ids[$m]="$( fixid "$m" )"
+ done
+
+ if [ ${#mysql_opts[@]} -eq 0 ]
+ then
+ if [ ${unconfigured} -eq 1 ] && [ ${tryroot} -eq 0 ]
+ then
+ mysql_check tryroot "${@}"
+ return $?
+ else
+ error "no mysql servers found. Please set mysql_opts[name]='options' to whatever needed to get connected to the mysql server, in $confd/mysql.conf"
+ return 1
+ fi
+ fi
+
+ return 0
+}
+
+mysql_create() {
+ local x
+
+ # create the charts
+ for x in "${mysql_ids[@]}"
+ do
+ cat <<EOF
+CHART mysql_$x.net '' "mysql Bandwidth" "kilobits/s" bandwidth mysql.net area $((mysql_priority + 1)) $mysql_update_every
+DIMENSION Bytes_received in incremental 8 1024
+DIMENSION Bytes_sent out incremental -8 1024
+
+CHART mysql_$x.queries '' "mysql Queries" "queries/s" queries mysql.queries line $((mysql_priority + 2)) $mysql_update_every
+DIMENSION Queries queries incremental 1 1
+DIMENSION Questions questions incremental 1 1
+DIMENSION Slow_queries slow_queries incremental -1 1
+
+CHART mysql_$x.handlers '' "mysql Handlers" "handlers/s" handlers mysql.handlers line $((mysql_priority + 3)) $mysql_update_every
+DIMENSION Handler_commit commit incremental 1 1
+DIMENSION Handler_delete delete incremental 1 1
+DIMENSION Handler_prepare prepare incremental 1 1
+DIMENSION Handler_read_first read_first incremental 1 1
+DIMENSION Handler_read_key read_key incremental 1 1
+DIMENSION Handler_read_next read_next incremental 1 1
+DIMENSION Handler_read_prev read_prev incremental 1 1
+DIMENSION Handler_read_rnd read_rnd incremental 1 1
+DIMENSION Handler_read_rnd_next read_rnd_next incremental 1 1
+DIMENSION Handler_rollback rollback incremental 1 1
+DIMENSION Handler_savepoint savepoint incremental 1 1
+DIMENSION Handler_savepoint_rollback savepoint_rollback incremental 1 1
+DIMENSION Handler_update update incremental 1 1
+DIMENSION Handler_write write incremental 1 1
+
+CHART mysql_$x.table_locks '' "mysql Tables Locks" "locks/s" locks mysql.table_locks line $((mysql_priority + 4)) $mysql_update_every
+DIMENSION Table_locks_immediate immediate incremental 1 1
+DIMENSION Table_locks_waited waited incremental -1 1
+
+CHART mysql_$x.join_issues '' "mysql Select Join Issues" "joins/s" issues mysql.join_issues line $((mysql_priority + 5)) $mysql_update_every
+DIMENSION Select_full_join full_join incremental 1 1
+DIMENSION Select_full_range_join full_range_join incremental 1 1
+DIMENSION Select_range range incremental 1 1
+DIMENSION Select_range_check range_check incremental 1 1
+DIMENSION Select_scan scan incremental 1 1
+
+CHART mysql_$x.sort_issues '' "mysql Sort Issues" "issues/s" issues mysql.sort.issues line $((mysql_priority + 6)) $mysql_update_every
+DIMENSION Sort_merge_passes merge_passes incremental 1 1
+DIMENSION Sort_range range incremental 1 1
+DIMENSION Sort_scan scan incremental 1 1
+
+CHART mysql_$x.tmp '' "mysql Tmp Operations" "counter" temporaries mysql.tmp line $((mysql_priority + 7)) $mysql_update_every
+DIMENSION Created_tmp_disk_tables disk_tables incremental 1 1
+DIMENSION Created_tmp_files files incremental 1 1
+DIMENSION Created_tmp_tables tables incremental 1 1
+
+CHART mysql_$x.connections '' "mysql Connections" "connections/s" connections mysql.connections line $((mysql_priority + 8)) $mysql_update_every
+DIMENSION Connections all incremental 1 1
+DIMENSION Aborted_connects aborded incremental 1 1
+
+CHART mysql_$x.binlog_cache '' "mysql Binlog Cache" "transactions/s" binlog mysql.binlog_cache line $((mysql_priority + 9)) $mysql_update_every
+DIMENSION Binlog_cache_disk_use disk incremental 1 1
+DIMENSION Binlog_cache_use all incremental 1 1
+
+CHART mysql_$x.threads '' "mysql Threads" "threads" threads mysql.threads line $((mysql_priority + 10)) $mysql_update_every
+DIMENSION Threads_connected connected absolute 1 1
+DIMENSION Threads_created created incremental 1 1
+DIMENSION Threads_cached cached absolute -1 1
+DIMENSION Threads_running running absolute 1 1
+
+CHART mysql_$x.thread_cache_misses '' "mysql Threads Cache Misses" "misses" threads mysql.thread_cache_misses area $((mysql_priority + 11)) $mysql_update_every
+DIMENSION misses misses absolute 1 100
+
+CHART mysql_$x.innodb_io '' "mysql InnoDB I/O Bandwidth" "kilobytes/s" innodb mysql.innodb_io area $((mysql_priority + 12)) $mysql_update_every
+DIMENSION Innodb_data_read read incremental 1 1024
+DIMENSION Innodb_data_written write incremental -1 1024
+
+CHART mysql_$x.innodb_io_ops '' "mysql InnoDB I/O Operations" "operations/s" innodb mysql.innodb_io_ops line $((mysql_priority + 13)) $mysql_update_every
+DIMENSION Innodb_data_reads reads incremental 1 1
+DIMENSION Innodb_data_writes writes incremental -1 1
+DIMENSION Innodb_data_fsyncs fsyncs incremental 1 1
+
+CHART mysql_$x.innodb_io_pending_ops '' "mysql InnoDB Pending I/O Operations" "operations" innodb mysql.innodb_io_pending_ops line $((mysql_priority + 14)) $mysql_update_every
+DIMENSION Innodb_data_pending_reads reads absolute 1 1
+DIMENSION Innodb_data_pending_writes writes absolute -1 1
+DIMENSION Innodb_data_pending_fsyncs fsyncs absolute 1 1
+
+CHART mysql_$x.innodb_log '' "mysql InnoDB Log Operations" "operations/s" innodb mysql.innodb_log line $((mysql_priority + 15)) $mysql_update_every
+DIMENSION Innodb_log_waits waits incremental 1 1
+DIMENSION Innodb_log_write_requests write_requests incremental -1 1
+DIMENSION Innodb_log_writes writes incremental -1 1
+
+CHART mysql_$x.innodb_os_log '' "mysql InnoDB OS Log Operations" "operations" innodb mysql.innodb_os_log line $((mysql_priority + 16)) $mysql_update_every
+DIMENSION Innodb_os_log_fsyncs fsyncs incremental 1 1
+DIMENSION Innodb_os_log_pending_fsyncs pending_fsyncs absolute 1 1
+DIMENSION Innodb_os_log_pending_writes pending_writes absolute -1 1
+
+CHART mysql_$x.innodb_os_log_io '' "mysql InnoDB OS Log Bandwidth" "kilobytes/s" innodb mysql.innodb_os_log_io area $((mysql_priority + 17)) $mysql_update_every
+DIMENSION Innodb_os_log_written write incremental -1 1024
+
+CHART mysql_$x.innodb_cur_row_lock '' "mysql InnoDB Current Row Locks" "operations" innodb mysql.innodb_cur_row_lock area $((mysql_priority + 18)) $mysql_update_every
+DIMENSION Innodb_row_lock_current_waits current_waits absolute 1 1
+
+CHART mysql_$x.innodb_rows '' "mysql InnoDB Row Operations" "operations/s" innodb mysql.innodb_rows area $((mysql_priority + 19)) $mysql_update_every
+DIMENSION Innodb_rows_read read incremental 1 1
+DIMENSION Innodb_rows_deleted deleted incremental -1 1
+DIMENSION Innodb_rows_inserted inserted incremental 1 1
+DIMENSION Innodb_rows_updated updated incremental -1 1
+
+CHART mysql_$x.innodb_buffer_pool_pages '' "mysql InnoDB Buffer Pool Pages" "pages" innodb mysql.innodb_buffer_pool_pages line $((mysql_priority + 20)) $mysql_update_every
+DIMENSION Innodb_buffer_pool_pages_data data absolute 1 1
+DIMENSION Innodb_buffer_pool_pages_dirty dirty absolute -1 1
+DIMENSION Innodb_buffer_pool_pages_free free absolute 1 1
+DIMENSION Innodb_buffer_pool_pages_flushed flushed incremental -1 1
+DIMENSION Innodb_buffer_pool_pages_misc misc absolute -1 1
+DIMENSION Innodb_buffer_pool_pages_total total absolute 1 1
+
+CHART mysql_$x.innodb_buffer_pool_bytes '' "mysql InnoDB Buffer Pool Bytes" "MB" innodb mysql.innodb_buffer_pool_bytes area $((mysql_priority + 21)) $mysql_update_every
+DIMENSION Innodb_buffer_pool_bytes_data data absolute 1 $((1024 * 1024))
+DIMENSION Innodb_buffer_pool_bytes_dirty dirty absolute -1 $((1024 * 1024))
+
+CHART mysql_$x.innodb_buffer_pool_read_ahead '' "mysql InnoDB Buffer Pool Read Ahead" "operations/s" innodb mysql.innodb_buffer_pool_read_ahead area $((mysql_priority + 22)) $mysql_update_every
+DIMENSION Innodb_buffer_pool_read_ahead all incremental 1 1
+DIMENSION Innodb_buffer_pool_read_ahead_evicted evicted incremental -1 1
+DIMENSION Innodb_buffer_pool_read_ahead_rnd random incremental 1 1
+
+CHART mysql_$x.innodb_buffer_pool_reqs '' "mysql InnoDB Buffer Pool Requests" "requests/s" innodb mysql.innodb_buffer_pool_reqs area $((mysql_priority + 23)) $mysql_update_every
+DIMENSION Innodb_buffer_pool_read_requests reads incremental 1 1
+DIMENSION Innodb_buffer_pool_write_requests writes incremental -1 1
+
+CHART mysql_$x.innodb_buffer_pool_ops '' "mysql InnoDB Buffer Pool Operations" "operations/s" innodb mysql.innodb_buffer_pool_ops area $((mysql_priority + 24)) $mysql_update_every
+DIMENSION Innodb_buffer_pool_reads 'disk reads' incremental 1 1
+DIMENSION Innodb_buffer_pool_wait_free 'wait free' incremental -1 1
+
+CHART mysql_$x.qcache_ops '' "mysql QCache Operations" "queries/s" qcache mysql.qcache_ops line $((mysql_priority + 25)) $mysql_update_every
+DIMENSION Qcache_hits hits incremental 1 1
+DIMENSION Qcache_lowmem_prunes 'lowmem prunes' incremental -1 1
+DIMENSION Qcache_inserts inserts incremental 1 1
+DIMENSION Qcache_not_cached 'not cached' incremental -1 1
+
+CHART mysql_$x.qcache '' "mysql QCache Queries in Cache" "queries" qcache mysql.qcache line $((mysql_priority + 26)) $mysql_update_every
+DIMENSION Qcache_queries_in_cache queries absolute 1 1
+
+CHART mysql_$x.qcache_freemem '' "mysql QCache Free Memory" "MB" qcache mysql.qcache_freemem area $((mysql_priority + 27)) $mysql_update_every
+DIMENSION Qcache_free_memory free absolute 1 $((1024 * 1024))
+
+CHART mysql_$x.qcache_memblocks '' "mysql QCache Memory Blocks" "blocks" qcache mysql.qcache_memblocks line $((mysql_priority + 28)) $mysql_update_every
+DIMENSION Qcache_free_blocks free absolute 1 1
+DIMENSION Qcache_total_blocks total absolute 1 1
+
+CHART mysql_$x.key_blocks '' "mysql MyISAM Key Cache Blocks" "blocks" myisam mysql.key_blocks line $((mysql_priority + 29)) $mysql_update_every
+DIMENSION Key_blocks_unused unused absolute 1 1
+DIMENSION Key_blocks_used used absolute -1 1
+DIMENSION Key_blocks_not_flushed 'not flushed' absolute 1 1
+
+CHART mysql_$x.key_requests '' "mysql MyISAM Key Cache Requests" "requests/s" myisam mysql.key_requests area $((mysql_priority + 30)) $mysql_update_every
+DIMENSION Key_read_requests reads incremental 1 1
+DIMENSION Key_write_requests writes incremental -1 1
+
+CHART mysql_$x.key_disk_ops '' "mysql MyISAM Key Cache Disk Operations" "operations/s" myisam mysql.key_disk_ops area $((mysql_priority + 31)) $mysql_update_every
+DIMENSION Key_reads reads incremental 1 1
+DIMENSION Key_writes writes incremental -1 1
+
+CHART mysql_$x.files '' "mysql Open Files" "files" files mysql.files line $((mysql_priority + 32)) $mysql_update_every
+DIMENSION Open_files files absolute 1 1
+
+CHART mysql_$x.files_rate '' "mysql Opened Files Rate" "files/s" files mysql.files_rate line $((mysql_priority + 33)) $mysql_update_every
+DIMENSION Opened_files files incremental 1 1
+EOF
+
+ if [ ! -z "${mysql_data[Binlog_stmt_cache_disk_use]}" ]
+ then
+ cat <<EOF
+CHART mysql_$x.binlog_stmt_cache '' "mysql Binlog Statement Cache" "statements/s" binlog mysql.binlog_stmt_cache line $((mysql_priority + 50)) $mysql_update_every
+DIMENSION Binlog_stmt_cache_disk_use disk incremental 1 1
+DIMENSION Binlog_stmt_cache_use all incremental 1 1
+EOF
+ fi
+
+ if [ ! -z "${mysql_data[Connection_errors_accept]}" ]
+ then
+ cat <<EOF
+CHART mysql_$x.connection_errors '' "mysql Connection Errors" "connections/s" connections mysql.connection_errors line $((mysql_priority + 51)) $mysql_update_every
+DIMENSION Connection_errors_accept accept incremental 1 1
+DIMENSION Connection_errors_internal internal incremental 1 1
+DIMENSION Connection_errors_max_connections max incremental 1 1
+DIMENSION Connection_errors_peer_addr peer_addr incremental 1 1
+DIMENSION Connection_errors_select select incremental 1 1
+DIMENSION Connection_errors_tcpwrap tcpwrap incremental 1 1
+EOF
+ fi
+
+ done
+ return 0
+}
+
+
+mysql_update() {
+ # the first argument to this function is the microseconds since last update
+ # pass this parameter to the BEGIN statement (see below).
+
+ # do all the work to collect / calculate the values
+ # for each dimension
+ # remember: KEEP IT SIMPLE AND SHORT
+
+ local m x
+ for m in "${!mysql_ids[@]}"
+ do
+ x="${mysql_ids[$m]}"
+ mysql_get "${mysql_cmds[$m]}" ${mysql_opts[$m]}
+
+ # shellcheck disable=SC2181
+ if [ $? -ne 0 ]
+ then
+ unset "mysql_ids[$m]"
+ unset "mysql_opts[$m]"
+ unset "mysql_cmds[$m]"
+ error "failed to get values for '${m}', disabling it."
+ continue
+ fi
+
+ # write the result of the work.
+ cat <<VALUESEOF
+BEGIN mysql_$x.net $1
+SET Bytes_received = ${mysql_data[Bytes_received]}
+SET Bytes_sent = ${mysql_data[Bytes_sent]}
+END
+BEGIN mysql_$x.queries $1
+SET Queries = ${mysql_data[Queries]}
+SET Questions = ${mysql_data[Questions]}
+SET Slow_queries = ${mysql_data[Slow_queries]}
+END
+BEGIN mysql_$x.handlers $1
+SET Handler_commit = ${mysql_data[Handler_commit]}
+SET Handler_delete = ${mysql_data[Handler_delete]}
+SET Handler_prepare = ${mysql_data[Handler_prepare]}
+SET Handler_read_first = ${mysql_data[Handler_read_first]}
+SET Handler_read_key = ${mysql_data[Handler_read_key]}
+SET Handler_read_next = ${mysql_data[Handler_read_next]}
+SET Handler_read_prev = ${mysql_data[Handler_read_prev]}
+SET Handler_read_rnd = ${mysql_data[Handler_read_rnd]}
+SET Handler_read_rnd_next = ${mysql_data[Handler_read_rnd_next]}
+SET Handler_rollback = ${mysql_data[Handler_rollback]}
+SET Handler_savepoint = ${mysql_data[Handler_savepoint]}
+SET Handler_savepoint_rollback = ${mysql_data[Handler_savepoint_rollback]}
+SET Handler_update = ${mysql_data[Handler_update]}
+SET Handler_write = ${mysql_data[Handler_write]}
+END
+BEGIN mysql_$x.table_locks $1
+SET Table_locks_immediate = ${mysql_data[Table_locks_immediate]}
+SET Table_locks_waited = ${mysql_data[Table_locks_waited]}
+END
+BEGIN mysql_$x.join_issues $1
+SET Select_full_join = ${mysql_data[Select_full_join]}
+SET Select_full_range_join = ${mysql_data[Select_full_range_join]}
+SET Select_range = ${mysql_data[Select_range]}
+SET Select_range_check = ${mysql_data[Select_range_check]}
+SET Select_scan = ${mysql_data[Select_scan]}
+END
+BEGIN mysql_$x.sort_issues $1
+SET Sort_merge_passes = ${mysql_data[Sort_merge_passes]}
+SET Sort_range = ${mysql_data[Sort_range]}
+SET Sort_scan = ${mysql_data[Sort_scan]}
+END
+BEGIN mysql_$x.tmp $1
+SET Created_tmp_disk_tables = ${mysql_data[Created_tmp_disk_tables]}
+SET Created_tmp_files = ${mysql_data[Created_tmp_files]}
+SET Created_tmp_tables = ${mysql_data[Created_tmp_tables]}
+END
+BEGIN mysql_$x.connections $1
+SET Connections = ${mysql_data[Connections]}
+SET Aborted_connects = ${mysql_data[Aborted_connects]}
+END
+BEGIN mysql_$x.binlog_cache $1
+SET Binlog_cache_disk_use = ${mysql_data[Binlog_cache_disk_use]}
+SET Binlog_cache_use = ${mysql_data[Binlog_cache_use]}
+END
+BEGIN mysql_$x.threads $1
+SET Threads_connected = ${mysql_data[Threads_connected]}
+SET Threads_created = ${mysql_data[Threads_created]}
+SET Threads_cached = ${mysql_data[Threads_cached]}
+SET Threads_running = ${mysql_data[Threads_running]}
+END
+BEGIN mysql_$x.thread_cache_misses $1
+SET misses = ${mysql_data[Thread_cache_misses]}
+END
+BEGIN mysql_$x.innodb_io $1
+SET Innodb_data_read = ${mysql_data[Innodb_data_read]}
+SET Innodb_data_written = ${mysql_data[Innodb_data_written]}
+END
+BEGIN mysql_$x.innodb_io_ops $1
+SET Innodb_data_reads = ${mysql_data[Innodb_data_reads]}
+SET Innodb_data_writes = ${mysql_data[Innodb_data_writes]}
+SET Innodb_data_fsyncs = ${mysql_data[Innodb_data_fsyncs]}
+END
+BEGIN mysql_$x.innodb_io_pending_ops $1
+SET Innodb_data_pending_reads = ${mysql_data[Innodb_data_pending_reads]}
+SET Innodb_data_pending_writes = ${mysql_data[Innodb_data_pending_writes]}
+SET Innodb_data_pending_fsyncs = ${mysql_data[Innodb_data_pending_fsyncs]}
+END
+BEGIN mysql_$x.innodb_log $1
+SET Innodb_log_waits = ${mysql_data[Innodb_log_waits]}
+SET Innodb_log_write_requests = ${mysql_data[Innodb_log_write_requests]}
+SET Innodb_log_writes = ${mysql_data[Innodb_log_writes]}
+END
+BEGIN mysql_$x.innodb_os_log $1
+SET Innodb_os_log_fsyncs = ${mysql_data[Innodb_os_log_fsyncs]}
+SET Innodb_os_log_pending_fsyncs = ${mysql_data[Innodb_os_log_pending_fsyncs]}
+SET Innodb_os_log_pending_writes = ${mysql_data[Innodb_os_log_pending_writes]}
+END
+BEGIN mysql_$x.innodb_os_log_io $1
+SET Innodb_os_log_written = ${mysql_data[Innodb_os_log_written]}
+END
+BEGIN mysql_$x.innodb_cur_row_lock $1
+SET Innodb_row_lock_current_waits = ${mysql_data[Innodb_row_lock_current_waits]}
+END
+BEGIN mysql_$x.innodb_rows $1
+SET Innodb_rows_inserted = ${mysql_data[Innodb_rows_inserted]}
+SET Innodb_rows_read = ${mysql_data[Innodb_rows_read]}
+SET Innodb_rows_updated = ${mysql_data[Innodb_rows_updated]}
+SET Innodb_rows_deleted = ${mysql_data[Innodb_rows_deleted]}
+END
+BEGIN mysql_$x.innodb_buffer_pool_pages $1
+SET Innodb_buffer_pool_pages_data = ${mysql_data[Innodb_buffer_pool_pages_data]}
+SET Innodb_buffer_pool_pages_dirty = ${mysql_data[Innodb_buffer_pool_pages_dirty]}
+SET Innodb_buffer_pool_pages_free = ${mysql_data[Innodb_buffer_pool_pages_free]}
+SET Innodb_buffer_pool_pages_flushed = ${mysql_data[Innodb_buffer_pool_pages_flushed]}
+SET Innodb_buffer_pool_pages_misc = ${mysql_data[Innodb_buffer_pool_pages_misc]}
+SET Innodb_buffer_pool_pages_total = ${mysql_data[Innodb_buffer_pool_pages_total]}
+END
+BEGIN mysql_$x.innodb_buffer_pool_bytes $1
+SET Innodb_buffer_pool_bytes_data = ${mysql_data[Innodb_buffer_pool_bytes_data]}
+SET Innodb_buffer_pool_bytes_dirty = ${mysql_data[Innodb_buffer_pool_bytes_dirty]}
+END
+BEGIN mysql_$x.innodb_buffer_pool_read_ahead $1
+SET Innodb_buffer_pool_read_ahead = ${mysql_data[Innodb_buffer_pool_read_ahead]}
+SET Innodb_buffer_pool_read_ahead_evicted = ${mysql_data[Innodb_buffer_pool_read_ahead_evicted]}
+SET Innodb_buffer_pool_read_ahead_rnd = ${mysql_data[Innodb_buffer_pool_read_ahead_rnd]}
+END
+BEGIN mysql_$x.innodb_buffer_pool_reqs $1
+SET Innodb_buffer_pool_read_requests = ${mysql_data[Innodb_buffer_pool_read_requests]}
+SET Innodb_buffer_pool_write_requests = ${mysql_data[Innodb_buffer_pool_write_requests]}
+END
+BEGIN mysql_$x.innodb_buffer_pool_ops $1
+SET Innodb_buffer_pool_reads = ${mysql_data[Innodb_buffer_pool_reads]}
+SET Innodb_buffer_pool_wait_free = ${mysql_data[Innodb_buffer_pool_wait_free]}
+END
+BEGIN mysql_$x.qcache_ops $1
+SET Qcache_hits = ${mysql_data[Qcache_hits]}
+SET Qcache_lowmem_prunes = ${mysql_data[Qcache_lowmem_prunes]}
+SET Qcache_inserts = ${mysql_data[Qcache_inserts]}
+SET Qcache_not_cached = ${mysql_data[Qcache_not_cached]}
+END
+BEGIN mysql_$x.qcache $1
+SET Qcache_queries_in_cache = ${mysql_data[Qcache_queries_in_cache]}
+END
+BEGIN mysql_$x.qcache_freemem $1
+SET Qcache_free_memory = ${mysql_data[Qcache_free_memory]}
+END
+BEGIN mysql_$x.qcache_memblocks $1
+SET Qcache_free_blocks = ${mysql_data[Qcache_free_blocks]}
+SET Qcache_total_blocks = ${mysql_data[Qcache_total_blocks]}
+END
+BEGIN mysql_$x.key_blocks $1
+SET Key_blocks_unused = ${mysql_data[Key_blocks_unused]}
+SET Key_blocks_used = ${mysql_data[Key_blocks_used]}
+SET Key_blocks_not_flushed = ${mysql_data[Key_blocks_not_flushed]}
+END
+BEGIN mysql_$x.key_requests $1
+SET Key_read_requests = ${mysql_data[Key_read_requests]}
+SET Key_write_requests = ${mysql_data[Key_write_requests]}
+END
+BEGIN mysql_$x.key_disk_ops $1
+SET Key_reads = ${mysql_data[Key_reads]}
+SET Key_writes = ${mysql_data[Key_writes]}
+END
+BEGIN mysql_$x.files $1
+SET Open_files = ${mysql_data[Open_files]}
+END
+BEGIN mysql_$x.files_rate $1
+SET Opened_files = ${mysql_data[Opened_files]}
+END
+VALUESEOF
+
+ if [ ! -z "${mysql_data[Binlog_stmt_cache_disk_use]}" ]
+ then
+ cat <<VALUESEOF
+BEGIN mysql_$x.binlog_stmt_cache $1
+SET Binlog_stmt_cache_disk_use = ${mysql_data[Binlog_stmt_cache_disk_use]}
+SET Binlog_stmt_cache_use = ${mysql_data[Binlog_stmt_cache_use]}
+END
+VALUESEOF
+ fi
+
+ if [ ! -z "${mysql_data[Connection_errors_accept]}" ]
+ then
+ cat <<VALUESEOF
+BEGIN mysql_$x.connection_errors $1
+SET Connection_errors_accept = ${mysql_data[Connection_errors_accept]}
+SET Connection_errors_internal = ${mysql_data[Connection_errors_internal]}
+SET Connection_errors_max_connections = ${mysql_data[Connection_errors_max_connections]}
+SET Connection_errors_peer_addr = ${mysql_data[Connection_errors_peer_addr]}
+SET Connection_errors_select = ${mysql_data[Connection_errors_select]}
+SET Connection_errors_tcpwrap = ${mysql_data[Connection_errors_tcpwrap]}
+END
+VALUESEOF
+ fi
+ done
+
+ [ ${#mysql_ids[@]} -eq 0 ] && error "no mysql servers left active." && return 1
+ return 0
+}
+
diff --git a/collectors/charts.d.plugin/mysql/mysql.conf b/collectors/charts.d.plugin/mysql/mysql.conf
new file mode 100644
index 000000000..683e4af35
--- /dev/null
+++ b/collectors/charts.d.plugin/mysql/mysql.conf
@@ -0,0 +1,23 @@
+# no need for shebang - this file is loaded from charts.d.plugin
+
+# netdata
+# real-time performance and health monitoring, done right!
+# (C) 2018 Costa Tsaousis <costa@tsaousis.gr>
+# GPL v3+
+
+# THIS PLUGIN IS DEPRECATED
+# USE THE PYTHON.D ONE
+
+#mysql_cmds[name]=""
+#mysql_opts[name]=""
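+
+# an illustrative job definition ('local' is just an example job name;
+# adjust the client binary and its options to your setup):
+#mysql_cmds[local]="mysql"
+#mysql_opts[local]="-u netdata"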
+
+# the data collection frequency
+# if unset, will inherit the netdata update frequency
+#mysql_update_every=2
+
+# the charts priority on the dashboard
+#mysql_priority=60000
+
+# the number of retries to do in case of failure
+# before disabling the module
+#mysql_retries=10
diff --git a/collectors/charts.d.plugin/nginx/Makefile.inc b/collectors/charts.d.plugin/nginx/Makefile.inc
new file mode 100644
index 000000000..c9d31aada
--- /dev/null
+++ b/collectors/charts.d.plugin/nginx/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_charts_DATA += nginx/nginx.chart.sh
+dist_chartsconfig_DATA += nginx/nginx.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += nginx/README.md nginx/Makefile.inc
+
diff --git a/collectors/charts.d.plugin/nginx/README.md b/collectors/charts.d.plugin/nginx/README.md
new file mode 100644
index 000000000..d82951aac
--- /dev/null
+++ b/collectors/charts.d.plugin/nginx/README.md
@@ -0,0 +1,2 @@
+> THIS MODULE IS OBSOLETE.
+> USE THE PYTHON ONE - IT SUPPORTS MULTIPLE JOBS AND IT IS MORE EFFICIENT
diff --git a/collectors/charts.d.plugin/nginx/nginx.chart.sh b/collectors/charts.d.plugin/nginx/nginx.chart.sh
new file mode 100644
index 000000000..14dda0832
--- /dev/null
+++ b/collectors/charts.d.plugin/nginx/nginx.chart.sh
@@ -0,0 +1,144 @@
+# shellcheck shell=bash
+# no need for shebang - this file is loaded from charts.d.plugin
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# netdata
+# real-time performance and health monitoring, done right!
+# (C) 2016 Costa Tsaousis <costa@tsaousis.gr>
+#
+
+# if this chart is called X.chart.sh, then all functions and global variables
+# must start with X_
+
+nginx_url="http://127.0.0.1:80/stub_status"
+nginx_curl_opts=""
+
+# _update_every is a special variable - it holds the number of seconds
+# between the calls of the _update() function
+nginx_update_every=
+nginx_priority=60000
+
+declare -a nginx_response=()
+nginx_active_connections=0
+nginx_accepts=0
+nginx_handled=0
+nginx_requests=0
+nginx_reading=0
+nginx_writing=0
+nginx_waiting=0
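+
+# for reference, the stub_status output parsed below looks like this
+# (the numbers are illustrative):
+#
+#   Active connections: 291
+#   server accepts handled requests
+#    16630948 16630948 31070465
+#   Reading: 6 Writing: 179 Waiting: 106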
+nginx_get() {
+ # shellcheck disable=SC2207
+ nginx_response=($(run curl -Ss ${nginx_curl_opts} "${nginx_url}"))
+ # shellcheck disable=SC2181
+ if [ $? -ne 0 ] || [ "${#nginx_response[@]}" -eq 0 ]; then return 1; fi
+
+ if [ "${nginx_response[0]}" != "Active" ] ||\
+ [ "${nginx_response[1]}" != "connections:" ] ||\
+ [ "${nginx_response[3]}" != "server" ] ||\
+ [ "${nginx_response[4]}" != "accepts" ] ||\
+ [ "${nginx_response[5]}" != "handled" ] ||\
+ [ "${nginx_response[6]}" != "requests" ] ||\
+ [ "${nginx_response[10]}" != "Reading:" ] ||\
+ [ "${nginx_response[12]}" != "Writing:" ] ||\
+ [ "${nginx_response[14]}" != "Waiting:" ]
+ then
+ error "Invalid response from nginx server: ${nginx_response[*]}"
+ return 1
+ fi
+
+ nginx_active_connections="${nginx_response[2]}"
+ nginx_accepts="${nginx_response[7]}"
+ nginx_handled="${nginx_response[8]}"
+ nginx_requests="${nginx_response[9]}"
+ nginx_reading="${nginx_response[11]}"
+ nginx_writing="${nginx_response[13]}"
+ nginx_waiting="${nginx_response[15]}"
+
+ if [ -z "${nginx_active_connections}" ] ||\
+ [ -z "${nginx_accepts}" ] ||\
+ [ -z "${nginx_handled}" ] ||\
+ [ -z "${nginx_requests}" ] ||\
+ [ -z "${nginx_reading}" ] ||\
+ [ -z "${nginx_writing}" ] ||\
+ [ -z "${nginx_waiting}" ]
+ then
+ error "empty values got from nginx server: ${nginx_response[*]}"
+ return 1
+ fi
+
+ return 0
+}
+
+# _check is called once, to find out if this chart should be enabled or not
+nginx_check() {
+
+ nginx_get
+ # shellcheck disable=2181
+ if [ $? -ne 0 ]
+ then
+ # shellcheck disable=SC2154
+ error "cannot find stub_status on URL '${nginx_url}'. Please set nginx_url='http://nginx.server/stub_status' in $confd/nginx.conf"
+ return 1
+ fi
+
+ # this should return:
+ # - 0 to enable the chart
+ # - 1 to disable the chart
+
+ return 0
+}
+
+# _create is called once, to create the charts
+nginx_create() {
+ cat <<EOF
+CHART nginx_local.connections '' "nginx Active Connections" "connections" nginx nginx.connections line $((nginx_priority + 1)) $nginx_update_every
+DIMENSION active '' absolute 1 1
+
+CHART nginx_local.requests '' "nginx Requests" "requests/s" nginx nginx.requests line $((nginx_priority + 2)) $nginx_update_every
+DIMENSION requests '' incremental 1 1
+
+CHART nginx_local.connections_status '' "nginx Active Connections by Status" "connections" nginx nginx.connections.status line $((nginx_priority + 3)) $nginx_update_every
+DIMENSION reading '' absolute 1 1
+DIMENSION writing '' absolute 1 1
+DIMENSION waiting idle absolute 1 1
+
+CHART nginx_local.connect_rate '' "nginx Connections Rate" "connections/s" nginx nginx.connections.rate line $((nginx_priority + 4)) $nginx_update_every
+DIMENSION accepts accepted incremental 1 1
+DIMENSION handled '' incremental 1 1
+EOF
+
+ return 0
+}
+
+# _update is called continuously, to collect the values
+nginx_update() {
+ # the first argument to this function is the microseconds since last update
+ # pass this parameter to the BEGIN statement (see below).
+
+ # do all the work to collect / calculate the values
+ # for each dimension
+ # remember: KEEP IT SIMPLE AND SHORT
+
+ nginx_get || return 1
+
+ # write the result of the work.
+ cat <<VALUESEOF
+BEGIN nginx_local.connections $1
+SET active = $((nginx_active_connections))
+END
+BEGIN nginx_local.requests $1
+SET requests = $((nginx_requests))
+END
+BEGIN nginx_local.connections_status $1
+SET reading = $((nginx_reading))
+SET writing = $((nginx_writing))
+SET waiting = $((nginx_waiting))
+END
+BEGIN nginx_local.connect_rate $1
+SET accepts = $((nginx_accepts))
+SET handled = $((nginx_handled))
+END
+VALUESEOF
+
+ return 0
+}
diff --git a/collectors/charts.d.plugin/nginx/nginx.conf b/collectors/charts.d.plugin/nginx/nginx.conf
new file mode 100644
index 000000000..c46100a58
--- /dev/null
+++ b/collectors/charts.d.plugin/nginx/nginx.conf
@@ -0,0 +1,23 @@
+# no need for shebang - this file is loaded from charts.d.plugin
+
+# netdata
+# real-time performance and health monitoring, done right!
+# (C) 2018 Costa Tsaousis <costa@tsaousis.gr>
+# GPL v3+
+
+# THIS PLUGIN IS DEPRECATED
+# USE THE PYTHON.D ONE
+
+#nginx_url="http://127.0.0.1:80/stub_status"
+#nginx_curl_opts=""
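+
+# nginx must expose the stub_status endpoint for this module to work.
+# an illustrative nginx snippet (adjust the location name and the allow
+# rules to your setup):
+#
+#   location /stub_status {
+#       stub_status on;
+#       allow 127.0.0.1;
+#       deny all;
+#   }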
+
+# the data collection frequency
+# if unset, will inherit the netdata update frequency
+#nginx_update_every=
+
+# the charts priority on the dashboard
+#nginx_priority=60000
+
+# the number of retries to do in case of failure
+# before disabling the module
+#nginx_retries=10
diff --git a/collectors/charts.d.plugin/nut/Makefile.inc b/collectors/charts.d.plugin/nut/Makefile.inc
new file mode 100644
index 000000000..4fb47145d
--- /dev/null
+++ b/collectors/charts.d.plugin/nut/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_charts_DATA += nut/nut.chart.sh
+dist_chartsconfig_DATA += nut/nut.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += nut/README.md nut/Makefile.inc
+
diff --git a/collectors/charts.d.plugin/nut/README.md b/collectors/charts.d.plugin/nut/README.md
new file mode 100644
index 000000000..71906f55a
--- /dev/null
+++ b/collectors/charts.d.plugin/nut/README.md
@@ -0,0 +1,59 @@
+# nut
+
+The plugin will collect UPS data for all UPSes configured in the system.
+
+The following charts will be created:
+
+1. **UPS Charge**
+
+ * charge percentage
+
+2. **UPS Battery Voltage**
+
+ * current voltage
+ * high voltage
+ * low voltage
+ * nominal voltage
+
+3. **UPS Input Voltage**
+
+ * current voltage
+ * fault voltage
+ * nominal voltage
+
+4. **UPS Input Current**
+
+ * nominal current
+
+5. **UPS Input Frequency**
+
+ * current frequency
+ * nominal frequency
+
+6. **UPS Output Voltage**
+
+ * current voltage
+
+7. **UPS Load**
+
+ * current load
+
+8. **UPS Temperature**
+
+ * current temperature
+
+
+### configuration
+
+This is the internal default for `/etc/netdata/nut.conf`
+
+```sh
+# a space separated list of UPS names
+# if empty, the list returned by 'upsc -l' will be used
+nut_ups=
+
+# how frequently to collect UPS data
+nut_update_every=2
+```
+
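+To verify the UPS names netdata will use, you can query NUT directly
+(assuming the NUT client tools are installed; replace `myups` with one of
+your UPS names):
+
+```sh
+# list the UPS names known to the local upsd
+upsc -l
+
+# dump all variables reported for one of them
+upsc myups
+```
+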
+---
diff --git a/collectors/charts.d.plugin/nut/nut.chart.sh b/collectors/charts.d.plugin/nut/nut.chart.sh
new file mode 100644
index 000000000..7e252f325
--- /dev/null
+++ b/collectors/charts.d.plugin/nut/nut.chart.sh
@@ -0,0 +1,241 @@
+# shellcheck shell=bash
+# no need for shebang - this file is loaded from charts.d.plugin
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# netdata
+# real-time performance and health monitoring, done right!
+# (C) 2016-2017 Costa Tsaousis <costa@tsaousis.gr>
+#
+
+# a space separated list of UPS names
+# if empty, the list returned by 'upsc -l' will be used
+nut_ups=
+
+# how frequently to collect UPS data
+nut_update_every=2
+
+# how much time in seconds, to wait for nut to respond
+nut_timeout=2
+
+# set this to 1, to enable another chart showing the number
+# of UPS clients connected to upsd
+nut_clients_chart=0
+
+# the priority of nut related to other charts
+nut_priority=90000
+
+declare -A nut_ids=()
+declare -A nut_names=()
+
+nut_get_all() {
+ run -t $nut_timeout upsc -l
+}
+
+nut_get() {
+ run -t $nut_timeout upsc "$1"
+
+ if [ "${nut_clients_chart}" -eq "1" ]
+ then
+ printf "ups.connected_clients: "
+ run -t $nut_timeout upsc -c "$1" | wc -l
+ fi
+}
+
+nut_check() {
+
+ # this should return:
+ # - 0 to enable the chart
+ # - 1 to disable the chart
+
+ local x
+
+ require_cmd upsc || return 1
+
+ [ -z "$nut_ups" ] && nut_ups="$( nut_get_all )"
+
+ for x in $nut_ups
+ do
+ nut_get "$x" >/dev/null
+ # shellcheck disable=SC2181
+ if [ $? -eq 0 ]
+ then
+ if [ ! -z "${nut_names[${x}]}" ]
+ then
+ nut_ids[$x]="$( fixid "${nut_names[${x}]}" )"
+ else
+ nut_ids[$x]="$( fixid "$x" )"
+ fi
+ continue
+ fi
+ error "cannot get information for NUT UPS '$x'."
+ done
+
+ if [ ${#nut_ids[@]} -eq 0 ]
+ then
+ # shellcheck disable=SC2154
+ error "Cannot find UPSes - please set nut_ups='ups_name' in $confd/nut.conf"
+ return 1
+ fi
+
+ return 0
+}
+
+nut_create() {
+ # create the charts
+ local x
+
+ for x in "${nut_ids[@]}"
+ do
+ cat <<EOF
+CHART nut_$x.charge '' "UPS Charge" "percentage" ups nut.charge area $((nut_priority + 1)) $nut_update_every
+DIMENSION battery_charge charge absolute 1 100
+
+CHART nut_$x.runtime '' "UPS Runtime" "seconds" ups nut.runtime area $((nut_priority + 2)) $nut_update_every
+DIMENSION battery_runtime runtime absolute 1 100
+
+CHART nut_$x.battery_voltage '' "UPS Battery Voltage" "Volts" ups nut.battery.voltage line $((nut_priority + 3)) $nut_update_every
+DIMENSION battery_voltage voltage absolute 1 100
+DIMENSION battery_voltage_high high absolute 1 100
+DIMENSION battery_voltage_low low absolute 1 100
+DIMENSION battery_voltage_nominal nominal absolute 1 100
+
+CHART nut_$x.input_voltage '' "UPS Input Voltage" "Volts" input nut.input.voltage line $((nut_priority + 4)) $nut_update_every
+DIMENSION input_voltage voltage absolute 1 100
+DIMENSION input_voltage_fault fault absolute 1 100
+DIMENSION input_voltage_nominal nominal absolute 1 100
+
+CHART nut_$x.input_current '' "UPS Input Current" "Ampere" input nut.input.current line $((nut_priority + 5)) $nut_update_every
+DIMENSION input_current_nominal nominal absolute 1 100
+
+CHART nut_$x.input_frequency '' "UPS Input Frequency" "Hz" input nut.input.frequency line $((nut_priority + 6)) $nut_update_every
+DIMENSION input_frequency frequency absolute 1 100
+DIMENSION input_frequency_nominal nominal absolute 1 100
+
+CHART nut_$x.output_voltage '' "UPS Output Voltage" "Volts" output nut.output.voltage line $((nut_priority + 7)) $nut_update_every
+DIMENSION output_voltage voltage absolute 1 100
+
+CHART nut_$x.load '' "UPS Load" "percentage" ups nut.load area $((nut_priority)) $nut_update_every
+DIMENSION load load absolute 1 100
+
+CHART nut_$x.temp '' "UPS Temperature" "temperature" ups nut.temperature line $((nut_priority + 8)) $nut_update_every
+DIMENSION temp temp absolute 1 100
+EOF
+
+ if [ "${nut_clients_chart}" = "1" ]
+ then
+ cat <<EOF2
+CHART nut_$x.clients '' "UPS Connected Clients" "clients" ups nut.clients area $((nut_priority + 9)) $nut_update_every
+DIMENSION clients '' absolute 1 1
+EOF2
+ fi
+
+ done
+
+ return 0
+}
+
+
+nut_update() {
+ # the first argument to this function is the microseconds since last update
+ # pass this parameter to the BEGIN statement (see below).
+
+ # do all the work to collect / calculate the values
+ # for each dimension
+ # remember: KEEP IT SIMPLE AND SHORT
+
+ local i x
+ for i in "${!nut_ids[@]}"
+ do
+ x="${nut_ids[$i]}"
+ nut_get "$i" | awk "
+BEGIN {
+ battery_charge = 0;
+ battery_runtime = 0;
+ battery_voltage = 0;
+ battery_voltage_high = 0;
+ battery_voltage_low = 0;
+ battery_voltage_nominal = 0;
+ input_voltage = 0;
+ input_voltage_fault = 0;
+ input_voltage_nominal = 0;
+ input_current_nominal = 0;
+ input_frequency = 0;
+ input_frequency_nominal = 0;
+ output_voltage = 0;
+ load = 0;
+ temp = 0;
+ clients = 0;
+ do_clients = ${nut_clients_chart};
+}
+/^battery.charge: .*/ { battery_charge = \$2 * 100 };
+/^battery.runtime: .*/ { battery_runtime = \$2 * 100 };
+/^battery.voltage: .*/ { battery_voltage = \$2 * 100 };
+/^battery.voltage.high: .*/ { battery_voltage_high = \$2 * 100 };
+/^battery.voltage.low: .*/ { battery_voltage_low = \$2 * 100 };
+/^battery.voltage.nominal: .*/ { battery_voltage_nominal = \$2 * 100 };
+/^input.voltage: .*/ { input_voltage = \$2 * 100 };
+/^input.voltage.fault: .*/ { input_voltage_fault = \$2 * 100 };
+/^input.voltage.nominal: .*/ { input_voltage_nominal = \$2 * 100 };
+/^input.current.nominal: .*/ { input_current_nominal = \$2 * 100 };
+/^input.frequency: .*/ { input_frequency = \$2 * 100 };
+/^input.frequency.nominal: .*/ { input_frequency_nominal = \$2 * 100 };
+/^output.voltage: .*/ { output_voltage = \$2 * 100 };
+/^ups.load: .*/ { load = \$2 * 100 };
+/^ups.temperature: .*/ { temp = \$2 * 100 };
+/^ups.connected_clients: .*/ { clients = \$2 };
+END {
+ print \"BEGIN nut_$x.charge $1\";
+ print \"SET battery_charge = \" battery_charge;
+ print \"END\"
+
+ print \"BEGIN nut_$x.runtime $1\";
+ print \"SET battery_runtime = \" battery_runtime;
+ print \"END\"
+
+ print \"BEGIN nut_$x.battery_voltage $1\";
+ print \"SET battery_voltage = \" battery_voltage;
+ print \"SET battery_voltage_high = \" battery_voltage_high;
+ print \"SET battery_voltage_low = \" battery_voltage_low;
+ print \"SET battery_voltage_nominal = \" battery_voltage_nominal;
+ print \"END\"
+
+ print \"BEGIN nut_$x.input_voltage $1\";
+ print \"SET input_voltage = \" input_voltage;
+ print \"SET input_voltage_fault = \" input_voltage_fault;
+ print \"SET input_voltage_nominal = \" input_voltage_nominal;
+ print \"END\"
+
+ print \"BEGIN nut_$x.input_current $1\";
+ print \"SET input_current_nominal = \" input_current_nominal;
+ print \"END\"
+
+ print \"BEGIN nut_$x.input_frequency $1\";
+ print \"SET input_frequency = \" input_frequency;
+ print \"SET input_frequency_nominal = \" input_frequency_nominal;
+ print \"END\"
+
+ print \"BEGIN nut_$x.output_voltage $1\";
+ print \"SET output_voltage = \" output_voltage;
+ print \"END\"
+
+ print \"BEGIN nut_$x.load $1\";
+ print \"SET load = \" load;
+ print \"END\"
+
+ print \"BEGIN nut_$x.temp $1\";
+ print \"SET temp = \" temp;
+ print \"END\"
+
+ if(do_clients) {
+ print \"BEGIN nut_$x.clients $1\";
+ print \"SET clients = \" clients;
+ print \"END\"
+ }
+}"
+ # shellcheck disable=2181
+ [ $? -ne 0 ] && unset "nut_ids[$i]" && error "failed to get values for '$i', disabling it."
+ done
+
+ [ ${#nut_ids[@]} -eq 0 ] && error "no UPSes left active." && return 1
+ return 0
+}
diff --git a/collectors/charts.d.plugin/nut/nut.conf b/collectors/charts.d.plugin/nut/nut.conf
new file mode 100644
index 000000000..b95ad9048
--- /dev/null
+++ b/collectors/charts.d.plugin/nut/nut.conf
@@ -0,0 +1,33 @@
+# no need for shebang - this file is loaded from charts.d.plugin
+
+# netdata
+# real-time performance and health monitoring, done right!
+# (C) 2018 Costa Tsaousis <costa@tsaousis.gr>
+# GPL v3+
+
+# a space separated list of UPS names
+# if empty, the list returned by 'upsc -l' will be used
+#nut_ups=
+
+# each line maps one UPS name to an alias used on the dashboard
+# if no alias is set, the UPS name itself will be used
+#nut_names["FQDN1"]="alias"
+#nut_names["FQDN2"]="alias"
+
+# how much time in seconds, to wait for nut to respond
+#nut_timeout=2
+
+# set this to 1, to enable another chart showing the number
+# of UPS clients connected to upsd
+#nut_clients_chart=1
+
+# the data collection frequency
+# if unset, will inherit the netdata update frequency
+#nut_update_every=2
+
+# the charts priority on the dashboard
+#nut_priority=90000
+
+# the number of retries to do in case of failure
+# before disabling the module
+#nut_retries=10
diff --git a/collectors/charts.d.plugin/opensips/Makefile.inc b/collectors/charts.d.plugin/opensips/Makefile.inc
new file mode 100644
index 000000000..a7b5d3a92
--- /dev/null
+++ b/collectors/charts.d.plugin/opensips/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_charts_DATA += opensips/opensips.chart.sh
+dist_chartsconfig_DATA += opensips/opensips.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += opensips/README.md opensips/Makefile.inc
+
diff --git a/collectors/charts.d.plugin/opensips/README.md b/collectors/charts.d.plugin/opensips/README.md
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/collectors/charts.d.plugin/opensips/README.md
diff --git a/collectors/charts.d.plugin/opensips/opensips.chart.sh b/collectors/charts.d.plugin/opensips/opensips.chart.sh
new file mode 100644
index 000000000..c227bd4f2
--- /dev/null
+++ b/collectors/charts.d.plugin/opensips/opensips.chart.sh
@@ -0,0 +1,326 @@
+# shellcheck shell=bash disable=SC1117,SC2154,SC2086
+# no need for shebang - this file is loaded from charts.d.plugin
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# netdata
+# real-time performance and health monitoring, done right!
+# (C) 2016 Costa Tsaousis <costa@tsaousis.gr>
+#
+
+opensips_opts="fifo get_statistics all"
+opensips_cmd=
+opensips_update_every=5
+opensips_timeout=2
+opensips_priority=80000
+
+opensips_get_stats() {
+ run -t $opensips_timeout "$opensips_cmd" $opensips_opts |\
+ grep "^\(core\|dialog\|net\|registrar\|shmem\|siptrace\|sl\|tm\|uri\|usrloc\):[a-zA-Z0-9_-]\+[[:space:]]*[=:]\+[[:space:]]*[0-9]\+[[:space:]]*$" |\
+ sed \
+ -e "s|[[:space:]]*[=:]\+[[:space:]]*\([0-9]\+\)[[:space:]]*$|=\1|g" \
+ -e "s|[[:space:]:-]\+|_|g" \
+ -e "s|^|opensips_|g"
+
+ local ret=$?
+ [ $ret -ne 0 ] && echo "opensips_command_failed=1"
+ return $ret
+}
+
+opensips_check() {
+ # if the user did not provide an opensips_cmd
+ # try to find it in the system
+ if [ -z "$opensips_cmd" ]
+ then
+ require_cmd opensipsctl || return 1
+ fi
+
+ # check once if the command works
+ local x
+ x="$(opensips_get_stats | grep "^opensips_core_")"
+ # shellcheck disable=SC2181
+ if [ ! $? -eq 0 ] || [ -z "$x" ]
+ then
+ error "cannot get global status. Please set opensips_opts='options' whatever needed to get connected to opensips server, in $confd/opensips.conf"
+ return 1
+ fi
+
+ return 0
+}
+
+opensips_create() {
+ # create the charts
+ cat <<EOF
+CHART opensips.dialogs_active '' "OpenSIPS Active Dialogs" "dialogs" dialogs '' area $((opensips_priority + 1)) $opensips_update_every
+DIMENSION dialog_active_dialogs active absolute 1 1
+DIMENSION dialog_early_dialogs early absolute -1 1
+
+CHART opensips.users '' "OpenSIPS Users" "users" users '' line $((opensips_priority + 2)) $opensips_update_every
+DIMENSION usrloc_registered_users registered absolute 1 1
+DIMENSION usrloc_location_users location absolute 1 1
+DIMENSION usrloc_location_contacts contacts absolute 1 1
+DIMENSION usrloc_location_expires expires incremental -1 1
+
+CHART opensips.registrar '' "OpenSIPS Registrar" "registrations/s" registrar '' line $((opensips_priority + 3)) $opensips_update_every
+DIMENSION registrar_accepted_regs accepted incremental 1 1
+DIMENSION registrar_rejected_regs rejected incremental -1 1
+
+CHART opensips.transactions '' "OpenSIPS Transactions" "transactions/s" transactions '' line $((opensips_priority + 4)) $opensips_update_every
+DIMENSION tm_UAS_transactions UAS incremental 1 1
+DIMENSION tm_UAC_transactions UAC incremental -1 1
+
+CHART opensips.core_rcv '' "OpenSIPS Core Receives" "queries/s" core '' line $((opensips_priority + 5)) $opensips_update_every
+DIMENSION core_rcv_requests requests incremental 1 1
+DIMENSION core_rcv_replies replies incremental -1 1
+
+CHART opensips.core_fwd '' "OpenSIPS Core Forwards" "queries/s" core '' line $((opensips_priority + 6)) $opensips_update_every
+DIMENSION core_fwd_requests requests incremental 1 1
+DIMENSION core_fwd_replies replies incremental -1 1
+
+CHART opensips.core_drop '' "OpenSIPS Core Drops" "queries/s" core '' line $((opensips_priority + 7)) $opensips_update_every
+DIMENSION core_drop_requests requests incremental 1 1
+DIMENSION core_drop_replies replies incremental -1 1
+
+CHART opensips.core_err '' "OpenSIPS Core Errors" "queries/s" core '' line $((opensips_priority + 8)) $opensips_update_every
+DIMENSION core_err_requests requests incremental 1 1
+DIMENSION core_err_replies replies incremental -1 1
+
+CHART opensips.core_bad '' "OpenSIPS Core Bad" "queries/s" core '' line $((opensips_priority + 9)) $opensips_update_every
+DIMENSION core_bad_URIs_rcvd bad_URIs_rcvd incremental 1 1
+DIMENSION core_unsupported_methods unsupported_methods incremental 1 1
+DIMENSION core_bad_msg_hdr bad_msg_hdr incremental 1 1
+
+CHART opensips.tm_replies '' "OpenSIPS TM Replies" "replies/s" transactions '' line $((opensips_priority + 10)) $opensips_update_every
+DIMENSION tm_received_replies received incremental 1 1
+DIMENSION tm_relayed_replies relayed incremental 1 1
+DIMENSION tm_local_replies local incremental 1 1
+
+CHART opensips.transactions_status '' "OpenSIPS Transactions Status" "transactions/s" transactions '' line $((opensips_priority + 11)) $opensips_update_every
+DIMENSION tm_2xx_transactions 2xx incremental 1 1
+DIMENSION tm_3xx_transactions 3xx incremental 1 1
+DIMENSION tm_4xx_transactions 4xx incremental 1 1
+DIMENSION tm_5xx_transactions 5xx incremental 1 1
+DIMENSION tm_6xx_transactions 6xx incremental 1 1
+
+CHART opensips.transactions_inuse '' "OpenSIPS InUse Transactions" "transactions" transactions '' line $((opensips_priority + 12)) $opensips_update_every
+DIMENSION tm_inuse_transactions inuse absolute 1 1
+
+CHART opensips.sl_replies '' "OpenSIPS SL Replies" "replies/s" core '' line $((opensips_priority + 13)) $opensips_update_every
+DIMENSION sl_1xx_replies 1xx incremental 1 1
+DIMENSION sl_2xx_replies 2xx incremental 1 1
+DIMENSION sl_3xx_replies 3xx incremental 1 1
+DIMENSION sl_4xx_replies 4xx incremental 1 1
+DIMENSION sl_5xx_replies 5xx incremental 1 1
+DIMENSION sl_6xx_replies 6xx incremental 1 1
+DIMENSION sl_sent_replies sent incremental 1 1
+DIMENSION sl_sent_err_replies error incremental 1 1
+DIMENSION sl_received_ACKs ACKed incremental 1 1
+
+CHART opensips.dialogs '' "OpenSIPS Dialogs" "dialogs/s" dialogs '' line $((opensips_priority + 14)) $opensips_update_every
+DIMENSION dialog_processed_dialogs processed incremental 1 1
+DIMENSION dialog_expired_dialogs expired incremental 1 1
+DIMENSION dialog_failed_dialogs failed incremental -1 1
+
+CHART opensips.net_waiting '' "OpenSIPS Network Waiting" "kilobytes" net '' line $((opensips_priority + 15)) $opensips_update_every
+DIMENSION net_waiting_udp UDP absolute 1 1024
+DIMENSION net_waiting_tcp TCP absolute 1 1024
+
+CHART opensips.uri_checks '' "OpenSIPS URI Checks" "checks/s" uri '' line $((opensips_priority + 16)) $opensips_update_every
+DIMENSION uri_positive_checks positive incremental 1 1
+DIMENSION uri_negative_checks negative incremental -1 1
+
+CHART opensips.traces '' "OpenSIPS Traces" "traces/s" traces '' line $((opensips_priority + 17)) $opensips_update_every
+DIMENSION siptrace_traced_requests requests incremental 1 1
+DIMENSION siptrace_traced_replies replies incremental -1 1
+
+CHART opensips.shmem '' "OpenSIPS Shared Memory" "kilobytes" mem '' line $((opensips_priority + 18)) $opensips_update_every
+DIMENSION shmem_total_size total absolute 1 1024
+DIMENSION shmem_used_size used absolute 1 1024
+DIMENSION shmem_real_used_size real_used absolute 1 1024
+DIMENSION shmem_max_used_size max_used absolute 1 1024
+DIMENSION shmem_free_size free absolute 1 1024
+
+CHART opensips.shmem_fragments '' "OpenSIPS Shared Memory Fragmentation" "fragments" mem '' line $((opensips_priority + 19)) $opensips_update_every
+DIMENSION shmem_fragments fragments absolute 1 1
+EOF
+
+ return 0
+}
+
+opensips_update() {
+ # the first argument to this function is the microseconds since last update
+ # pass this parameter to the BEGIN statement (see below).
+
+ # do all the work to collect / calculate the values
+ # for each dimension
+
+ # 1. get the statistics from opensips (opensips_get_stats)
+ # 2. grep only the lines of the known statistic groups
+ #    (core, dialog, net, registrar, shmem, siptrace, sl, tm, uri, usrloc)
+ #    that carry a plain numeric value
+ # 3. sed them into key=value lines, with every key prefixed with opensips_
+ # 4. then execute the result as a script, with eval
+ # be very careful with eval:
+ # prepare the script and always grep at the end only the lines that are useful, so that
+ # even if something goes wrong, no other code can be executed
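+ # for illustration, each sanitized line that ends up being eval'd has
+ # this shape (the number is made up):
+ #   opensips_core_rcv_requests=123456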
+
+ unset \
+ opensips_dialog_active_dialogs \
+ opensips_dialog_early_dialogs \
+ opensips_usrloc_registered_users \
+ opensips_usrloc_location_users \
+ opensips_usrloc_location_contacts \
+ opensips_usrloc_location_expires \
+ opensips_registrar_accepted_regs \
+ opensips_registrar_rejected_regs \
+ opensips_tm_UAS_transactions \
+ opensips_tm_UAC_transactions \
+ opensips_core_rcv_requests \
+ opensips_core_rcv_replies \
+ opensips_core_fwd_requests \
+ opensips_core_fwd_replies \
+ opensips_core_drop_requests \
+ opensips_core_drop_replies \
+ opensips_core_err_requests \
+ opensips_core_err_replies \
+ opensips_core_bad_URIs_rcvd \
+ opensips_core_unsupported_methods \
+ opensips_core_bad_msg_hdr \
+ opensips_tm_received_replies \
+ opensips_tm_relayed_replies \
+ opensips_tm_local_replies \
+ opensips_tm_2xx_transactions \
+ opensips_tm_3xx_transactions \
+ opensips_tm_4xx_transactions \
+ opensips_tm_5xx_transactions \
+ opensips_tm_6xx_transactions \
+ opensips_tm_inuse_transactions \
+ opensips_sl_1xx_replies \
+ opensips_sl_2xx_replies \
+ opensips_sl_3xx_replies \
+ opensips_sl_4xx_replies \
+ opensips_sl_5xx_replies \
+ opensips_sl_6xx_replies \
+ opensips_sl_sent_replies \
+ opensips_sl_sent_err_replies \
+ opensips_sl_received_ACKs \
+ opensips_dialog_processed_dialogs \
+ opensips_dialog_expired_dialogs \
+ opensips_dialog_failed_dialogs \
+ opensips_net_waiting_udp \
+ opensips_net_waiting_tcp \
+ opensips_uri_positive_checks \
+ opensips_uri_negative_checks \
+ opensips_siptrace_traced_requests \
+ opensips_siptrace_traced_replies \
+ opensips_shmem_total_size \
+ opensips_shmem_used_size \
+ opensips_shmem_real_used_size \
+ opensips_shmem_max_used_size \
+ opensips_shmem_free_size \
+ opensips_shmem_fragments
+
+ opensips_command_failed=0
+ eval "local $(opensips_get_stats)"
+ # shellcheck disable=SC2181
+ [ $? -ne 0 ] && return 1
+
+ [ $opensips_command_failed -eq 1 ] && error "failed to get values, disabling." && return 1
+
+ # write the result of the work.
+ cat <<VALUESEOF
+BEGIN opensips.dialogs_active $1
+SET dialog_active_dialogs = $opensips_dialog_active_dialogs
+SET dialog_early_dialogs = $opensips_dialog_early_dialogs
+END
+BEGIN opensips.users $1
+SET usrloc_registered_users = $opensips_usrloc_registered_users
+SET usrloc_location_users = $opensips_usrloc_location_users
+SET usrloc_location_contacts = $opensips_usrloc_location_contacts
+SET usrloc_location_expires = $opensips_usrloc_location_expires
+END
+BEGIN opensips.registrar $1
+SET registrar_accepted_regs = $opensips_registrar_accepted_regs
+SET registrar_rejected_regs = $opensips_registrar_rejected_regs
+END
+BEGIN opensips.transactions $1
+SET tm_UAS_transactions = $opensips_tm_UAS_transactions
+SET tm_UAC_transactions = $opensips_tm_UAC_transactions
+END
+BEGIN opensips.core_rcv $1
+SET core_rcv_requests = $opensips_core_rcv_requests
+SET core_rcv_replies = $opensips_core_rcv_replies
+END
+BEGIN opensips.core_fwd $1
+SET core_fwd_requests = $opensips_core_fwd_requests
+SET core_fwd_replies = $opensips_core_fwd_replies
+END
+BEGIN opensips.core_drop $1
+SET core_drop_requests = $opensips_core_drop_requests
+SET core_drop_replies = $opensips_core_drop_replies
+END
+BEGIN opensips.core_err $1
+SET core_err_requests = $opensips_core_err_requests
+SET core_err_replies = $opensips_core_err_replies
+END
+BEGIN opensips.core_bad $1
+SET core_bad_URIs_rcvd = $opensips_core_bad_URIs_rcvd
+SET core_unsupported_methods = $opensips_core_unsupported_methods
+SET core_bad_msg_hdr = $opensips_core_bad_msg_hdr
+END
+BEGIN opensips.tm_replies $1
+SET tm_received_replies = $opensips_tm_received_replies
+SET tm_relayed_replies = $opensips_tm_relayed_replies
+SET tm_local_replies = $opensips_tm_local_replies
+END
+BEGIN opensips.transactions_status $1
+SET tm_2xx_transactions = $opensips_tm_2xx_transactions
+SET tm_3xx_transactions = $opensips_tm_3xx_transactions
+SET tm_4xx_transactions = $opensips_tm_4xx_transactions
+SET tm_5xx_transactions = $opensips_tm_5xx_transactions
+SET tm_6xx_transactions = $opensips_tm_6xx_transactions
+END
+BEGIN opensips.transactions_inuse $1
+SET tm_inuse_transactions = $opensips_tm_inuse_transactions
+END
+BEGIN opensips.sl_replies $1
+SET sl_1xx_replies = $opensips_sl_1xx_replies
+SET sl_2xx_replies = $opensips_sl_2xx_replies
+SET sl_3xx_replies = $opensips_sl_3xx_replies
+SET sl_4xx_replies = $opensips_sl_4xx_replies
+SET sl_5xx_replies = $opensips_sl_5xx_replies
+SET sl_6xx_replies = $opensips_sl_6xx_replies
+SET sl_sent_replies = $opensips_sl_sent_replies
+SET sl_sent_err_replies = $opensips_sl_sent_err_replies
+SET sl_received_ACKs = $opensips_sl_received_ACKs
+END
+BEGIN opensips.dialogs $1
+SET dialog_processed_dialogs = $opensips_dialog_processed_dialogs
+SET dialog_expired_dialogs = $opensips_dialog_expired_dialogs
+SET dialog_failed_dialogs = $opensips_dialog_failed_dialogs
+END
+BEGIN opensips.net_waiting $1
+SET net_waiting_udp = $opensips_net_waiting_udp
+SET net_waiting_tcp = $opensips_net_waiting_tcp
+END
+BEGIN opensips.uri_checks $1
+SET uri_positive_checks = $opensips_uri_positive_checks
+SET uri_negative_checks = $opensips_uri_negative_checks
+END
+BEGIN opensips.traces $1
+SET siptrace_traced_requests = $opensips_siptrace_traced_requests
+SET siptrace_traced_replies = $opensips_siptrace_traced_replies
+END
+BEGIN opensips.shmem $1
+SET shmem_total_size = $opensips_shmem_total_size
+SET shmem_used_size = $opensips_shmem_used_size
+SET shmem_real_used_size = $opensips_shmem_real_used_size
+SET shmem_max_used_size = $opensips_shmem_max_used_size
+SET shmem_free_size = $opensips_shmem_free_size
+END
+BEGIN opensips.shmem_fragments $1
+SET shmem_fragments = $opensips_shmem_fragments
+END
+VALUESEOF
+
+ return 0
+}
diff --git a/collectors/charts.d.plugin/opensips/opensips.conf b/collectors/charts.d.plugin/opensips/opensips.conf
new file mode 100644
index 000000000..e25111dce
--- /dev/null
+++ b/collectors/charts.d.plugin/opensips/opensips.conf
@@ -0,0 +1,21 @@
+# no need for shebang - this file is loaded from charts.d.plugin
+
+# netdata
+# real-time performance and health monitoring, done right!
+# (C) 2018 Costa Tsaousis <costa@tsaousis.gr>
+# GPL v3+
+
+#opensips_opts="fifo get_statistics all"
+#opensips_cmd=
+#opensips_timeout=2
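+
+# example (illustrative path - point this at opensipsctl if it is not in the PATH):
+#opensips_cmd="/usr/sbin/opensipsctl"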
+
+# the data collection frequency
+# if unset, will inherit the netdata update frequency
+#opensips_update_every=5
+
+# the charts priority on the dashboard
+#opensips_priority=80000
+
+# the number of retries to do in case of failure
+# before disabling the module
+#opensips_retries=10
diff --git a/collectors/charts.d.plugin/phpfpm/Makefile.inc b/collectors/charts.d.plugin/phpfpm/Makefile.inc
new file mode 100644
index 000000000..56bff6102
--- /dev/null
+++ b/collectors/charts.d.plugin/phpfpm/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_charts_DATA += phpfpm/phpfpm.chart.sh
+dist_chartsconfig_DATA += phpfpm/phpfpm.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += phpfpm/README.md phpfpm/Makefile.inc
+
diff --git a/collectors/charts.d.plugin/phpfpm/README.md b/collectors/charts.d.plugin/phpfpm/README.md
new file mode 100644
index 000000000..d82951aac
--- /dev/null
+++ b/collectors/charts.d.plugin/phpfpm/README.md
@@ -0,0 +1,2 @@
+> THIS MODULE IS OBSOLETE.
+> USE THE PYTHON ONE - IT SUPPORTS MULTIPLE JOBS AND IT IS MORE EFFICIENT
diff --git a/collectors/charts.d.plugin/phpfpm/phpfpm.chart.sh b/collectors/charts.d.plugin/phpfpm/phpfpm.chart.sh
new file mode 100644
index 000000000..1af7910bc
--- /dev/null
+++ b/collectors/charts.d.plugin/phpfpm/phpfpm.chart.sh
@@ -0,0 +1,198 @@
+# shellcheck shell=bash
+# no need for shebang - this file is loaded from charts.d.plugin
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# netdata
+# real-time performance and health monitoring, done right!
+# (C) 2016 Costa Tsaousis <costa@tsaousis.gr>
+#
+# Contributed by @safeie with PR #276
+
+# first, you need to enable the php-fpm status page in php-fpm.conf
+# second, you need to add a status location in nginx.conf
+# see https://easyengine.io/tutorials/php/fpm-status-page/ for details
+
+declare -A phpfpm_urls=()
+declare -A phpfpm_curl_opts=()
+
+# _update_every is a special variable - it holds the number of seconds
+# between the calls of the _update() function
+phpfpm_update_every=
+phpfpm_priority=60000
+
+declare -a phpfpm_response=()
+phpfpm_pool=""
+phpfpm_start_time=""
+phpfpm_start_since=0
+phpfpm_accepted_conn=0
+phpfpm_listen_queue=0
+phpfpm_max_listen_queue=0
+phpfpm_listen_queue_len=0
+phpfpm_idle_processes=0
+phpfpm_active_processes=0
+phpfpm_total_processes=0
+phpfpm_max_active_processes=0
+phpfpm_max_children_reached=0
+phpfpm_slow_requests=0
+phpfpm_get() {
+ local opts="${1}" url="${2}"
+
+ # shellcheck disable=SC2207,2086
+ phpfpm_response=($(run curl -Ss ${opts} "${url}"))
+ # shellcheck disable=SC2181
+ if [ $? -ne 0 ] || [ "${#phpfpm_response[@]}" -eq 0 ]; then
+ return 1
+ fi
+
+ if [[ "${phpfpm_response[0]}" != "pool:" \
+ || "${phpfpm_response[2]}" != "process" \
+ || "${phpfpm_response[5]}" != "start" \
+ || "${phpfpm_response[12]}" != "accepted" \
+ || "${phpfpm_response[15]}" != "listen" \
+ || "${phpfpm_response[16]}" != "queue:" \
+ || "${phpfpm_response[26]}" != "idle" \
+ || "${phpfpm_response[29]}" != "active" \
+ || "${phpfpm_response[32]}" != "total" \
+ ]]
+ then
+ error "invalid response from phpfpm status server: ${phpfpm_response[*]}"
+ return 1
+ fi
+
+ phpfpm_pool="${phpfpm_response[1]}"
+ phpfpm_start_time="${phpfpm_response[7]} ${phpfpm_response[8]}"
+ phpfpm_start_since="${phpfpm_response[11]}"
+ phpfpm_accepted_conn="${phpfpm_response[14]}"
+ phpfpm_listen_queue="${phpfpm_response[17]}"
+ phpfpm_max_listen_queue="${phpfpm_response[21]}"
+ phpfpm_listen_queue_len="${phpfpm_response[25]}"
+ phpfpm_idle_processes="${phpfpm_response[28]}"
+ phpfpm_active_processes="${phpfpm_response[31]}"
+ phpfpm_total_processes="${phpfpm_response[34]}"
+ phpfpm_max_active_processes="${phpfpm_response[38]}"
+ phpfpm_max_children_reached="${phpfpm_response[42]}"
+ if [ "${phpfpm_response[43]}" == "slow" ]
+ then
+ phpfpm_slow_requests="${phpfpm_response[45]}"
+ else
+ phpfpm_slow_requests="-1"
+ fi
+
+ if [[ -z "${phpfpm_pool}" \
+ || -z "${phpfpm_start_time}" \
+ || -z "${phpfpm_start_since}" \
+ || -z "${phpfpm_accepted_conn}" \
+ || -z "${phpfpm_listen_queue}" \
+ || -z "${phpfpm_max_listen_queue}" \
+ || -z "${phpfpm_listen_queue_len}" \
+ || -z "${phpfpm_idle_processes}" \
+ || -z "${phpfpm_active_processes}" \
+ || -z "${phpfpm_total_processes}" \
+ || -z "${phpfpm_max_active_processes}" \
+ || -z "${phpfpm_max_children_reached}" \
+ ]]
+ then
+ error "empty values got from phpfpm status server: ${phpfpm_response[*]}"
+ return 1
+ fi
+
+ return 0
+}
+
+# _check is called once, to find out if this chart should be enabled or not
+phpfpm_check() {
+ if [ ${#phpfpm_urls[@]} -eq 0 ]; then
+ phpfpm_urls[local]="http://localhost/status"
+ fi
+
+ local m
+ for m in "${!phpfpm_urls[@]}"
+ do
+ phpfpm_get "${phpfpm_curl_opts[$m]}" "${phpfpm_urls[$m]}"
+ # shellcheck disable=SC2181
+ if [ $? -ne 0 ]; then
+ # shellcheck disable=SC2154
+ error "cannot find status on URL '${phpfpm_urls[$m]}'. Please set phpfpm_urls[$m]='http://localhost/status' in $confd/phpfpm.conf"
+ unset "phpfpm_urls[$m]"
+ continue
+ fi
+ done
+
+ if [ ${#phpfpm_urls[@]} -eq 0 ]; then
+ error "no phpfpm servers found. Please set phpfpm_urls[name]='url' to whatever needed to get status to the phpfpm server, in $confd/phpfpm.conf"
+ return 1
+ fi
+
+ # this should return:
+ # - 0 to enable the chart
+ # - 1 to disable the chart
+
+ return 0
+}
+
+# _create is called once, to create the charts
+phpfpm_create() {
+ local m
+ for m in "${!phpfpm_urls[@]}"
+ do
+ cat <<EOF
+CHART phpfpm_$m.connections '' "PHP-FPM Active Connections" "connections" phpfpm phpfpm.connections line $((phpfpm_priority + 1)) $phpfpm_update_every
+DIMENSION active '' absolute 1 1
+DIMENSION maxActive 'max active' absolute 1 1
+DIMENSION idle '' absolute 1 1
+
+CHART phpfpm_$m.requests '' "PHP-FPM Requests" "requests/s" phpfpm phpfpm.requests line $((phpfpm_priority + 2)) $phpfpm_update_every
+DIMENSION requests '' incremental 1 1
+
+CHART phpfpm_$m.performance '' "PHP-FPM Performance" "status" phpfpm phpfpm.performance line $((phpfpm_priority + 3)) $phpfpm_update_every
+DIMENSION reached 'max children reached' absolute 1 1
+EOF
+ if [ $((phpfpm_slow_requests)) -ne -1 ]
+ then
+ echo "DIMENSION slow 'slow requests' absolute 1 1"
+ fi
+ done
+
+ return 0
+}
+
+# _update is called continuously, to collect the values
+phpfpm_update() {
+ # the first argument to this function is the microseconds since last update
+ # pass this parameter to the BEGIN statement (see below).
+
+ # do all the work to collect / calculate the values
+ # for each dimension
+ # remember: KEEP IT SIMPLE AND SHORT
+
+ local m
+ for m in "${!phpfpm_urls[@]}"
+ do
+ phpfpm_get "${phpfpm_curl_opts[$m]}" "${phpfpm_urls[$m]}"
+ # shellcheck disable=SC2181
+ if [ $? -ne 0 ]; then
+ continue
+ fi
+
+ # write the result of the work.
+ cat <<EOF
+BEGIN phpfpm_$m.connections $1
+SET active = $((phpfpm_active_processes))
+SET maxActive = $((phpfpm_max_active_processes))
+SET idle = $((phpfpm_idle_processes))
+END
+BEGIN phpfpm_$m.requests $1
+SET requests = $((phpfpm_accepted_conn))
+END
+BEGIN phpfpm_$m.performance $1
+SET reached = $((phpfpm_max_children_reached))
+EOF
+ if [ $((phpfpm_slow_requests)) -ne -1 ]
+ then
+ echo "SET slow = $((phpfpm_slow_requests))"
+ fi
+ echo "END"
+ done
+
+ return 0
+}
diff --git a/collectors/charts.d.plugin/phpfpm/phpfpm.conf b/collectors/charts.d.plugin/phpfpm/phpfpm.conf
new file mode 100644
index 000000000..e4dd0231b
--- /dev/null
+++ b/collectors/charts.d.plugin/phpfpm/phpfpm.conf
@@ -0,0 +1,27 @@
+# no need for shebang - this file is loaded from charts.d.plugin
+
+# netdata
+# real-time performance and health monitoring, done right!
+# (C) 2018 Costa Tsaousis <costa@tsaousis.gr>
+# GPL v3+
+
+# THIS PLUGIN IS DEPRECATED
+# USE THE PYTHON.D ONE
+
+# first, you need to enable the php-fpm status page in php-fpm.conf
+# second, you need to add a status location in nginx.conf
+# see https://easyengine.io/tutorials/php/fpm-status-page/ for details
+#phpfpm_urls[name]=""
+#phpfpm_curl_opts[name]=""
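+
+# example job definition (illustrative values - the URL matches the module's
+# built-in default; curl options are only needed for e.g. authentication):
+#phpfpm_urls[local]="http://localhost/status"
+#phpfpm_curl_opts[local]=""
+
+# illustrative php-fpm / nginx snippets to expose the status page
+# (location and socket path are examples - adjust to your setup):
+#
+#   php-fpm pool config:
+#     pm.status_path = /status
+#
+#   nginx location:
+#     location = /status {
+#         include fastcgi_params;
+#         fastcgi_param SCRIPT_NAME /status;
+#         fastcgi_pass unix:/run/php/php-fpm.sock;
+#     }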
+
+# the data collection frequency
+# if unset, will inherit the netdata update frequency
+#phpfpm_update_every=
+
+# the charts priority on the dashboard
+#phpfpm_priority=60000
+
+# the number of retries to do in case of failure
+# before disabling the module
+#phpfpm_retries=10
+
diff --git a/collectors/charts.d.plugin/postfix/Makefile.inc b/collectors/charts.d.plugin/postfix/Makefile.inc
new file mode 100644
index 000000000..6e148352d
--- /dev/null
+++ b/collectors/charts.d.plugin/postfix/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_charts_DATA += postfix/postfix.chart.sh
+dist_chartsconfig_DATA += postfix/postfix.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += postfix/README.md postfix/Makefile.inc
+
diff --git a/collectors/charts.d.plugin/postfix/README.md b/collectors/charts.d.plugin/postfix/README.md
new file mode 100644
index 000000000..5fc265d56
--- /dev/null
+++ b/collectors/charts.d.plugin/postfix/README.md
@@ -0,0 +1,26 @@
+> THIS MODULE IS OBSOLETE.
+> USE THE PYTHON ONE - IT SUPPORTS MULTIPLE JOBS AND IT IS MORE EFFICIENT
+
+# postfix
+
+The plugin will collect the postfix queue size.
+
+It will create two charts:
+
+1. **queue size in emails**
+2. **queue size in KB**
+
+### configuration
+
+This is the internal default for `/etc/netdata/postfix.conf`
+
+```sh
+# the postqueue command
+# if empty, it will use the one found in the system path
+postfix_postqueue=
+
+# how frequently to collect queue size
+postfix_update_every=15
+```
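+
+To verify that the `postqueue` command works on your system, you can run it
+manually; the module parses the summary line at the end of its output
+(the numbers below are illustrative):
+
+```sh
+postqueue -p
+# ...
+# -- 3 Kbytes in 5 Requests.
+```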
+
+---
diff --git a/collectors/charts.d.plugin/postfix/postfix.chart.sh b/collectors/charts.d.plugin/postfix/postfix.chart.sh
new file mode 100644
index 000000000..8cb938ce1
--- /dev/null
+++ b/collectors/charts.d.plugin/postfix/postfix.chart.sh
@@ -0,0 +1,89 @@
+# shellcheck shell=bash disable=SC1117
+# no need for shebang - this file is loaded from charts.d.plugin
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# netdata
+# real-time performance and health monitoring, done right!
+# (C) 2016 Costa Tsaousis <costa@tsaousis.gr>
+#
+
+# the postqueue command
+# if empty, it will use the one found in the system path
+postfix_postqueue=
+
+# how frequently to collect queue size
+postfix_update_every=15
+
+postfix_priority=60000
+
+postfix_check() {
+ # this should return:
+ # - 0 to enable the chart
+ # - 1 to disable the chart
+
+ # try to find the postqueue executable
+ if [ -z "$postfix_postqueue" ] || [ ! -x "$postfix_postqueue" ]
+ then
+ # shellcheck disable=SC2230
+ postfix_postqueue="$(which postqueue 2>/dev/null || command -v postqueue 2>/dev/null)"
+ fi
+
+ if [ -z "$postfix_postqueue" ] || [ ! -x "$postfix_postqueue" ]
+ then
+ # shellcheck disable=SC2154
+ error "cannot find postqueue. Please set 'postfix_postqueue=/path/to/postqueue' in $confd/postfix.conf"
+ return 1
+ fi
+
+ return 0
+}
+
+postfix_create() {
+cat <<EOF
+CHART postfix_local.qemails '' "Postfix Queue Emails" "emails" queue postfix.queued.emails line $((postfix_priority + 1)) $postfix_update_every
+DIMENSION emails '' absolute 1 1
+CHART postfix_local.qsize '' "Postfix Queue Emails Size" "emails size in KB" queue postfix.queued.size area $((postfix_priority + 2)) $postfix_update_every
+DIMENSION size '' absolute 1 1
+EOF
+
+ return 0
+}
+
+postfix_update() {
+ # the first argument to this function is the microseconds since last update
+ # pass this parameter to the BEGIN statement (see below).
+
+ # do all the work to collect / calculate the values
+ # for each dimension
+ # remember: KEEP IT SIMPLE AND SHORT
+
+ # 1. execute postqueue -p
+ # 2. get the line that begins with --
+ # 3. match the 2 numbers on the line and output 2 lines like these:
+ # local postfix_q_size=NUMBER
+ # local postfix_q_emails=NUMBER
+ # 4. then execute this as a script with eval
+ #
+ # be very careful with eval:
+ # prepare the script and always egrep at the end the lines that are useful, so that
+ # even if something goes wrong, no other code can be executed
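+ #
+ # the summary line parsed from 'postqueue -p' looks like this (values are illustrative):
+ # -- 3 Kbytes in 5 Requests.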
+ postfix_q_emails=0
+ postfix_q_size=0
+
+ eval "$(run "$postfix_postqueue" -p |\
+ grep "^--" |\
+ sed -e "s/-- \([0-9]\+\) Kbytes in \([0-9]\+\) Requests.$/local postfix_q_size=\1\nlocal postfix_q_emails=\2/g" |\
+ grep -E "^local postfix_q_(emails|size)=[0-9]+$")"
+
+ # write the result of the work.
+ cat <<VALUESEOF
+BEGIN postfix_local.qemails $1
+SET emails = $postfix_q_emails
+END
+BEGIN postfix_local.qsize $1
+SET size = $postfix_q_size
+END
+VALUESEOF
+
+ return 0
+}
diff --git a/collectors/charts.d.plugin/postfix/postfix.conf b/collectors/charts.d.plugin/postfix/postfix.conf
new file mode 100644
index 000000000..b77817bd6
--- /dev/null
+++ b/collectors/charts.d.plugin/postfix/postfix.conf
@@ -0,0 +1,25 @@
+# no need for shebang - this file is loaded from charts.d.plugin
+
+# netdata
+# real-time performance and health monitoring, done right!
+# (C) 2018 Costa Tsaousis <costa@tsaousis.gr>
+# GPL v3+
+
+# THIS PLUGIN IS DEPRECATED
+# USE THE PYTHON.D ONE
+
+# the postqueue command
+# if empty, it will use the one found in the system path
+#postfix_postqueue=
+
+# the data collection frequency
+# if unset, will inherit the netdata update frequency
+#postfix_update_every=15
+
+# the charts priority on the dashboard
+#postfix_priority=60000
+
+# the number of retries to do in case of failure
+# before disabling the module
+#postfix_retries=10
+
diff --git a/collectors/charts.d.plugin/sensors/Makefile.inc b/collectors/charts.d.plugin/sensors/Makefile.inc
new file mode 100644
index 000000000..f466a1b62
--- /dev/null
+++ b/collectors/charts.d.plugin/sensors/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_charts_DATA += sensors/sensors.chart.sh
+dist_chartsconfig_DATA += sensors/sensors.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += sensors/README.md sensors/Makefile.inc
+
diff --git a/collectors/charts.d.plugin/sensors/README.md b/collectors/charts.d.plugin/sensors/README.md
new file mode 100644
index 000000000..ddc3650d6
--- /dev/null
+++ b/collectors/charts.d.plugin/sensors/README.md
@@ -0,0 +1,52 @@
+> THIS MODULE IS OBSOLETE.
+> USE THE PYTHON ONE - IT SUPPORTS MULTIPLE JOBS AND IT IS MORE EFFICIENT
+
+> Unlike the python one, this module can collect temperature on RPi.
+
+# sensors
+
+The plugin will provide charts for all configured system sensors.
+
+> This plugin reads sensors directly from the kernel.
+> The `lm-sensors` package can perform calculations on the
+> kernel-provided values; this plugin does not.
+> So the values graphed are the raw hardware values of the sensors.
+
+The plugin will create netdata charts for:
+
+1. **Temperature**
+2. **Voltage**
+3. **Current**
+4. **Power**
+5. **Fans Speed**
+6. **Energy**
+7. **Humidity**
+
+One chart will be created for each of the above, for every sensor chip found.
+
+### configuration
+
+This is the internal default for `/etc/netdata/sensors.conf`
+
+```sh
+# the directory the kernel keeps sensor data
+sensors_sys_dir="${NETDATA_HOST_PREFIX}/sys/devices"
+
+# how deep in the tree to check for sensor data
+sensors_sys_depth=10
+
+# if set to 1, the script will overwrite internal
+# script functions with code generated ones
+# leave to 1, is faster
+sensors_source_update=1
+
+# how frequently to collect sensor data
+# the default is to collect it at every iteration of charts.d
+sensors_update_every=
+
+# array of sensors which are excluded
+# the default is to include all
+sensors_excluded=()
+```
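+
+To see which raw sensor files the plugin will pick up on your host, you can run
+the same search it performs internally (the path and depth shown are the
+defaults from the configuration above):
+
+```sh
+find /sys/devices -maxdepth 10 -name '*_input' -o -name temp
+```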
+
+---
diff --git a/collectors/charts.d.plugin/sensors/sensors.chart.sh b/collectors/charts.d.plugin/sensors/sensors.chart.sh
new file mode 100644
index 000000000..54368f1e0
--- /dev/null
+++ b/collectors/charts.d.plugin/sensors/sensors.chart.sh
@@ -0,0 +1,255 @@
+# shellcheck shell=bash
+# no need for shebang - this file is loaded from charts.d.plugin
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# netdata
+# real-time performance and health monitoring, done right!
+# (C) 2016 Costa Tsaousis <costa@tsaousis.gr>
+#
+
+# sensors docs
+# https://www.kernel.org/doc/Documentation/hwmon/sysfs-interface
+
+# if this chart is called X.chart.sh, then all functions and global variables
+# must start with X_
+
+# the directory the kernel keeps sensor data
+sensors_sys_dir="${NETDATA_HOST_PREFIX}/sys/devices"
+
+# how deep in the tree to check for sensor data
+sensors_sys_depth=10
+
+# if set to 1, the script will overwrite internal
+# script functions with code generated ones
+# leave to 1, is faster
+sensors_source_update=1
+
+# how frequently to collect sensor data
+# the default is to collect it at every iteration of charts.d
+sensors_update_every=
+
+sensors_priority=90000
+
+declare -A sensors_excluded=()
+
+sensors_find_all_files() {
+ find "$1" -maxdepth $sensors_sys_depth -name \*_input -o -name temp 2>/dev/null
+}
+
+sensors_find_all_dirs() {
+ # shellcheck disable=SC2162
+ sensors_find_all_files "$1" | while read
+ do
+ dirname "$REPLY"
+ done | sort -u
+}
+
+# _check is called once, to find out if this chart should be enabled or not
+sensors_check() {
+
+ # this should return:
+ # - 0 to enable the chart
+ # - 1 to disable the chart
+
+ [ -z "$( sensors_find_all_files "$sensors_sys_dir" )" ] && error "no sensors found in '$sensors_sys_dir'." && return 1
+ return 0
+}
+
+sensors_check_files() {
+ # we only need sensors that report a non-zero value
+ # also remove not needed sensors
+
+ local f v excluded
+ for f in "$@"
+ do
+ [ ! -f "$f" ] && continue
+ for ex in "${sensors_excluded[@]}"; do
+ [[ $f =~ .*$ex$ ]] && excluded='1' && break
+ done
+
+ [ "$excluded" != "1" ] && v="$( cat "$f" )" || v=0
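+ # force arithmetic evaluation, so an empty or non-numeric read becomes 0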
+ v=$(( v + 1 - 1 ))
+ [ $v -ne 0 ] && echo "$f" && continue
+ excluded=
+
+ error "$f gives zero values"
+ done
+}
+
+sensors_check_temp_type() {
+ # valid temp types are 1 to 6
+ # disabled sensors have the value 0
+
+ local f t v
+ for f in "$@"
+ do
+ # shellcheck disable=SC2001
+ t=$( echo "$f" | sed "s|_input$|_type|g" )
+ [ "$f" = "$t" ] && echo "$f" && continue
+ [ ! -f "$t" ] && echo "$f" && continue
+
+ v="$( cat "$t" )"
+ v=$(( v + 1 - 1 ))
+ [ $v -ne 0 ] && echo "$f" && continue
+
+ error "$f is disabled"
+ done
+}
+
+# _create is called once, to create the charts
+sensors_create() {
+ local path dir name x file lfile labelname device subsystem id type mode files multiplier divisor
+
+ # we create a script with the source of the
+ # sensors_update() function
+ # - the highest speed we can achieve -
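+ #
+ # the generated $TMP_DIR/sensors.sh ends up looking roughly like this
+ # (chart ids, dimension ids and paths are illustrative):
+ #
+ # sensors_update() {
+ # echo "BEGIN sensors.temp_<id> $1"
+ # echo "SET <file_id> = "$(< /sys/devices/.../temp1_input )
+ # echo END
+ # }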
+ [ $sensors_source_update -eq 1 ] && echo >"$TMP_DIR/sensors.sh" "sensors_update() {"
+
+ for path in $( sensors_find_all_dirs "$sensors_sys_dir" | sort -u )
+ do
+ dir=$( basename "$path" )
+ device=
+ subsystem=
+ id=
+ type=
+ name=
+
+ [ -h "$path/device" ] && device=$( readlink -f "$path/device" )
+ [ ! -z "$device" ] && device=$( basename "$device" )
+ [ -z "$device" ] && device="$dir"
+
+ [ -h "$path/subsystem" ] && subsystem=$( readlink -f "$path/subsystem" )
+ [ ! -z "$subsystem" ] && subsystem=$( basename "$subsystem" )
+ [ -z "$subsystem" ] && subsystem="$dir"
+
+ [ -f "$path/name" ] && name=$( cat "$path/name" )
+ [ -z "$name" ] && name="$dir"
+
+ [ -f "$path/type" ] && type=$( cat "$path/type" )
+ [ -z "$type" ] && type="$dir"
+
+ id="$( fixid "$device.$subsystem.$dir" )"
+
+ debug "path='$path', dir='$dir', device='$device', subsystem='$subsystem', id='$id', name='$name'"
+
+ for mode in temperature voltage fans power current energy humidity
+ do
+ files=
+ multiplier=1
+ divisor=1
+ algorithm="absolute"
+
+ case $mode in
+ temperature)
+ files="$( ls "$path"/temp*_input 2>/dev/null; ls "$path/temp" 2>/dev/null )"
+ files="$( sensors_check_files "$files" )"
+ files="$( sensors_check_temp_type "$files" )"
+ [ -z "$files" ] && continue
+ echo "CHART sensors.temp_$id '' '$name Temperature' 'Celsius' 'temperature' 'sensors.temp' line $((sensors_priority + 1)) $sensors_update_every"
+ echo >>"$TMP_DIR/sensors.sh" "echo \"BEGIN sensors.temp_$id \$1\""
+ divisor=1000
+ ;;
+
+ voltage)
+ files="$( ls "$path"/in*_input 2>/dev/null )"
+ files="$( sensors_check_files "$files" )"
+ [ -z "$files" ] && continue
+ echo "CHART sensors.volt_$id '' '$name Voltage' 'Volts' 'voltage' 'sensors.volt' line $((sensors_priority + 2)) $sensors_update_every"
+ echo >>"$TMP_DIR/sensors.sh" "echo \"BEGIN sensors.volt_$id \$1\""
+ divisor=1000
+ ;;
+
+ current)
+ files="$( ls "$path"/curr*_input 2>/dev/null )"
+ files="$( sensors_check_files "$files" )"
+ [ -z "$files" ] && continue
+ echo "CHART sensors.curr_$id '' '$name Current' 'Ampere' 'current' 'sensors.curr' line $((sensors_priority + 3)) $sensors_update_every"
+ echo >>"$TMP_DIR/sensors.sh" "echo \"BEGIN sensors.curr_$id \$1\""
+ divisor=1000
+ ;;
+
+ power)
+ files="$( ls "$path"/power*_input 2>/dev/null )"
+ files="$( sensors_check_files "$files" )"
+ [ -z "$files" ] && continue
+ echo "CHART sensors.power_$id '' '$name Power' 'Watt' 'power' 'sensors.power' line $((sensors_priority + 4)) $sensors_update_every"
+ echo >>"$TMP_DIR/sensors.sh" "echo \"BEGIN sensors.power_$id \$1\""
+ divisor=1000000
+ ;;
+
+ fans)
+ files="$( ls "$path"/fan*_input 2>/dev/null )"
+ files="$( sensors_check_files "$files" )"
+ [ -z "$files" ] && continue
+ echo "CHART sensors.fan_$id '' '$name Fans Speed' 'Rotations / Minute' 'fans' 'sensors.fans' line $((sensors_priority + 5)) $sensors_update_every"
+ echo >>"$TMP_DIR/sensors.sh" "echo \"BEGIN sensors.fan_$id \$1\""
+ ;;
+
+ energy)
+ files="$( ls "$path"/energy*_input 2>/dev/null )"
+ files="$( sensors_check_files "$files" )"
+ [ -z "$files" ] && continue
+ echo "CHART sensors.energy_$id '' '$name Energy' 'Joule' 'energy' 'sensors.energy' areastack $((sensors_priority + 6)) $sensors_update_every"
+ echo >>"$TMP_DIR/sensors.sh" "echo \"BEGIN sensors.energy_$id \$1\""
+ algorithm="incremental"
+ divisor=1000000
+ ;;
+
+ humidity)
+ files="$( ls "$path"/humidity*_input 2>/dev/null )"
+ files="$( sensors_check_files "$files" )"
+ [ -z "$files" ] && continue
+ echo "CHART sensors.humidity_$id '' '$name Humidity' 'Percent' 'humidity' 'sensors.humidity' line $((sensors_priority + 7)) $sensors_update_every"
+ echo >>"$TMP_DIR/sensors.sh" "echo \"BEGIN sensors.humidity_$id \$1\""
+ divisor=1000
+ ;;
+
+ *)
+ continue
+ ;;
+ esac
+
+ for x in $files
+ do
+ file="$x"
+ fid="$( fixid "$file" )"
+ lfile="$( basename "$file" | sed "s|_input$|_label|g" )"
+ labelname="$( basename "$file" | sed "s|_input$||g" )"
+
+ if [ ! "$path/$lfile" = "$file" ] && [ -f "$path/$lfile" ]
+ then
+ labelname="$( cat "$path/$lfile" )"
+ fi
+
+ echo "DIMENSION $fid '$labelname' $algorithm $multiplier $divisor"
+ echo >>"$TMP_DIR/sensors.sh" "echo \"SET $fid = \"\$(< $file )"
+ done
+
+ echo >>"$TMP_DIR/sensors.sh" "echo END"
+ done
+ done
+
+ [ $sensors_source_update -eq 1 ] && echo >>"$TMP_DIR/sensors.sh" "}"
+
+ # ok, load the function sensors_update() we created
+ # shellcheck source=/dev/null
+ [ $sensors_source_update -eq 1 ] && . "$TMP_DIR/sensors.sh"
+
+ return 0
+}
+
+# _update is called continuously, to collect the values
+sensors_update() {
+ # the first argument to this function is the microseconds since last update
+ # pass this parameter to the BEGIN statement (see below).
+
+ # do all the work to collect / calculate the values
+ # for each dimension
+ # remember: KEEP IT SIMPLE AND SHORT
+
+ # shellcheck source=/dev/null
+ [ $sensors_source_update -eq 0 ] && . "$TMP_DIR/sensors.sh" "$1"
+
+ return 0
+}
+
diff --git a/collectors/charts.d.plugin/sensors/sensors.conf b/collectors/charts.d.plugin/sensors/sensors.conf
new file mode 100644
index 000000000..bcb28807d
--- /dev/null
+++ b/collectors/charts.d.plugin/sensors/sensors.conf
@@ -0,0 +1,32 @@
+# no need for shebang - this file is loaded from charts.d.plugin
+
+# netdata
+# real-time performance and health monitoring, done right!
+# (C) 2018 Costa Tsaousis <costa@tsaousis.gr>
+# GPL v3+
+
+# THIS PLUGIN IS DEPRECATED
+# USE THE PYTHON.D ONE
+
+# the directory the kernel keeps sensor data
+#sensors_sys_dir="/sys/devices"
+
+# how deep in the tree to check for sensor data
+#sensors_sys_depth=10
+
+# if set to 1, the script will overwrite internal
+# script functions with code generated ones
+# leave to 1, is faster
+#sensors_source_update=1
+
+# the data collection frequency
+# if unset, will inherit the netdata update frequency
+#sensors_update_every=
+
+# the charts priority on the dashboard
+#sensors_priority=90000
+
+# the number of retries to do in case of failure
+# before disabling the module
+#sensors_retries=10
+
diff --git a/collectors/charts.d.plugin/squid/Makefile.inc b/collectors/charts.d.plugin/squid/Makefile.inc
new file mode 100644
index 000000000..ad470d88c
--- /dev/null
+++ b/collectors/charts.d.plugin/squid/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_charts_DATA += squid/squid.chart.sh
+dist_chartsconfig_DATA += squid/squid.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += squid/README.md squid/Makefile.inc
+
diff --git a/collectors/charts.d.plugin/squid/README.md b/collectors/charts.d.plugin/squid/README.md
new file mode 100644
index 000000000..0934ccfcf
--- /dev/null
+++ b/collectors/charts.d.plugin/squid/README.md
@@ -0,0 +1,66 @@
+> THIS MODULE IS OBSOLETE.
+> USE THE PYTHON ONE - IT SUPPORTS MULTIPLE JOBS AND IT IS MORE EFFICIENT
+
+
+# squid
+
+The plugin will monitor a squid server.
+
+It will produce 4 charts:
+
+1. **Squid Client Bandwidth** in kbps
+
+ * in
+ * out
+ * hits
+
+2. **Squid Client Requests** in requests/sec
+
+ * requests
+ * hits
+ * errors
+
+3. **Squid Server Bandwidth** in kbps
+
+ * in
+ * out
+
+4. **Squid Server Requests** in requests/sec
+
+ * requests
+ * errors
+
+### autoconfig
+
+The plugin will auto-detect squid servers running on
+localhost, on ports 3128 or 8080.
+
+It will attempt to download URLs in the form:
+
+- `cache_object://HOST:PORT/counters`
+- `/squid-internal-mgr/counters`
+
+If any of these succeeds, it will use it.
+
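+You can check manually whether either endpoint responds, using the same
+`squidclient` command the plugin runs internally (host and port below are
+illustrative):
+
+```sh
+squidclient -h 127.0.0.1 -p 3128 cache_object://127.0.0.1:3128/counters | grep client_http.requests
+```
+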
+### configuration
+
+If you need to configure it by hand, create the file
+`/etc/netdata/squid.conf` with the following variables:
+
+- `squid_host=IP` the IP of the squid host
+- `squid_port=PORT` the port squid is listening on
+- `squid_url="URL"` the URL with the statistics to be fetched from squid
+- `squid_timeout=SECONDS` how much time we should wait for squid to respond
+- `squid_update_every=SECONDS` the frequency of the data collection
+
+Example `/etc/netdata/squid.conf`:
+
+```sh
+squid_host=127.0.0.1
+squid_port=3128
+squid_url="cache_object://127.0.0.1:3128/counters"
+squid_timeout=2
+squid_update_every=5
+```
+
+---
diff --git a/collectors/charts.d.plugin/squid/squid.chart.sh b/collectors/charts.d.plugin/squid/squid.chart.sh
new file mode 100644
index 000000000..cf5d1d78a
--- /dev/null
+++ b/collectors/charts.d.plugin/squid/squid.chart.sh
@@ -0,0 +1,147 @@
+# shellcheck shell=bash disable=SC2154
+# no need for shebang - this file is loaded from charts.d.plugin
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# netdata
+# real-time performance and health monitoring, done right!
+# (C) 2016 Costa Tsaousis <costa@tsaousis.gr>
+#
+
+squid_host=
+squid_port=
+squid_url=
+squid_update_every=2
+squid_priority=60000
+
+squid_get_stats_internal() {
+ local host="$1" port="$2" url="$3"
+ run squidclient -h "$host" -p "$port" "$url"
+}
+
+squid_get_stats() {
+ squid_get_stats_internal "$squid_host" "$squid_port" "$squid_url"
+}
+
+squid_autodetect() {
+ local host="127.0.0.1" port url x
+
+ for port in 3128 8080
+ do
+ for url in "cache_object://$host:$port/counters" "/squid-internal-mgr/counters"
+ do
+ x=$(squid_get_stats_internal "$host" "$port" "$url" | grep client_http.requests)
+ if [ ! -z "$x" ]
+ then
+ squid_host="$host"
+ squid_port="$port"
+ squid_url="$url"
+ debug "found squid at '$host:$port' with url '$url'"
+ return 0
+ fi
+ done
+ done
+
+ error "cannot find squid running in localhost. Please set squid_url='url' and squid_host='IP' and squid_port='PORT' in $confd/squid.conf"
+ return 1
+}
+
+squid_check() {
+ require_cmd squidclient || return 1
+ require_cmd sed || return 1
+ require_cmd egrep || return 1
+
+ if [ -z "$squid_host" ] || [ -z "$squid_port" ] || [ -z "$squid_url" ]
+ then
+ squid_autodetect || return 1
+ fi
+
+ # check once if the url works
+ local x
+ x="$(squid_get_stats | grep client_http.requests)"
+ # shellcheck disable=SC2181
+ if [ ! $? -eq 0 ] || [ -z "$x" ]
+ then
+ error "cannot fetch URL '$squid_url' by connecting to $squid_host:$squid_port. Please set squid_url='url' and squid_host='host' and squid_port='port' in $confd/squid.conf"
+ return 1
+ fi
+
+ return 0
+}
+
+squid_create() {
+ # create the charts
+ cat <<EOF
+CHART squid_local.clients_net '' "Squid Client Bandwidth" "kilobits / sec" clients squid.clients.net area $((squid_priority + 1)) $squid_update_every
+DIMENSION client_http_kbytes_in in incremental 8 1
+DIMENSION client_http_kbytes_out out incremental -8 1
+DIMENSION client_http_hit_kbytes_out hits incremental -8 1
+
+CHART squid_local.clients_requests '' "Squid Client Requests" "requests / sec" clients squid.clients.requests line $((squid_priority + 3)) $squid_update_every
+DIMENSION client_http_requests requests incremental 1 1
+DIMENSION client_http_hits hits incremental 1 1
+DIMENSION client_http_errors errors incremental -1 1
+
+CHART squid_local.servers_net '' "Squid Server Bandwidth" "kilobits / sec" servers squid.servers.net area $((squid_priority + 2)) $squid_update_every
+DIMENSION server_all_kbytes_in in incremental 8 1
+DIMENSION server_all_kbytes_out out incremental -8 1
+
+CHART squid_local.servers_requests '' "Squid Server Requests" "requests / sec" servers squid.servers.requests line $((squid_priority + 4)) $squid_update_every
+DIMENSION server_all_requests requests incremental 1 1
+DIMENSION server_all_errors errors incremental -1 1
+EOF
+
+ return 0
+}
+
+
+squid_update() {
+ # the first argument to this function is the microseconds since last update
+ # pass this parameter to the BEGIN statement (see below).
+
+ # do all the work to collect / calculate the values
+ # for each dimension
+ # remember: KEEP IT SIMPLE AND SHORT
+
+ # 1. get the counters page from squid
+ # 2. sed to remove spaces; replace . with _; remove spaces around =; prepend each line with: local squid_
+ # 3. egrep lines starting with:
+ # local squid_client_http_ then one or more of these a-z 0-9 _ then = and one or more of 0-9
+ # local squid_server_all_ then one or more of these a-z 0-9 _ then = and one or more of 0-9
+ # 4. then execute this as a script with eval
+ #
+ # be very careful with eval:
+ # prepare the script and always grep at the end the lines that are useful, so that
+ # even if something goes wrong, no other code can be executed
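+ #
+ # for example (illustrative value), the counters page contains lines like:
+ # client_http.requests = 12345
+ # which the pipeline below turns into:
+ # local squid_client_http_requests=12345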
+
+ # shellcheck disable=SC1117
+ eval "$(squid_get_stats |\
+ sed -e "s/ \+/ /g" -e "s/\./_/g" -e "s/^\([a-z0-9_]\+\) *= *\([0-9]\+\)$/local squid_\1=\2/g" |\
+ grep -E "^local squid_(client_http|server_all)_[a-z0-9_]+=[0-9]+$")"
+
+ # write the result of the work.
+ cat <<VALUESEOF
+BEGIN squid_local.clients_net $1
+SET client_http_kbytes_in = $squid_client_http_kbytes_in
+SET client_http_kbytes_out = $squid_client_http_kbytes_out
+SET client_http_hit_kbytes_out = $squid_client_http_hit_kbytes_out
+END
+
+BEGIN squid_local.clients_requests $1
+SET client_http_requests = $squid_client_http_requests
+SET client_http_hits = $squid_client_http_hits
+SET client_http_errors = $squid_client_http_errors
+END
+
+BEGIN squid_local.servers_net $1
+SET server_all_kbytes_in = $squid_server_all_kbytes_in
+SET server_all_kbytes_out = $squid_server_all_kbytes_out
+END
+
+BEGIN squid_local.servers_requests $1
+SET server_all_requests = $squid_server_all_requests
+SET server_all_errors = $squid_server_all_errors
+END
+VALUESEOF
+
+ return 0
+}
diff --git a/collectors/charts.d.plugin/squid/squid.conf b/collectors/charts.d.plugin/squid/squid.conf
new file mode 100644
index 000000000..19e928f25
--- /dev/null
+++ b/collectors/charts.d.plugin/squid/squid.conf
@@ -0,0 +1,26 @@
+# no need for shebang - this file is loaded from charts.d.plugin
+
+# netdata
+# real-time performance and health monitoring, done right!
+# (C) 2018 Costa Tsaousis <costa@tsaousis.gr>
+# GPL v3+
+
+# THIS PLUGIN IS DEPRECATED
+# USE THE PYTHON.D ONE
+
+#squid_host=
+#squid_port=
+#squid_url=
+#squid_timeout=2
+
+# the data collection frequency
+# if unset, will inherit the netdata update frequency
+#squid_update_every=2
+
+# the charts priority on the dashboard
+#squid_priority=60000
+
+# the number of retries to do in case of failure
+# before disabling the module
+#squid_retries=10
+
diff --git a/collectors/charts.d.plugin/tomcat/Makefile.inc b/collectors/charts.d.plugin/tomcat/Makefile.inc
new file mode 100644
index 000000000..ef05b1953
--- /dev/null
+++ b/collectors/charts.d.plugin/tomcat/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_charts_DATA += tomcat/tomcat.chart.sh
+dist_chartsconfig_DATA += tomcat/tomcat.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += tomcat/README.md tomcat/Makefile.inc
+
diff --git a/collectors/charts.d.plugin/tomcat/README.md b/collectors/charts.d.plugin/tomcat/README.md
new file mode 100644
index 000000000..d82951aac
--- /dev/null
+++ b/collectors/charts.d.plugin/tomcat/README.md
@@ -0,0 +1,2 @@
+> THIS MODULE IS OBSOLETE.
+> USE THE PYTHON ONE - IT SUPPORTS MULTIPLE JOBS AND IT IS MORE EFFICIENT
diff --git a/collectors/charts.d.plugin/tomcat/tomcat.chart.sh b/collectors/charts.d.plugin/tomcat/tomcat.chart.sh
new file mode 100644
index 000000000..294487b8b
--- /dev/null
+++ b/collectors/charts.d.plugin/tomcat/tomcat.chart.sh
@@ -0,0 +1,150 @@
+# shellcheck shell=bash
+# no need for shebang - this file is loaded from charts.d.plugin
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# netdata
+# real-time performance and health monitoring, done right!
+# (C) 2016 Costa Tsaousis <costa@tsaousis.gr>
+#
+# Contributed by @jgeromero with PR #277
+
+# Description: Tomcat netdata charts.d plugin
+# Author: Jorge Romero
+
+# the URL to download tomcat status info
+# usually http://localhost:8080/manager/status?XML=true
+tomcat_url=""
+tomcat_curl_opts=""
+
+# set tomcat username/password here
+tomcat_user=""
+tomcat_password=""
+
+# _update_every is a special variable - it holds the number of seconds
+# between the calls of the _update() function
+tomcat_update_every=
+
+tomcat_priority=60000
+
+# convert tomcat floating point values
+# to integer using this multiplier
+# this only affects precision - the values
+# will be in the proper units
+tomcat_decimal_detail=1000000
+
+# used by volume chart to convert bytes to KB
+tomcat_decimal_KB_detail=1000
+
+tomcat_check() {
+
+ require_cmd xmlstarlet || return 1
+
+
+ # check if url, username, passwords are set
+ if [ -z "${tomcat_url}" ]; then
+ error "tomcat url is unset or set to the empty string"
+ return 1
+ fi
+ if [ -z "${tomcat_user}" ]; then
+ # check backwards compatibility
+ # shellcheck disable=SC2154
+ if [ -z "${tomcatUser}" ]; then
+ error "tomcat user is unset or set to the empty string"
+ return 1
+ else
+ tomcat_user="${tomcatUser}"
+ fi
+ fi
+ if [ -z "${tomcat_password}" ]; then
+ # check backwards compatibility
+ # shellcheck disable=SC2154
+ if [ -z "${tomcatPassword}" ]; then
+ error "tomcat password is unset or set to the empty string"
+ return 1
+ else
+ tomcat_password="${tomcatPassword}"
+ fi
+ fi
+
+ # check if we can get to tomcat's status page
+ tomcat_get
+ # shellcheck disable=2181
+ if [ $? -ne 0 ]
+ then
+ error "cannot get to status page on URL '${tomcat_url}'. Please make sure tomcat url, username and password are correct."
+ return 1
+ fi
+
+ # this should return:
+ # - 0 to enable the chart
+ # - 1 to disable the chart
+
+ return 0
+}
+
+tomcat_get() {
+ # collect tomcat values
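+ # derive the port from tomcat_url: the host:port pair is the third
+ # '/'-separated field of the URL, and everything after the ':' is the port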
+ tomcat_port="$(IFS=/ read -ra a <<< "$tomcat_url"; hostport=${a[2]}; echo "${hostport#*:}")"
+ mapfile -t lines < <(run curl -u "$tomcat_user":"$tomcat_password" -Ss ${tomcat_curl_opts} "$tomcat_url" |\
+ run xmlstarlet sel \
+ -t -m "/status/jvm/memory" -v @free \
+ -n -m "/status/connector[@name='\"http-bio-$tomcat_port\"']/threadInfo" -v @currentThreadCount \
+ -n -v @currentThreadsBusy \
+ -n -m "/status/connector[@name='\"http-bio-$tomcat_port\"']/requestInfo" -v @requestCount \
+ -n -v @bytesSent -n -)
+
+ tomcat_jvm_freememory="${lines[0]}"
+ tomcat_threads="${lines[1]}"
+ tomcat_threads_busy="${lines[2]}"
+ tomcat_accesses="${lines[3]}"
+ tomcat_volume="${lines[4]}"
+
+ return 0
+}
+
+# _create is called once, to create the charts
+tomcat_create() {
+ cat <<EOF
+CHART tomcat.accesses '' "tomcat requests" "requests/s" statistics tomcat.accesses area $((tomcat_priority + 8)) $tomcat_update_every
+DIMENSION accesses '' incremental
+CHART tomcat.volume '' "tomcat volume" "KB/s" volume tomcat.volume area $((tomcat_priority + 5)) $tomcat_update_every
+DIMENSION volume '' incremental divisor ${tomcat_decimal_KB_detail}
+CHART tomcat.threads '' "tomcat threads" "current threads" statistics tomcat.threads line $((tomcat_priority + 6)) $tomcat_update_every
+DIMENSION current '' absolute 1
+DIMENSION busy '' absolute 1
+CHART tomcat.jvm '' "JVM Free Memory" "MB" statistics tomcat.jvm area $((tomcat_priority + 8)) $tomcat_update_every
+DIMENSION jvm '' absolute 1 ${tomcat_decimal_detail}
+EOF
+ return 0
+}
+
+# _update is called continuously, to collect the values
+tomcat_update() {
+ # the first argument to this function is the microseconds since last update
+ # pass this parameter to the BEGIN statement (see below).
+
+ # do all the work to collect / calculate the values
+ # for each dimension
+ # remember: KEEP IT SIMPLE AND SHORT
+
+ tomcat_get || return 1
+
+ # write the result of the work.
+ cat <<VALUESEOF
+BEGIN tomcat.accesses $1
+SET accesses = $((tomcat_accesses))
+END
+BEGIN tomcat.volume $1
+SET volume = $((tomcat_volume))
+END
+BEGIN tomcat.threads $1
+SET current = $((tomcat_threads))
+SET busy = $((tomcat_threads_busy))
+END
+BEGIN tomcat.jvm $1
+SET jvm = $((tomcat_jvm_freememory))
+END
+VALUESEOF
+
+ return 0
+}
diff --git a/collectors/charts.d.plugin/tomcat/tomcat.conf b/collectors/charts.d.plugin/tomcat/tomcat.conf
new file mode 100644
index 000000000..e9f3eefa9
--- /dev/null
+++ b/collectors/charts.d.plugin/tomcat/tomcat.conf
@@ -0,0 +1,38 @@
+# no need for shebang - this file is loaded from charts.d.plugin
+
+# netdata
+# real-time performance and health monitoring, done right!
+# (C) 2018 Costa Tsaousis <costa@tsaousis.gr>
+# GPL v3+
+
+# THIS PLUGIN IS DEPRECATED
+# USE THE PYTHON.D ONE
+
+# the URL to download tomcat status info
+# usually http://localhost:8080/manager/status?XML=true
+#tomcat_url=""
+#tomcat_curl_opts=""
+
+# set tomcat username/password here
+#tomcat_user=""
+#tomcat_password=""
+
+# the data collection frequency
+# if unset, will inherit the netdata update frequency
+#tomcat_update_every=1
+
+# the charts priority on the dashboard
+#tomcat_priority=60000
+
+# the number of retries to do in case of failure
+# before disabling the module
+#tomcat_retries=10
+
+# convert tomcat floating point values
+# to integer using this multiplier
+# this only affects precision - the values
+# will be in the proper units
+#tomcat_decimal_detail=1000000
+
+# used by volume chart to convert bytes to KB
+#tomcat_decimal_KB_detail=1000
diff --git a/collectors/checks.plugin/Makefile.am b/collectors/checks.plugin/Makefile.am
new file mode 100644
index 000000000..babdcf0df
--- /dev/null
+++ b/collectors/checks.plugin/Makefile.am
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
diff --git a/collectors/checks.plugin/Makefile.in b/collectors/checks.plugin/Makefile.in
new file mode 100644
index 000000000..632125466
--- /dev/null
+++ b/collectors/checks.plugin/Makefile.in
@@ -0,0 +1,457 @@
+# Makefile.in generated by automake 1.14.1 from Makefile.am.
+# @configure_input@
+
+# Copyright (C) 1994-2013 Free Software Foundation, Inc.
+
+# This Makefile.in is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
+# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE.
+
+@SET_MAKE@
+
+# SPDX-License-Identifier: GPL-3.0-or-later
+VPATH = @srcdir@
+am__is_gnu_make = test -n '$(MAKEFILE_LIST)' && test -n '$(MAKELEVEL)'
+am__make_running_with_option = \
+ case $${target_option-} in \
+ ?) ;; \
+ *) echo "am__make_running_with_option: internal error: invalid" \
+ "target option '$${target_option-}' specified" >&2; \
+ exit 1;; \
+ esac; \
+ has_opt=no; \
+ sane_makeflags=$$MAKEFLAGS; \
+ if $(am__is_gnu_make); then \
+ sane_makeflags=$$MFLAGS; \
+ else \
+ case $$MAKEFLAGS in \
+ *\\[\ \ ]*) \
+ bs=\\; \
+ sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \
+ | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \
+ esac; \
+ fi; \
+ skip_next=no; \
+ strip_trailopt () \
+ { \
+ flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \
+ }; \
+ for flg in $$sane_makeflags; do \
+ test $$skip_next = yes && { skip_next=no; continue; }; \
+ case $$flg in \
+ *=*|--*) continue;; \
+ -*I) strip_trailopt 'I'; skip_next=yes;; \
+ -*I?*) strip_trailopt 'I';; \
+ -*O) strip_trailopt 'O'; skip_next=yes;; \
+ -*O?*) strip_trailopt 'O';; \
+ -*l) strip_trailopt 'l'; skip_next=yes;; \
+ -*l?*) strip_trailopt 'l';; \
+ -[dEDm]) skip_next=yes;; \
+ -[JT]) skip_next=yes;; \
+ esac; \
+ case $$flg in \
+ *$$target_option*) has_opt=yes; break;; \
+ esac; \
+ done; \
+ test $$has_opt = yes
+am__make_dryrun = (target_option=n; $(am__make_running_with_option))
+am__make_keepgoing = (target_option=k; $(am__make_running_with_option))
+pkgdatadir = $(datadir)/@PACKAGE@
+pkgincludedir = $(includedir)/@PACKAGE@
+pkglibdir = $(libdir)/@PACKAGE@
+pkglibexecdir = $(libexecdir)/@PACKAGE@
+am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd
+install_sh_DATA = $(install_sh) -c -m 644
+install_sh_PROGRAM = $(install_sh) -c
+install_sh_SCRIPT = $(install_sh) -c
+INSTALL_HEADER = $(INSTALL_DATA)
+transform = $(program_transform_name)
+NORMAL_INSTALL = :
+PRE_INSTALL = :
+POST_INSTALL = :
+NORMAL_UNINSTALL = :
+PRE_UNINSTALL = :
+POST_UNINSTALL = :
+build_triplet = @build@
+host_triplet = @host@
+subdir = collectors/checks.plugin
+DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/Makefile.am
+ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
+am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \
+ $(top_srcdir)/build/m4/ax_c__generic.m4 \
+ $(top_srcdir)/build/m4/ax_c_lto.m4 \
+ $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \
+ $(top_srcdir)/build/m4/ax_c_mallopt.m4 \
+ $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \
+ $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \
+ $(top_srcdir)/build/m4/ax_pthread.m4 \
+ $(top_srcdir)/build/m4/jemalloc.m4 \
+ $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac
+am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
+ $(ACLOCAL_M4)
+mkinstalldirs = $(install_sh) -d
+CONFIG_HEADER = $(top_builddir)/config.h
+CONFIG_CLEAN_FILES =
+CONFIG_CLEAN_VPATH_FILES =
+AM_V_P = $(am__v_P_@AM_V@)
+am__v_P_ = $(am__v_P_@AM_DEFAULT_V@)
+am__v_P_0 = false
+am__v_P_1 = :
+AM_V_GEN = $(am__v_GEN_@AM_V@)
+am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@)
+am__v_GEN_0 = @echo " GEN " $@;
+am__v_GEN_1 =
+AM_V_at = $(am__v_at_@AM_V@)
+am__v_at_ = $(am__v_at_@AM_DEFAULT_V@)
+am__v_at_0 = @
+am__v_at_1 =
+SOURCES =
+DIST_SOURCES =
+am__can_run_installinfo = \
+ case $$AM_UPDATE_INFO_DIR in \
+ n|no|NO) false;; \
+ *) (install-info --version) >/dev/null 2>&1;; \
+ esac
+am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP)
+DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
+ACLOCAL = @ACLOCAL@
+AMTAR = @AMTAR@
+AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@
+AUTOCONF = @AUTOCONF@
+AUTOHEADER = @AUTOHEADER@
+AUTOMAKE = @AUTOMAKE@
+AWK = @AWK@
+CC = @CC@
+CCDEPMODE = @CCDEPMODE@
+CFLAGS = @CFLAGS@
+CPP = @CPP@
+CPPFLAGS = @CPPFLAGS@
+CYGPATH_W = @CYGPATH_W@
+DEFS = @DEFS@
+DEPDIR = @DEPDIR@
+ECHO_C = @ECHO_C@
+ECHO_N = @ECHO_N@
+ECHO_T = @ECHO_T@
+EGREP = @EGREP@
+EXEEXT = @EXEEXT@
+GREP = @GREP@
+INSTALL = @INSTALL@
+INSTALL_DATA = @INSTALL_DATA@
+INSTALL_PROGRAM = @INSTALL_PROGRAM@
+INSTALL_SCRIPT = @INSTALL_SCRIPT@
+INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@
+IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@
+LDFLAGS = @LDFLAGS@
+LIBCAP_CFLAGS = @LIBCAP_CFLAGS@
+LIBCAP_LIBS = @LIBCAP_LIBS@
+LIBMNL_CFLAGS = @LIBMNL_CFLAGS@
+LIBMNL_LIBS = @LIBMNL_LIBS@
+LIBOBJS = @LIBOBJS@
+LIBS = @LIBS@
+LTLIBOBJS = @LTLIBOBJS@
+MAINT = @MAINT@
+MAKEINFO = @MAKEINFO@
+MATH_CFLAGS = @MATH_CFLAGS@
+MATH_LIBS = @MATH_LIBS@
+MKDIR_P = @MKDIR_P@
+NFACCT_CFLAGS = @NFACCT_CFLAGS@
+NFACCT_LIBS = @NFACCT_LIBS@
+OBJEXT = @OBJEXT@
+OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@
+OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@
+OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@
+OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@
+OPTIONAL_MATH_CLFAGS = @OPTIONAL_MATH_CLFAGS@
+OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@
+OPTIONAL_NFACCT_CLFAGS = @OPTIONAL_NFACCT_CLFAGS@
+OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@
+OPTIONAL_UUID_CLFAGS = @OPTIONAL_UUID_CLFAGS@
+OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@
+OPTIONAL_ZLIB_CLFAGS = @OPTIONAL_ZLIB_CLFAGS@
+OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@
+PACKAGE = @PACKAGE@
+PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@
+PACKAGE_NAME = @PACKAGE_NAME@
+PACKAGE_RPM_RELEASE = @PACKAGE_RPM_RELEASE@
+PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@
+PACKAGE_STRING = @PACKAGE_STRING@
+PACKAGE_TARNAME = @PACKAGE_TARNAME@
+PACKAGE_URL = @PACKAGE_URL@
+PACKAGE_VERSION = @PACKAGE_VERSION@
+PATH_SEPARATOR = @PATH_SEPARATOR@
+PKG_CONFIG = @PKG_CONFIG@
+PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@
+PKG_CONFIG_PATH = @PKG_CONFIG_PATH@
+PTHREAD_CC = @PTHREAD_CC@
+PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
+PTHREAD_LIBS = @PTHREAD_LIBS@
+SET_MAKE = @SET_MAKE@
+SHELL = @SHELL@
+SSE_CANDIDATE = @SSE_CANDIDATE@
+STRIP = @STRIP@
+UUID_CFLAGS = @UUID_CFLAGS@
+UUID_LIBS = @UUID_LIBS@
+VERSION = @VERSION@
+ZLIB_CFLAGS = @ZLIB_CFLAGS@
+ZLIB_LIBS = @ZLIB_LIBS@
+abs_builddir = @abs_builddir@
+abs_srcdir = @abs_srcdir@
+abs_top_builddir = @abs_top_builddir@
+abs_top_srcdir = @abs_top_srcdir@
+ac_ct_CC = @ac_ct_CC@
+am__include = @am__include@
+am__leading_dot = @am__leading_dot@
+am__quote = @am__quote@
+am__tar = @am__tar@
+am__untar = @am__untar@
+ax_pthread_config = @ax_pthread_config@
+bindir = @bindir@
+build = @build@
+build_alias = @build_alias@
+build_cpu = @build_cpu@
+build_os = @build_os@
+build_target = @build_target@
+build_vendor = @build_vendor@
+builddir = @builddir@
+cachedir = @cachedir@
+chartsdir = @chartsdir@
+configdir = @configdir@
+datadir = @datadir@
+datarootdir = @datarootdir@
+docdir = @docdir@
+dvidir = @dvidir@
+exec_prefix = @exec_prefix@
+has_jemalloc = @has_jemalloc@
+has_tcmalloc = @has_tcmalloc@
+host = @host@
+host_alias = @host_alias@
+host_cpu = @host_cpu@
+host_os = @host_os@
+host_vendor = @host_vendor@
+htmldir = @htmldir@
+includedir = @includedir@
+infodir = @infodir@
+install_sh = @install_sh@
+libconfigdir = @libconfigdir@
+libdir = @libdir@
+libexecdir = @libexecdir@
+localedir = @localedir@
+localstatedir = @localstatedir@
+logdir = @logdir@
+mandir = @mandir@
+mkdir_p = @mkdir_p@
+nodedir = @nodedir@
+oldincludedir = @oldincludedir@
+pdfdir = @pdfdir@
+pluginsdir = @pluginsdir@
+prefix = @prefix@
+program_transform_name = @program_transform_name@
+psdir = @psdir@
+pythondir = @pythondir@
+registrydir = @registrydir@
+sbindir = @sbindir@
+sharedstatedir = @sharedstatedir@
+srcdir = @srcdir@
+sysconfdir = @sysconfdir@
+target_alias = @target_alias@
+top_build_prefix = @top_build_prefix@
+top_builddir = @top_builddir@
+top_srcdir = @top_srcdir@
+varlibdir = @varlibdir@
+webdir = @webdir@
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+all: all-am
+
+.SUFFIXES:
+$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps)
+ @for dep in $?; do \
+ case '$(am__configure_deps)' in \
+ *$$dep*) \
+ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \
+ && { if test -f $@; then exit 0; else break; fi; }; \
+ exit 1;; \
+ esac; \
+ done; \
+ echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu collectors/checks.plugin/Makefile'; \
+ $(am__cd) $(top_srcdir) && \
+ $(AUTOMAKE) --gnu collectors/checks.plugin/Makefile
+.PRECIOUS: Makefile
+Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
+ @case '$?' in \
+ *config.status*) \
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \
+ *) \
+ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \
+ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \
+ esac;
+
+$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+
+$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(am__aclocal_m4_deps):
+tags TAGS:
+
+ctags CTAGS:
+
+cscope cscopelist:
+
+
+distdir: $(DISTFILES)
+ @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+ list='$(DISTFILES)'; \
+ dist_files=`for file in $$list; do echo $$file; done | \
+ sed -e "s|^$$srcdirstrip/||;t" \
+ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \
+ case $$dist_files in \
+ */*) $(MKDIR_P) `echo "$$dist_files" | \
+ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \
+ sort -u` ;; \
+ esac; \
+ for file in $$dist_files; do \
+ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \
+ if test -d $$d/$$file; then \
+ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \
+ if test -d "$(distdir)/$$file"; then \
+ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+ fi; \
+ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \
+ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \
+ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+ fi; \
+ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \
+ else \
+ test -f "$(distdir)/$$file" \
+ || cp -p $$d/$$file "$(distdir)/$$file" \
+ || exit 1; \
+ fi; \
+ done
+check-am: all-am
+check: check-am
+all-am: Makefile
+installdirs:
+install: install-am
+install-exec: install-exec-am
+install-data: install-data-am
+uninstall: uninstall-am
+
+install-am: all-am
+ @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
+
+installcheck: installcheck-am
+install-strip:
+ if test -z '$(STRIP)'; then \
+ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+ install; \
+ else \
+ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+ "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \
+ fi
+mostlyclean-generic:
+
+clean-generic:
+
+distclean-generic:
+ -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
+ -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES)
+
+maintainer-clean-generic:
+ @echo "This command is intended for maintainers to use"
+ @echo "it deletes files that may require special tools to rebuild."
+ -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES)
+clean: clean-am
+
+clean-am: clean-generic mostlyclean-am
+
+distclean: distclean-am
+ -rm -f Makefile
+distclean-am: clean-am distclean-generic
+
+dvi: dvi-am
+
+dvi-am:
+
+html: html-am
+
+html-am:
+
+info: info-am
+
+info-am:
+
+install-data-am:
+
+install-dvi: install-dvi-am
+
+install-dvi-am:
+
+install-exec-am:
+
+install-html: install-html-am
+
+install-html-am:
+
+install-info: install-info-am
+
+install-info-am:
+
+install-man:
+
+install-pdf: install-pdf-am
+
+install-pdf-am:
+
+install-ps: install-ps-am
+
+install-ps-am:
+
+installcheck-am:
+
+maintainer-clean: maintainer-clean-am
+ -rm -f Makefile
+maintainer-clean-am: distclean-am maintainer-clean-generic
+
+mostlyclean: mostlyclean-am
+
+mostlyclean-am: mostlyclean-generic
+
+pdf: pdf-am
+
+pdf-am:
+
+ps: ps-am
+
+ps-am:
+
+uninstall-am:
+
+.MAKE: install-am install-strip
+
+.PHONY: all all-am check check-am clean clean-generic cscopelist-am \
+ ctags-am distclean distclean-generic distdir dvi dvi-am html \
+ html-am info info-am install install-am install-data \
+ install-data-am install-dvi install-dvi-am install-exec \
+ install-exec-am install-html install-html-am install-info \
+ install-info-am install-man install-pdf install-pdf-am \
+ install-ps install-ps-am install-strip installcheck \
+ installcheck-am installdirs maintainer-clean \
+ maintainer-clean-generic mostlyclean mostlyclean-generic pdf \
+ pdf-am ps ps-am tags-am uninstall uninstall-am
+
+
+# Tell versions [3.59,3.63) of GNU make to not export all variables.
+# Otherwise a system limit (for SysV at least) may be exceeded.
+.NOEXPORT:
diff --git a/collectors/checks.plugin/plugin_checks.c b/collectors/checks.plugin/plugin_checks.c
new file mode 100644
index 000000000..f8a2008a8
--- /dev/null
+++ b/collectors/checks.plugin/plugin_checks.c
@@ -0,0 +1,129 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "plugin_checks.h"
+
+#ifdef NETDATA_INTERNAL_CHECKS
+
+static void checks_main_cleanup(void *ptr) {
+ struct netdata_static_thread *static_thread = (struct netdata_static_thread *)ptr;
+ static_thread->enabled = NETDATA_MAIN_THREAD_EXITING;
+
+ info("cleaning up...");
+
+ static_thread->enabled = NETDATA_MAIN_THREAD_EXITED;
+}
+
+void *checks_main(void *ptr) {
+ netdata_thread_cleanup_push(checks_main_cleanup, ptr);
+
+ usec_t usec = 0, susec = localhost->rrd_update_every * USEC_PER_SEC, loop_usec = 0, total_susec = 0;
+ struct timeval now, last, loop;
+
+ RRDSET *check1, *check2, *check3, *apps_cpu = NULL;
+
+ check1 = rrdset_create_localhost(
+ "netdata"
+ , "check1"
+ , NULL
+ , "netdata"
+ , NULL
+ , "Caller gives microseconds"
+ , "a million !"
+ , "checks.plugin"
+ , ""
+ , NETDATA_CHART_PRIO_CHECKS
+ , localhost->rrd_update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ rrddim_add(check1, "absolute", NULL, -1, 1, RRD_ALGORITHM_ABSOLUTE);
+ rrddim_add(check1, "incremental", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+
+ check2 = rrdset_create_localhost(
+ "netdata"
+ , "check2"
+ , NULL
+ , "netdata"
+ , NULL
+ , "Netdata calcs microseconds"
+ , "a million !"
+ , "checks.plugin"
+ , ""
+ , NETDATA_CHART_PRIO_CHECKS
+ , localhost->rrd_update_every
+ , RRDSET_TYPE_LINE
+ );
+ rrddim_add(check2, "absolute", NULL, -1, 1, RRD_ALGORITHM_ABSOLUTE);
+ rrddim_add(check2, "incremental", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+
+ check3 = rrdset_create_localhost(
+ "netdata"
+ , "checkdt"
+ , NULL
+ , "netdata"
+ , NULL
+ , "Clock difference"
+ , "microseconds diff"
+ , "checks.plugin"
+ , ""
+ , NETDATA_CHART_PRIO_CHECKS
+ , localhost->rrd_update_every
+ , RRDSET_TYPE_LINE
+ );
+ rrddim_add(check3, "caller", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ rrddim_add(check3, "netdata", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ rrddim_add(check3, "apps.plugin", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+
+ now_realtime_timeval(&last);
+ while(!netdata_exit) {
+ usleep(susec);
+
+ // find the time to sleep in order to wait exactly update_every seconds
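+ // loop_usec is the duration of the whole last iteration; usec is the part
+ // spent working (beyond the sleep); shorten the next sleep accordingly,
+ // but never sleep less than half of the update interval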
+ now_realtime_timeval(&now);
+ loop_usec = dt_usec(&now, &last);
+ usec = loop_usec - susec;
+ debug(D_PROCNETDEV_LOOP, "CHECK: last loop took %llu usec (worked for %llu, slept for %llu).", loop_usec, usec, susec);
+
+ if(usec < (localhost->rrd_update_every * USEC_PER_SEC / 2ULL)) susec = (localhost->rrd_update_every * USEC_PER_SEC) - usec;
+ else susec = localhost->rrd_update_every * USEC_PER_SEC / 2ULL;
+
+ // --------------------------------------------------------------------
+ // Calculate loop time
+
+ last.tv_sec = now.tv_sec;
+ last.tv_usec = now.tv_usec;
+ total_susec += loop_usec;
+
+ // --------------------------------------------------------------------
+ // check chart 1
+
+ if(check1->counter_done) rrdset_next_usec(check1, loop_usec);
+ rrddim_set(check1, "absolute", 1000000);
+ rrddim_set(check1, "incremental", total_susec);
+ rrdset_done(check1);
+
+ // --------------------------------------------------------------------
+ // check chart 2
+
+ if(check2->counter_done) rrdset_next(check2);
+ rrddim_set(check2, "absolute", 1000000);
+ rrddim_set(check2, "incremental", total_susec);
+ rrdset_done(check2);
+
+ // --------------------------------------------------------------------
+ // check chart 3
+
+ if(!apps_cpu) apps_cpu = rrdset_find_localhost("apps.cpu");
+ if(check3->counter_done) rrdset_next_usec(check3, loop_usec);
+ now_realtime_timeval(&loop);
+ rrddim_set(check3, "caller", (long long) dt_usec(&loop, &check1->last_collected_time));
+ rrddim_set(check3, "netdata", (long long) dt_usec(&loop, &check2->last_collected_time));
+ if(apps_cpu) rrddim_set(check3, "apps.plugin", (long long) dt_usec(&loop, &apps_cpu->last_collected_time));
+ rrdset_done(check3);
+ }
+
+ netdata_thread_cleanup_pop(1);
+ return NULL;
+}
+
+#endif // NETDATA_INTERNAL_CHECKS
diff --git a/collectors/checks.plugin/plugin_checks.h b/collectors/checks.plugin/plugin_checks.h
new file mode 100644
index 000000000..93494765d
--- /dev/null
+++ b/collectors/checks.plugin/plugin_checks.h
@@ -0,0 +1,29 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_PLUGIN_CHECKS_H
+#define NETDATA_PLUGIN_CHECKS_H 1
+
+#include "../../daemon/common.h"
+
+#ifdef NETDATA_INTERNAL_CHECKS
+
+#define NETDATA_PLUGIN_HOOK_CHECKS \
+ { \
+ .name = "PLUGIN[check]", \
+ .config_section = CONFIG_SECTION_PLUGINS, \
+ .config_name = "checks", \
+ .enabled = 0, \
+ .thread = NULL, \
+ .init_routine = NULL, \
+ .start_routine = checks_main \
+ },
+
+extern void *checks_main(void *ptr);
+
+#else // !NETDATA_INTERNAL_CHECKS
+
+#define NETDATA_PLUGIN_HOOK_CHECKS
+
+#endif // NETDATA_INTERNAL_CHECKS
+
+#endif // NETDATA_PLUGIN_CHECKS_H
diff --git a/collectors/diskspace.plugin/Makefile.am b/collectors/diskspace.plugin/Makefile.am
new file mode 100644
index 000000000..19554bed8
--- /dev/null
+++ b/collectors/diskspace.plugin/Makefile.am
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+
+dist_noinst_DATA = \
+ README.md \
+ $(NULL)
diff --git a/collectors/diskspace.plugin/Makefile.in b/collectors/diskspace.plugin/Makefile.in
new file mode 100644
index 000000000..ceebc5455
--- /dev/null
+++ b/collectors/diskspace.plugin/Makefile.in
@@ -0,0 +1,464 @@
+# Makefile.in generated by automake 1.14.1 from Makefile.am.
+# @configure_input@
+
+# Copyright (C) 1994-2013 Free Software Foundation, Inc.
+
+# This Makefile.in is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
+# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE.
+
+@SET_MAKE@
+
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+VPATH = @srcdir@
+am__is_gnu_make = test -n '$(MAKEFILE_LIST)' && test -n '$(MAKELEVEL)'
+am__make_running_with_option = \
+ case $${target_option-} in \
+ ?) ;; \
+ *) echo "am__make_running_with_option: internal error: invalid" \
+ "target option '$${target_option-}' specified" >&2; \
+ exit 1;; \
+ esac; \
+ has_opt=no; \
+ sane_makeflags=$$MAKEFLAGS; \
+ if $(am__is_gnu_make); then \
+ sane_makeflags=$$MFLAGS; \
+ else \
+ case $$MAKEFLAGS in \
+ *\\[\ \ ]*) \
+ bs=\\; \
+ sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \
+ | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \
+ esac; \
+ fi; \
+ skip_next=no; \
+ strip_trailopt () \
+ { \
+ flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \
+ }; \
+ for flg in $$sane_makeflags; do \
+ test $$skip_next = yes && { skip_next=no; continue; }; \
+ case $$flg in \
+ *=*|--*) continue;; \
+ -*I) strip_trailopt 'I'; skip_next=yes;; \
+ -*I?*) strip_trailopt 'I';; \
+ -*O) strip_trailopt 'O'; skip_next=yes;; \
+ -*O?*) strip_trailopt 'O';; \
+ -*l) strip_trailopt 'l'; skip_next=yes;; \
+ -*l?*) strip_trailopt 'l';; \
+ -[dEDm]) skip_next=yes;; \
+ -[JT]) skip_next=yes;; \
+ esac; \
+ case $$flg in \
+ *$$target_option*) has_opt=yes; break;; \
+ esac; \
+ done; \
+ test $$has_opt = yes
+am__make_dryrun = (target_option=n; $(am__make_running_with_option))
+am__make_keepgoing = (target_option=k; $(am__make_running_with_option))
+pkgdatadir = $(datadir)/@PACKAGE@
+pkgincludedir = $(includedir)/@PACKAGE@
+pkglibdir = $(libdir)/@PACKAGE@
+pkglibexecdir = $(libexecdir)/@PACKAGE@
+am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd
+install_sh_DATA = $(install_sh) -c -m 644
+install_sh_PROGRAM = $(install_sh) -c
+install_sh_SCRIPT = $(install_sh) -c
+INSTALL_HEADER = $(INSTALL_DATA)
+transform = $(program_transform_name)
+NORMAL_INSTALL = :
+PRE_INSTALL = :
+POST_INSTALL = :
+NORMAL_UNINSTALL = :
+PRE_UNINSTALL = :
+POST_UNINSTALL = :
+build_triplet = @build@
+host_triplet = @host@
+subdir = collectors/diskspace.plugin
+DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/Makefile.am \
+ $(dist_noinst_DATA)
+ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
+am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \
+ $(top_srcdir)/build/m4/ax_c__generic.m4 \
+ $(top_srcdir)/build/m4/ax_c_lto.m4 \
+ $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \
+ $(top_srcdir)/build/m4/ax_c_mallopt.m4 \
+ $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \
+ $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \
+ $(top_srcdir)/build/m4/ax_pthread.m4 \
+ $(top_srcdir)/build/m4/jemalloc.m4 \
+ $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac
+am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
+ $(ACLOCAL_M4)
+mkinstalldirs = $(install_sh) -d
+CONFIG_HEADER = $(top_builddir)/config.h
+CONFIG_CLEAN_FILES =
+CONFIG_CLEAN_VPATH_FILES =
+AM_V_P = $(am__v_P_@AM_V@)
+am__v_P_ = $(am__v_P_@AM_DEFAULT_V@)
+am__v_P_0 = false
+am__v_P_1 = :
+AM_V_GEN = $(am__v_GEN_@AM_V@)
+am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@)
+am__v_GEN_0 = @echo " GEN " $@;
+am__v_GEN_1 =
+AM_V_at = $(am__v_at_@AM_V@)
+am__v_at_ = $(am__v_at_@AM_DEFAULT_V@)
+am__v_at_0 = @
+am__v_at_1 =
+SOURCES =
+DIST_SOURCES =
+am__can_run_installinfo = \
+ case $$AM_UPDATE_INFO_DIR in \
+ n|no|NO) false;; \
+ *) (install-info --version) >/dev/null 2>&1;; \
+ esac
+DATA = $(dist_noinst_DATA)
+am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP)
+DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
+ACLOCAL = @ACLOCAL@
+AMTAR = @AMTAR@
+AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@
+AUTOCONF = @AUTOCONF@
+AUTOHEADER = @AUTOHEADER@
+AUTOMAKE = @AUTOMAKE@
+AWK = @AWK@
+CC = @CC@
+CCDEPMODE = @CCDEPMODE@
+CFLAGS = @CFLAGS@
+CPP = @CPP@
+CPPFLAGS = @CPPFLAGS@
+CYGPATH_W = @CYGPATH_W@
+DEFS = @DEFS@
+DEPDIR = @DEPDIR@
+ECHO_C = @ECHO_C@
+ECHO_N = @ECHO_N@
+ECHO_T = @ECHO_T@
+EGREP = @EGREP@
+EXEEXT = @EXEEXT@
+GREP = @GREP@
+INSTALL = @INSTALL@
+INSTALL_DATA = @INSTALL_DATA@
+INSTALL_PROGRAM = @INSTALL_PROGRAM@
+INSTALL_SCRIPT = @INSTALL_SCRIPT@
+INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@
+IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@
+LDFLAGS = @LDFLAGS@
+LIBCAP_CFLAGS = @LIBCAP_CFLAGS@
+LIBCAP_LIBS = @LIBCAP_LIBS@
+LIBMNL_CFLAGS = @LIBMNL_CFLAGS@
+LIBMNL_LIBS = @LIBMNL_LIBS@
+LIBOBJS = @LIBOBJS@
+LIBS = @LIBS@
+LTLIBOBJS = @LTLIBOBJS@
+MAINT = @MAINT@
+MAKEINFO = @MAKEINFO@
+MATH_CFLAGS = @MATH_CFLAGS@
+MATH_LIBS = @MATH_LIBS@
+MKDIR_P = @MKDIR_P@
+NFACCT_CFLAGS = @NFACCT_CFLAGS@
+NFACCT_LIBS = @NFACCT_LIBS@
+OBJEXT = @OBJEXT@
+OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@
+OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@
+OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@
+OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@
+OPTIONAL_MATH_CLFAGS = @OPTIONAL_MATH_CLFAGS@
+OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@
+OPTIONAL_NFACCT_CLFAGS = @OPTIONAL_NFACCT_CLFAGS@
+OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@
+OPTIONAL_UUID_CLFAGS = @OPTIONAL_UUID_CLFAGS@
+OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@
+OPTIONAL_ZLIB_CLFAGS = @OPTIONAL_ZLIB_CLFAGS@
+OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@
+PACKAGE = @PACKAGE@
+PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@
+PACKAGE_NAME = @PACKAGE_NAME@
+PACKAGE_RPM_RELEASE = @PACKAGE_RPM_RELEASE@
+PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@
+PACKAGE_STRING = @PACKAGE_STRING@
+PACKAGE_TARNAME = @PACKAGE_TARNAME@
+PACKAGE_URL = @PACKAGE_URL@
+PACKAGE_VERSION = @PACKAGE_VERSION@
+PATH_SEPARATOR = @PATH_SEPARATOR@
+PKG_CONFIG = @PKG_CONFIG@
+PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@
+PKG_CONFIG_PATH = @PKG_CONFIG_PATH@
+PTHREAD_CC = @PTHREAD_CC@
+PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
+PTHREAD_LIBS = @PTHREAD_LIBS@
+SET_MAKE = @SET_MAKE@
+SHELL = @SHELL@
+SSE_CANDIDATE = @SSE_CANDIDATE@
+STRIP = @STRIP@
+UUID_CFLAGS = @UUID_CFLAGS@
+UUID_LIBS = @UUID_LIBS@
+VERSION = @VERSION@
+ZLIB_CFLAGS = @ZLIB_CFLAGS@
+ZLIB_LIBS = @ZLIB_LIBS@
+abs_builddir = @abs_builddir@
+abs_srcdir = @abs_srcdir@
+abs_top_builddir = @abs_top_builddir@
+abs_top_srcdir = @abs_top_srcdir@
+ac_ct_CC = @ac_ct_CC@
+am__include = @am__include@
+am__leading_dot = @am__leading_dot@
+am__quote = @am__quote@
+am__tar = @am__tar@
+am__untar = @am__untar@
+ax_pthread_config = @ax_pthread_config@
+bindir = @bindir@
+build = @build@
+build_alias = @build_alias@
+build_cpu = @build_cpu@
+build_os = @build_os@
+build_target = @build_target@
+build_vendor = @build_vendor@
+builddir = @builddir@
+cachedir = @cachedir@
+chartsdir = @chartsdir@
+configdir = @configdir@
+datadir = @datadir@
+datarootdir = @datarootdir@
+docdir = @docdir@
+dvidir = @dvidir@
+exec_prefix = @exec_prefix@
+has_jemalloc = @has_jemalloc@
+has_tcmalloc = @has_tcmalloc@
+host = @host@
+host_alias = @host_alias@
+host_cpu = @host_cpu@
+host_os = @host_os@
+host_vendor = @host_vendor@
+htmldir = @htmldir@
+includedir = @includedir@
+infodir = @infodir@
+install_sh = @install_sh@
+libconfigdir = @libconfigdir@
+libdir = @libdir@
+libexecdir = @libexecdir@
+localedir = @localedir@
+localstatedir = @localstatedir@
+logdir = @logdir@
+mandir = @mandir@
+mkdir_p = @mkdir_p@
+nodedir = @nodedir@
+oldincludedir = @oldincludedir@
+pdfdir = @pdfdir@
+pluginsdir = @pluginsdir@
+prefix = @prefix@
+program_transform_name = @program_transform_name@
+psdir = @psdir@
+pythondir = @pythondir@
+registrydir = @registrydir@
+sbindir = @sbindir@
+sharedstatedir = @sharedstatedir@
+srcdir = @srcdir@
+sysconfdir = @sysconfdir@
+target_alias = @target_alias@
+top_build_prefix = @top_build_prefix@
+top_builddir = @top_builddir@
+top_srcdir = @top_srcdir@
+varlibdir = @varlibdir@
+webdir = @webdir@
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+dist_noinst_DATA = \
+ README.md \
+ $(NULL)
+
+all: all-am
+
+.SUFFIXES:
+$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps)
+ @for dep in $?; do \
+ case '$(am__configure_deps)' in \
+ *$$dep*) \
+ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \
+ && { if test -f $@; then exit 0; else break; fi; }; \
+ exit 1;; \
+ esac; \
+ done; \
+ echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu collectors/diskspace.plugin/Makefile'; \
+ $(am__cd) $(top_srcdir) && \
+ $(AUTOMAKE) --gnu collectors/diskspace.plugin/Makefile
+.PRECIOUS: Makefile
+Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
+ @case '$?' in \
+ *config.status*) \
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \
+ *) \
+ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \
+ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \
+ esac;
+
+$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+
+$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(am__aclocal_m4_deps):
+tags TAGS:
+
+ctags CTAGS:
+
+cscope cscopelist:
+
+
+distdir: $(DISTFILES)
+ @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+ list='$(DISTFILES)'; \
+ dist_files=`for file in $$list; do echo $$file; done | \
+ sed -e "s|^$$srcdirstrip/||;t" \
+ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \
+ case $$dist_files in \
+ */*) $(MKDIR_P) `echo "$$dist_files" | \
+ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \
+ sort -u` ;; \
+ esac; \
+ for file in $$dist_files; do \
+ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \
+ if test -d $$d/$$file; then \
+ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \
+ if test -d "$(distdir)/$$file"; then \
+ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+ fi; \
+ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \
+ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \
+ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+ fi; \
+ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \
+ else \
+ test -f "$(distdir)/$$file" \
+ || cp -p $$d/$$file "$(distdir)/$$file" \
+ || exit 1; \
+ fi; \
+ done
+check-am: all-am
+check: check-am
+all-am: Makefile $(DATA)
+installdirs:
+install: install-am
+install-exec: install-exec-am
+install-data: install-data-am
+uninstall: uninstall-am
+
+install-am: all-am
+ @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
+
+installcheck: installcheck-am
+install-strip:
+ if test -z '$(STRIP)'; then \
+ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+ install; \
+ else \
+ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+ "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \
+ fi
+mostlyclean-generic:
+
+clean-generic:
+
+distclean-generic:
+ -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
+ -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES)
+
+maintainer-clean-generic:
+ @echo "This command is intended for maintainers to use"
+ @echo "it deletes files that may require special tools to rebuild."
+ -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES)
+clean: clean-am
+
+clean-am: clean-generic mostlyclean-am
+
+distclean: distclean-am
+ -rm -f Makefile
+distclean-am: clean-am distclean-generic
+
+dvi: dvi-am
+
+dvi-am:
+
+html: html-am
+
+html-am:
+
+info: info-am
+
+info-am:
+
+install-data-am:
+
+install-dvi: install-dvi-am
+
+install-dvi-am:
+
+install-exec-am:
+
+install-html: install-html-am
+
+install-html-am:
+
+install-info: install-info-am
+
+install-info-am:
+
+install-man:
+
+install-pdf: install-pdf-am
+
+install-pdf-am:
+
+install-ps: install-ps-am
+
+install-ps-am:
+
+installcheck-am:
+
+maintainer-clean: maintainer-clean-am
+ -rm -f Makefile
+maintainer-clean-am: distclean-am maintainer-clean-generic
+
+mostlyclean: mostlyclean-am
+
+mostlyclean-am: mostlyclean-generic
+
+pdf: pdf-am
+
+pdf-am:
+
+ps: ps-am
+
+ps-am:
+
+uninstall-am:
+
+.MAKE: install-am install-strip
+
+.PHONY: all all-am check check-am clean clean-generic cscopelist-am \
+ ctags-am distclean distclean-generic distdir dvi dvi-am html \
+ html-am info info-am install install-am install-data \
+ install-data-am install-dvi install-dvi-am install-exec \
+ install-exec-am install-html install-html-am install-info \
+ install-info-am install-man install-pdf install-pdf-am \
+ install-ps install-ps-am install-strip installcheck \
+ installcheck-am installdirs maintainer-clean \
+ maintainer-clean-generic mostlyclean mostlyclean-generic pdf \
+ pdf-am ps ps-am tags-am uninstall uninstall-am
+
+
+# Tell versions [3.59,3.63) of GNU make to not export all variables.
+# Otherwise a system limit (for SysV at least) may be exceeded.
+.NOEXPORT:
diff --git a/collectors/diskspace.plugin/README.md b/collectors/diskspace.plugin/README.md
new file mode 100644
index 000000000..74d6cde3c
--- /dev/null
+++ b/collectors/diskspace.plugin/README.md
@@ -0,0 +1,5 @@
+> for disk performance monitoring, see the `proc` plugin, [here](../proc.plugin/#monitoring-disks-performance-with-netdata)
+
+# diskspace.plugin
+
+This plugin monitors the disk space usage of mounted disks under Linux.
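+
+The plugin reads its settings from the `[plugin:proc:diskspace]` section of `netdata.conf`. A rough sketch of the options it understands (the option names come from `plugin_diskspace.c` in this tree; the values below are illustrative and mostly mirror the defaults in that code):
+
+```
+[plugin:proc:diskspace]
+    # update every = 1
+    # remove charts of unmounted disks = yes
+    # check for new mount points every = 15
+    # exclude space metrics on paths = /proc/* /sys/* /var/run/user/* /run/user/* /snap/* /var/lib/docker/*
+    # exclude space metrics on filesystems = *gvfs *gluster* *s3fs *ipfs *davfs2 *httpfs *sshfs *gdfs *moosefs fusectl
+    # space usage for all disks = auto
+    # inodes usage for all disks = auto
+```
+
+Per-mount-point overrides are also read from a `[plugin:proc:diskspace:/mount/point]` section (options `space usage` and `inodes usage`).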
diff --git a/collectors/diskspace.plugin/plugin_diskspace.c b/collectors/diskspace.plugin/plugin_diskspace.c
new file mode 100644
index 000000000..dca7c9076
--- /dev/null
+++ b/collectors/diskspace.plugin/plugin_diskspace.c
@@ -0,0 +1,465 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "plugin_diskspace.h"
+
+#define PLUGIN_DISKSPACE_NAME "diskspace.plugin"
+
+#define DEFAULT_EXCLUDED_PATHS "/proc/* /sys/* /var/run/user/* /run/user/* /snap/* /var/lib/docker/*"
+#define DEFAULT_EXCLUDED_FILESYSTEMS "*gvfs *gluster* *s3fs *ipfs *davfs2 *httpfs *sshfs *gdfs *moosefs fusectl"
+#define CONFIG_SECTION_DISKSPACE "plugin:proc:diskspace"
+
+static struct mountinfo *disk_mountinfo_root = NULL;
+static int check_for_new_mountpoints_every = 15;
+static int cleanup_mount_points = 1;
+
+static inline void mountinfo_reload(int force) {
+ static time_t last_loaded = 0;
+ time_t now = now_realtime_sec();
+
+ if(force || now - last_loaded >= check_for_new_mountpoints_every) {
+ // mountinfo_free_all() can be called with NULL disk_mountinfo_root
+ mountinfo_free_all(disk_mountinfo_root);
+
+ // re-read mountinfo in case something changed
+ disk_mountinfo_root = mountinfo_read(0);
+
+ last_loaded = now;
+ }
+}
+
+// Data to be stored in DICTIONARY dict_mountpoints used by do_disk_space_stats().
+// This DICTIONARY is used to lookup the settings of the mount point on each iteration.
+struct mount_point_metadata {
+ int do_space;
+ int do_inodes;
+ int shown_error;
+ int updated;
+
+ size_t collected; // the number of times this has been collected
+
+ RRDSET *st_space;
+ RRDDIM *rd_space_used;
+ RRDDIM *rd_space_avail;
+ RRDDIM *rd_space_reserved;
+
+ RRDSET *st_inodes;
+ RRDDIM *rd_inodes_used;
+ RRDDIM *rd_inodes_avail;
+ RRDDIM *rd_inodes_reserved;
+};
+
+static DICTIONARY *dict_mountpoints = NULL;
+
+#define rrdset_obsolete_and_pointer_null(st) do { if(st) { rrdset_is_obsolete(st); (st) = NULL; } } while(st)
+
+int mount_point_cleanup(void *entry, void *data) {
+ (void)data;
+
+ struct mount_point_metadata *mp = (struct mount_point_metadata *)entry;
+ if(!mp) return 0;
+
+ if(likely(mp->updated)) {
+ mp->updated = 0;
+ return 0;
+ }
+
+ if(likely(cleanup_mount_points && mp->collected)) {
+ mp->collected = 0;
+ mp->updated = 0;
+ mp->shown_error = 0;
+
+ mp->rd_space_avail = NULL;
+ mp->rd_space_used = NULL;
+ mp->rd_space_reserved = NULL;
+
+ mp->rd_inodes_avail = NULL;
+ mp->rd_inodes_used = NULL;
+ mp->rd_inodes_reserved = NULL;
+
+ rrdset_obsolete_and_pointer_null(mp->st_space);
+ rrdset_obsolete_and_pointer_null(mp->st_inodes);
+ }
+
+ return 0;
+}
+
+static inline void do_disk_space_stats(struct mountinfo *mi, int update_every) {
+ const char *family = mi->mount_point;
+ const char *disk = mi->persistent_id;
+
+ static SIMPLE_PATTERN *excluded_mountpoints = NULL;
+ static SIMPLE_PATTERN *excluded_filesystems = NULL;
+ int do_space, do_inodes;
+
+ if(unlikely(!dict_mountpoints)) {
+ SIMPLE_PREFIX_MODE mode = SIMPLE_PATTERN_EXACT;
+
+ if(config_move("plugin:proc:/proc/diskstats", "exclude space metrics on paths", CONFIG_SECTION_DISKSPACE, "exclude space metrics on paths") != -1) {
+ // old configuration, enable backwards compatibility
+ mode = SIMPLE_PATTERN_PREFIX;
+ }
+
+ excluded_mountpoints = simple_pattern_create(
+                config_get(CONFIG_SECTION_DISKSPACE, "exclude space metrics on paths", DEFAULT_EXCLUDED_PATHS)
+ , NULL
+ , mode
+ );
+
+ excluded_filesystems = simple_pattern_create(
+ config_get(CONFIG_SECTION_DISKSPACE, "exclude space metrics on filesystems", DEFAULT_EXCLUDED_FILESYSTEMS)
+ , NULL
+ , SIMPLE_PATTERN_EXACT
+ );
+
+ dict_mountpoints = dictionary_create(DICTIONARY_FLAG_SINGLE_THREADED);
+ }
+
+ struct mount_point_metadata *m = dictionary_get(dict_mountpoints, mi->mount_point);
+ if(unlikely(!m)) {
+ char var_name[4096 + 1];
+ snprintfz(var_name, 4096, "plugin:proc:diskspace:%s", mi->mount_point);
+
+ int def_space = config_get_boolean_ondemand(CONFIG_SECTION_DISKSPACE, "space usage for all disks", CONFIG_BOOLEAN_AUTO);
+ int def_inodes = config_get_boolean_ondemand(CONFIG_SECTION_DISKSPACE, "inodes usage for all disks", CONFIG_BOOLEAN_AUTO);
+
+ if(unlikely(simple_pattern_matches(excluded_mountpoints, mi->mount_point))) {
+ def_space = CONFIG_BOOLEAN_NO;
+ def_inodes = CONFIG_BOOLEAN_NO;
+ }
+
+ if(unlikely(simple_pattern_matches(excluded_filesystems, mi->filesystem))) {
+ def_space = CONFIG_BOOLEAN_NO;
+ def_inodes = CONFIG_BOOLEAN_NO;
+ }
+
+ // check if the mount point is a directory #2407
+ // but only when it is enabled by default #4491
+ if(def_space != CONFIG_BOOLEAN_NO || def_inodes != CONFIG_BOOLEAN_NO) {
+ struct stat bs;
+ if(stat(mi->mount_point, &bs) == -1) {
+ error("DISKSPACE: Cannot stat() mount point '%s' (disk '%s', filesystem '%s', root '%s')."
+ , mi->mount_point
+ , disk
+ , mi->filesystem?mi->filesystem:""
+ , mi->root?mi->root:""
+ );
+ def_space = CONFIG_BOOLEAN_NO;
+ def_inodes = CONFIG_BOOLEAN_NO;
+ }
+ else {
+ if((bs.st_mode & S_IFMT) != S_IFDIR) {
+ error("DISKSPACE: Mount point '%s' (disk '%s', filesystem '%s', root '%s') is not a directory."
+ , mi->mount_point
+ , disk
+ , mi->filesystem?mi->filesystem:""
+ , mi->root?mi->root:""
+ );
+ def_space = CONFIG_BOOLEAN_NO;
+ def_inodes = CONFIG_BOOLEAN_NO;
+ }
+ }
+ }
+
+ do_space = config_get_boolean_ondemand(var_name, "space usage", def_space);
+ do_inodes = config_get_boolean_ondemand(var_name, "inodes usage", def_inodes);
+
+ struct mount_point_metadata mp = {
+ .do_space = do_space,
+ .do_inodes = do_inodes,
+ .shown_error = 0,
+ .updated = 0,
+
+ .collected = 0,
+
+ .st_space = NULL,
+ .rd_space_avail = NULL,
+ .rd_space_used = NULL,
+ .rd_space_reserved = NULL,
+
+ .st_inodes = NULL,
+ .rd_inodes_avail = NULL,
+ .rd_inodes_used = NULL,
+ .rd_inodes_reserved = NULL
+ };
+
+ m = dictionary_set(dict_mountpoints, mi->mount_point, &mp, sizeof(struct mount_point_metadata));
+ }
+
+ m->updated = 1;
+
+ if(unlikely(m->do_space == CONFIG_BOOLEAN_NO && m->do_inodes == CONFIG_BOOLEAN_NO))
+ return;
+
+ if(unlikely(mi->flags & MOUNTINFO_READONLY && !m->collected))
+ return;
+
+ struct statvfs buff_statvfs;
+ if (statvfs(mi->mount_point, &buff_statvfs) < 0) {
+ if(!m->shown_error) {
+ error("DISKSPACE: failed to statvfs() mount point '%s' (disk '%s', filesystem '%s', root '%s')"
+ , mi->mount_point
+ , disk
+ , mi->filesystem?mi->filesystem:""
+ , mi->root?mi->root:""
+ );
+ m->shown_error = 1;
+ }
+ return;
+ }
+ m->shown_error = 0;
+
+ // logic found at get_fs_usage() in coreutils
+ unsigned long bsize = (buff_statvfs.f_frsize) ? buff_statvfs.f_frsize : buff_statvfs.f_bsize;
+
+ fsblkcnt_t bavail = buff_statvfs.f_bavail;
+ fsblkcnt_t btotal = buff_statvfs.f_blocks;
+ fsblkcnt_t bavail_root = buff_statvfs.f_bfree;
+ fsblkcnt_t breserved_root = bavail_root - bavail;
+ fsblkcnt_t bused;
+ if(likely(btotal >= bavail_root))
+ bused = btotal - bavail_root;
+ else
+ bused = bavail_root - btotal;
+
+#ifdef NETDATA_INTERNAL_CHECKS
+ if(unlikely(btotal != bavail + breserved_root + bused))
+ error("DISKSPACE: disk block statistics for '%s' (disk '%s') do not sum up: total = %llu, available = %llu, reserved = %llu, used = %llu", mi->mount_point, disk, (unsigned long long)btotal, (unsigned long long)bavail, (unsigned long long)breserved_root, (unsigned long long)bused);
+#endif
+
+ // --------------------------------------------------------------------------
+
+ fsfilcnt_t favail = buff_statvfs.f_favail;
+ fsfilcnt_t ftotal = buff_statvfs.f_files;
+ fsfilcnt_t favail_root = buff_statvfs.f_ffree;
+ fsfilcnt_t freserved_root = favail_root - favail;
+ fsfilcnt_t fused = ftotal - favail_root;
+
+ if(m->do_inodes == CONFIG_BOOLEAN_AUTO && favail == (fsfilcnt_t)-1) {
+        // this filesystem does not support inode reporting
+        // e.g. cephfs
+ m->do_inodes = CONFIG_BOOLEAN_NO;
+ }
+
+#ifdef NETDATA_INTERNAL_CHECKS
+    if(unlikely(ftotal != favail + freserved_root + fused))
+ error("DISKSPACE: disk inode statistics for '%s' (disk '%s') do not sum up: total = %llu, available = %llu, reserved = %llu, used = %llu", mi->mount_point, disk, (unsigned long long)ftotal, (unsigned long long)favail, (unsigned long long)freserved_root, (unsigned long long)fused);
+#endif
+
+ // --------------------------------------------------------------------------
+
+ int rendered = 0;
+
+ if(m->do_space == CONFIG_BOOLEAN_YES || (m->do_space == CONFIG_BOOLEAN_AUTO && (bavail || breserved_root || bused))) {
+ if(unlikely(!m->st_space)) {
+ m->do_space = CONFIG_BOOLEAN_YES;
+ m->st_space = rrdset_find_bytype_localhost("disk_space", disk);
+ if(unlikely(!m->st_space)) {
+ char title[4096 + 1];
+ snprintfz(title, 4096, "Disk Space Usage for %s [%s]", family, mi->mount_source);
+ m->st_space = rrdset_create_localhost(
+ "disk_space"
+ , disk
+ , NULL
+ , family
+ , "disk.space"
+ , title
+ , "GB"
+ , PLUGIN_DISKSPACE_NAME
+ , NULL
+ , NETDATA_CHART_PRIO_DISKSPACE_SPACE
+ , update_every
+ , RRDSET_TYPE_STACKED
+ );
+ }
+
+ m->rd_space_avail = rrddim_add(m->st_space, "avail", NULL, (collected_number)bsize, 1024 * 1024 * 1024, RRD_ALGORITHM_ABSOLUTE);
+ m->rd_space_used = rrddim_add(m->st_space, "used", NULL, (collected_number)bsize, 1024 * 1024 * 1024, RRD_ALGORITHM_ABSOLUTE);
+ m->rd_space_reserved = rrddim_add(m->st_space, "reserved_for_root", "reserved for root", (collected_number)bsize, 1024 * 1024 * 1024, RRD_ALGORITHM_ABSOLUTE);
+ }
+ else
+ rrdset_next(m->st_space);
+
+ rrddim_set_by_pointer(m->st_space, m->rd_space_avail, (collected_number)bavail);
+ rrddim_set_by_pointer(m->st_space, m->rd_space_used, (collected_number)bused);
+ rrddim_set_by_pointer(m->st_space, m->rd_space_reserved, (collected_number)breserved_root);
+ rrdset_done(m->st_space);
+
+ rendered++;
+ }
+
+ // --------------------------------------------------------------------------
+
+ if(m->do_inodes == CONFIG_BOOLEAN_YES || (m->do_inodes == CONFIG_BOOLEAN_AUTO && (favail || freserved_root || fused))) {
+ if(unlikely(!m->st_inodes)) {
+ m->do_inodes = CONFIG_BOOLEAN_YES;
+ m->st_inodes = rrdset_find_bytype_localhost("disk_inodes", disk);
+ if(unlikely(!m->st_inodes)) {
+ char title[4096 + 1];
+ snprintfz(title, 4096, "Disk Files (inodes) Usage for %s [%s]", family, mi->mount_source);
+ m->st_inodes = rrdset_create_localhost(
+ "disk_inodes"
+ , disk
+ , NULL
+ , family
+ , "disk.inodes"
+ , title
+ , "Inodes"
+ , PLUGIN_DISKSPACE_NAME
+ , NULL
+ , NETDATA_CHART_PRIO_DISKSPACE_INODES
+ , update_every
+ , RRDSET_TYPE_STACKED
+ );
+ }
+
+ m->rd_inodes_avail = rrddim_add(m->st_inodes, "avail", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ m->rd_inodes_used = rrddim_add(m->st_inodes, "used", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ m->rd_inodes_reserved = rrddim_add(m->st_inodes, "reserved_for_root", "reserved for root", 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ }
+ else
+ rrdset_next(m->st_inodes);
+
+ rrddim_set_by_pointer(m->st_inodes, m->rd_inodes_avail, (collected_number)favail);
+ rrddim_set_by_pointer(m->st_inodes, m->rd_inodes_used, (collected_number)fused);
+ rrddim_set_by_pointer(m->st_inodes, m->rd_inodes_reserved, (collected_number)freserved_root);
+ rrdset_done(m->st_inodes);
+
+ rendered++;
+ }
+
+ // --------------------------------------------------------------------------
+
+ if(likely(rendered))
+ m->collected++;
+}
+
+static void diskspace_main_cleanup(void *ptr) {
+ struct netdata_static_thread *static_thread = (struct netdata_static_thread *)ptr;
+ static_thread->enabled = NETDATA_MAIN_THREAD_EXITING;
+
+ info("cleaning up...");
+
+ static_thread->enabled = NETDATA_MAIN_THREAD_EXITED;
+}
+
+void *diskspace_main(void *ptr) {
+ netdata_thread_cleanup_push(diskspace_main_cleanup, ptr);
+
+ int vdo_cpu_netdata = config_get_boolean("plugin:proc", "netdata server resources", 1);
+
+ cleanup_mount_points = config_get_boolean(CONFIG_SECTION_DISKSPACE, "remove charts of unmounted disks" , cleanup_mount_points);
+
+ int update_every = (int)config_get_number(CONFIG_SECTION_DISKSPACE, "update every", localhost->rrd_update_every);
+ if(update_every < localhost->rrd_update_every)
+ update_every = localhost->rrd_update_every;
+
+ check_for_new_mountpoints_every = (int)config_get_number(CONFIG_SECTION_DISKSPACE, "check for new mount points every", check_for_new_mountpoints_every);
+ if(check_for_new_mountpoints_every < update_every)
+ check_for_new_mountpoints_every = update_every;
+
+ struct rusage thread;
+
+ usec_t duration = 0;
+ usec_t step = update_every * USEC_PER_SEC;
+ heartbeat_t hb;
+ heartbeat_init(&hb);
+ while(!netdata_exit) {
+ duration = heartbeat_monotonic_dt_to_now_usec(&hb);
+ /* usec_t hb_dt = */ heartbeat_next(&hb, step);
+
+ if(unlikely(netdata_exit)) break;
+
+
+ // --------------------------------------------------------------------------
+ // this is smart enough not to reload it every time
+
+ mountinfo_reload(0);
+
+
+ // --------------------------------------------------------------------------
+ // disk space metrics
+
+ struct mountinfo *mi;
+ for(mi = disk_mountinfo_root; mi; mi = mi->next) {
+
+ if(unlikely(mi->flags & (MOUNTINFO_IS_DUMMY | MOUNTINFO_IS_BIND)))
+ continue;
+
+ do_disk_space_stats(mi, update_every);
+ if(unlikely(netdata_exit)) break;
+ }
+
+ if(unlikely(netdata_exit)) break;
+
+ if(dict_mountpoints)
+ dictionary_get_all(dict_mountpoints, mount_point_cleanup, NULL);
+
+ if(vdo_cpu_netdata) {
+ static RRDSET *stcpu_thread = NULL, *st_duration = NULL;
+ static RRDDIM *rd_user = NULL, *rd_system = NULL, *rd_duration = NULL;
+
+ // ----------------------------------------------------------------
+
+ getrusage(RUSAGE_THREAD, &thread);
+
+ if(unlikely(!stcpu_thread)) {
+ stcpu_thread = rrdset_create_localhost(
+ "netdata"
+ , "plugin_diskspace"
+ , NULL
+ , "diskspace"
+ , NULL
+ , "NetData Disk Space Plugin CPU usage"
+ , "milliseconds/s"
+ , PLUGIN_DISKSPACE_NAME
+ , NULL
+ , NETDATA_CHART_PRIO_NETDATA_DISKSPACE
+ , update_every
+ , RRDSET_TYPE_STACKED
+ );
+
+ rd_user = rrddim_add(stcpu_thread, "user", NULL, 1, 1000, RRD_ALGORITHM_INCREMENTAL);
+ rd_system = rrddim_add(stcpu_thread, "system", NULL, 1, 1000, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else
+ rrdset_next(stcpu_thread);
+
+ rrddim_set_by_pointer(stcpu_thread, rd_user, thread.ru_utime.tv_sec * 1000000ULL + thread.ru_utime.tv_usec);
+ rrddim_set_by_pointer(stcpu_thread, rd_system, thread.ru_stime.tv_sec * 1000000ULL + thread.ru_stime.tv_usec);
+ rrdset_done(stcpu_thread);
+
+ // ----------------------------------------------------------------
+
+ if(unlikely(!st_duration)) {
+ st_duration = rrdset_create_localhost(
+ "netdata"
+ , "plugin_diskspace_dt"
+ , NULL
+ , "diskspace"
+ , NULL
+ , "NetData Disk Space Plugin Duration"
+ , "milliseconds/run"
+ , PLUGIN_DISKSPACE_NAME
+ , NULL
+ , 132021
+ , update_every
+ , RRDSET_TYPE_AREA
+ );
+
+ rd_duration = rrddim_add(st_duration, "duration", NULL, 1, 1000, RRD_ALGORITHM_ABSOLUTE);
+ }
+ else
+ rrdset_next(st_duration);
+
+ rrddim_set_by_pointer(st_duration, rd_duration, duration);
+ rrdset_done(st_duration);
+
+ // ----------------------------------------------------------------
+
+ if(unlikely(netdata_exit)) break;
+ }
+ }
+
+ netdata_thread_cleanup_pop(1);
+ return NULL;
+}
diff --git a/collectors/diskspace.plugin/plugin_diskspace.h b/collectors/diskspace.plugin/plugin_diskspace.h
new file mode 100644
index 000000000..7c9df9d13
--- /dev/null
+++ b/collectors/diskspace.plugin/plugin_diskspace.h
@@ -0,0 +1,34 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_PLUGIN_PROC_DISKSPACE_H
+#define NETDATA_PLUGIN_PROC_DISKSPACE_H
+
+#include "../../daemon/common.h"
+
+
+#if (TARGET_OS == OS_LINUX)
+
+#define NETDATA_PLUGIN_HOOK_LINUX_DISKSPACE \
+ { \
+ .name = "PLUGIN[diskspace]", \
+ .config_section = CONFIG_SECTION_PLUGINS, \
+ .config_name = "diskspace", \
+ .enabled = 1, \
+ .thread = NULL, \
+ .init_routine = NULL, \
+ .start_routine = diskspace_main \
+ },
+
+extern void *diskspace_main(void *ptr);
+
+#include "../proc.plugin/plugin_proc.h"
+
+#else // (TARGET_OS == OS_LINUX)
+
+#define NETDATA_PLUGIN_HOOK_LINUX_DISKSPACE
+
+#endif // (TARGET_OS == OS_LINUX)
+
+
+
+#endif //NETDATA_PLUGIN_PROC_DISKSPACE_H
diff --git a/collectors/fping.plugin/Makefile.am b/collectors/fping.plugin/Makefile.am
new file mode 100644
index 000000000..4395394db
--- /dev/null
+++ b/collectors/fping.plugin/Makefile.am
@@ -0,0 +1,24 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+
+CLEANFILES = \
+ fping.plugin \
+ $(NULL)
+
+include $(top_srcdir)/build/subst.inc
+SUFFIXES = .in
+
+dist_plugins_SCRIPTS = \
+ fping.plugin \
+ $(NULL)
+
+dist_noinst_DATA = \
+ fping.plugin.in \
+ README.md \
+ $(NULL)
+
+dist_libconfig_DATA = \
+ fping.conf \
+ $(NULL)
diff --git a/collectors/fping.plugin/Makefile.in b/collectors/fping.plugin/Makefile.in
new file mode 100644
index 000000000..67b9699b7
--- /dev/null
+++ b/collectors/fping.plugin/Makefile.in
@@ -0,0 +1,591 @@
+# Makefile.in generated by automake 1.14.1 from Makefile.am.
+# @configure_input@
+
+# Copyright (C) 1994-2013 Free Software Foundation, Inc.
+
+# This Makefile.in is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
+# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE.
+
+@SET_MAKE@
+
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+
+VPATH = @srcdir@
+am__is_gnu_make = test -n '$(MAKEFILE_LIST)' && test -n '$(MAKELEVEL)'
+am__make_running_with_option = \
+ case $${target_option-} in \
+ ?) ;; \
+ *) echo "am__make_running_with_option: internal error: invalid" \
+ "target option '$${target_option-}' specified" >&2; \
+ exit 1;; \
+ esac; \
+ has_opt=no; \
+ sane_makeflags=$$MAKEFLAGS; \
+ if $(am__is_gnu_make); then \
+ sane_makeflags=$$MFLAGS; \
+ else \
+ case $$MAKEFLAGS in \
+ *\\[\ \ ]*) \
+ bs=\\; \
+ sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \
+ | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \
+ esac; \
+ fi; \
+ skip_next=no; \
+ strip_trailopt () \
+ { \
+ flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \
+ }; \
+ for flg in $$sane_makeflags; do \
+ test $$skip_next = yes && { skip_next=no; continue; }; \
+ case $$flg in \
+ *=*|--*) continue;; \
+ -*I) strip_trailopt 'I'; skip_next=yes;; \
+ -*I?*) strip_trailopt 'I';; \
+ -*O) strip_trailopt 'O'; skip_next=yes;; \
+ -*O?*) strip_trailopt 'O';; \
+ -*l) strip_trailopt 'l'; skip_next=yes;; \
+ -*l?*) strip_trailopt 'l';; \
+ -[dEDm]) skip_next=yes;; \
+ -[JT]) skip_next=yes;; \
+ esac; \
+ case $$flg in \
+ *$$target_option*) has_opt=yes; break;; \
+ esac; \
+ done; \
+ test $$has_opt = yes
+am__make_dryrun = (target_option=n; $(am__make_running_with_option))
+am__make_keepgoing = (target_option=k; $(am__make_running_with_option))
+pkgdatadir = $(datadir)/@PACKAGE@
+pkgincludedir = $(includedir)/@PACKAGE@
+pkglibdir = $(libdir)/@PACKAGE@
+pkglibexecdir = $(libexecdir)/@PACKAGE@
+am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd
+install_sh_DATA = $(install_sh) -c -m 644
+install_sh_PROGRAM = $(install_sh) -c
+install_sh_SCRIPT = $(install_sh) -c
+INSTALL_HEADER = $(INSTALL_DATA)
+transform = $(program_transform_name)
+NORMAL_INSTALL = :
+PRE_INSTALL = :
+POST_INSTALL = :
+NORMAL_UNINSTALL = :
+PRE_UNINSTALL = :
+POST_UNINSTALL = :
+build_triplet = @build@
+host_triplet = @host@
+DIST_COMMON = $(top_srcdir)/build/subst.inc $(srcdir)/Makefile.in \
+ $(srcdir)/Makefile.am $(dist_plugins_SCRIPTS) \
+ $(dist_libconfig_DATA) $(dist_noinst_DATA)
+subdir = collectors/fping.plugin
+ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
+am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \
+ $(top_srcdir)/build/m4/ax_c__generic.m4 \
+ $(top_srcdir)/build/m4/ax_c_lto.m4 \
+ $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \
+ $(top_srcdir)/build/m4/ax_c_mallopt.m4 \
+ $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \
+ $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \
+ $(top_srcdir)/build/m4/ax_pthread.m4 \
+ $(top_srcdir)/build/m4/jemalloc.m4 \
+ $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac
+am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
+ $(ACLOCAL_M4)
+mkinstalldirs = $(install_sh) -d
+CONFIG_HEADER = $(top_builddir)/config.h
+CONFIG_CLEAN_FILES =
+CONFIG_CLEAN_VPATH_FILES =
+am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`;
+am__vpath_adj = case $$p in \
+ $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \
+ *) f=$$p;; \
+ esac;
+am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`;
+am__install_max = 40
+am__nobase_strip_setup = \
+ srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'`
+am__nobase_strip = \
+ for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||"
+am__nobase_list = $(am__nobase_strip_setup); \
+ for p in $$list; do echo "$$p $$p"; done | \
+ sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \
+ $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \
+ if (++n[$$2] == $(am__install_max)) \
+ { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \
+ END { for (dir in files) print dir, files[dir] }'
+am__base_list = \
+ sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \
+ sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g'
+am__uninstall_files_from_dir = { \
+ test -z "$$files" \
+ || { test ! -d "$$dir" && test ! -f "$$dir" && test ! -r "$$dir"; } \
+ || { echo " ( cd '$$dir' && rm -f" $$files ")"; \
+ $(am__cd) "$$dir" && rm -f $$files; }; \
+ }
+am__installdirs = "$(DESTDIR)$(pluginsdir)" \
+ "$(DESTDIR)$(libconfigdir)"
+SCRIPTS = $(dist_plugins_SCRIPTS)
+AM_V_P = $(am__v_P_@AM_V@)
+am__v_P_ = $(am__v_P_@AM_DEFAULT_V@)
+am__v_P_0 = false
+am__v_P_1 = :
+AM_V_GEN = $(am__v_GEN_@AM_V@)
+am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@)
+am__v_GEN_0 = @echo " GEN " $@;
+am__v_GEN_1 =
+AM_V_at = $(am__v_at_@AM_V@)
+am__v_at_ = $(am__v_at_@AM_DEFAULT_V@)
+am__v_at_0 = @
+am__v_at_1 =
+SOURCES =
+DIST_SOURCES =
+am__can_run_installinfo = \
+ case $$AM_UPDATE_INFO_DIR in \
+ n|no|NO) false;; \
+ *) (install-info --version) >/dev/null 2>&1;; \
+ esac
+DATA = $(dist_libconfig_DATA) $(dist_noinst_DATA)
+am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP)
+DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
+ACLOCAL = @ACLOCAL@
+AMTAR = @AMTAR@
+AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@
+AUTOCONF = @AUTOCONF@
+AUTOHEADER = @AUTOHEADER@
+AUTOMAKE = @AUTOMAKE@
+AWK = @AWK@
+CC = @CC@
+CCDEPMODE = @CCDEPMODE@
+CFLAGS = @CFLAGS@
+CPP = @CPP@
+CPPFLAGS = @CPPFLAGS@
+CYGPATH_W = @CYGPATH_W@
+DEFS = @DEFS@
+DEPDIR = @DEPDIR@
+ECHO_C = @ECHO_C@
+ECHO_N = @ECHO_N@
+ECHO_T = @ECHO_T@
+EGREP = @EGREP@
+EXEEXT = @EXEEXT@
+GREP = @GREP@
+INSTALL = @INSTALL@
+INSTALL_DATA = @INSTALL_DATA@
+INSTALL_PROGRAM = @INSTALL_PROGRAM@
+INSTALL_SCRIPT = @INSTALL_SCRIPT@
+INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@
+IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@
+LDFLAGS = @LDFLAGS@
+LIBCAP_CFLAGS = @LIBCAP_CFLAGS@
+LIBCAP_LIBS = @LIBCAP_LIBS@
+LIBMNL_CFLAGS = @LIBMNL_CFLAGS@
+LIBMNL_LIBS = @LIBMNL_LIBS@
+LIBOBJS = @LIBOBJS@
+LIBS = @LIBS@
+LTLIBOBJS = @LTLIBOBJS@
+MAINT = @MAINT@
+MAKEINFO = @MAKEINFO@
+MATH_CFLAGS = @MATH_CFLAGS@
+MATH_LIBS = @MATH_LIBS@
+MKDIR_P = @MKDIR_P@
+NFACCT_CFLAGS = @NFACCT_CFLAGS@
+NFACCT_LIBS = @NFACCT_LIBS@
+OBJEXT = @OBJEXT@
+OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@
+OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@
+OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@
+OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@
+OPTIONAL_MATH_CLFAGS = @OPTIONAL_MATH_CLFAGS@
+OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@
+OPTIONAL_NFACCT_CLFAGS = @OPTIONAL_NFACCT_CLFAGS@
+OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@
+OPTIONAL_UUID_CLFAGS = @OPTIONAL_UUID_CLFAGS@
+OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@
+OPTIONAL_ZLIB_CLFAGS = @OPTIONAL_ZLIB_CLFAGS@
+OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@
+PACKAGE = @PACKAGE@
+PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@
+PACKAGE_NAME = @PACKAGE_NAME@
+PACKAGE_RPM_RELEASE = @PACKAGE_RPM_RELEASE@
+PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@
+PACKAGE_STRING = @PACKAGE_STRING@
+PACKAGE_TARNAME = @PACKAGE_TARNAME@
+PACKAGE_URL = @PACKAGE_URL@
+PACKAGE_VERSION = @PACKAGE_VERSION@
+PATH_SEPARATOR = @PATH_SEPARATOR@
+PKG_CONFIG = @PKG_CONFIG@
+PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@
+PKG_CONFIG_PATH = @PKG_CONFIG_PATH@
+PTHREAD_CC = @PTHREAD_CC@
+PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
+PTHREAD_LIBS = @PTHREAD_LIBS@
+SET_MAKE = @SET_MAKE@
+SHELL = @SHELL@
+SSE_CANDIDATE = @SSE_CANDIDATE@
+STRIP = @STRIP@
+UUID_CFLAGS = @UUID_CFLAGS@
+UUID_LIBS = @UUID_LIBS@
+VERSION = @VERSION@
+ZLIB_CFLAGS = @ZLIB_CFLAGS@
+ZLIB_LIBS = @ZLIB_LIBS@
+abs_builddir = @abs_builddir@
+abs_srcdir = @abs_srcdir@
+abs_top_builddir = @abs_top_builddir@
+abs_top_srcdir = @abs_top_srcdir@
+ac_ct_CC = @ac_ct_CC@
+am__include = @am__include@
+am__leading_dot = @am__leading_dot@
+am__quote = @am__quote@
+am__tar = @am__tar@
+am__untar = @am__untar@
+ax_pthread_config = @ax_pthread_config@
+bindir = @bindir@
+build = @build@
+build_alias = @build_alias@
+build_cpu = @build_cpu@
+build_os = @build_os@
+build_target = @build_target@
+build_vendor = @build_vendor@
+builddir = @builddir@
+cachedir = @cachedir@
+chartsdir = @chartsdir@
+configdir = @configdir@
+datadir = @datadir@
+datarootdir = @datarootdir@
+docdir = @docdir@
+dvidir = @dvidir@
+exec_prefix = @exec_prefix@
+has_jemalloc = @has_jemalloc@
+has_tcmalloc = @has_tcmalloc@
+host = @host@
+host_alias = @host_alias@
+host_cpu = @host_cpu@
+host_os = @host_os@
+host_vendor = @host_vendor@
+htmldir = @htmldir@
+includedir = @includedir@
+infodir = @infodir@
+install_sh = @install_sh@
+libconfigdir = @libconfigdir@
+libdir = @libdir@
+libexecdir = @libexecdir@
+localedir = @localedir@
+localstatedir = @localstatedir@
+logdir = @logdir@
+mandir = @mandir@
+mkdir_p = @mkdir_p@
+nodedir = @nodedir@
+oldincludedir = @oldincludedir@
+pdfdir = @pdfdir@
+pluginsdir = @pluginsdir@
+prefix = @prefix@
+program_transform_name = @program_transform_name@
+psdir = @psdir@
+pythondir = @pythondir@
+registrydir = @registrydir@
+sbindir = @sbindir@
+sharedstatedir = @sharedstatedir@
+srcdir = @srcdir@
+sysconfdir = @sysconfdir@
+target_alias = @target_alias@
+top_build_prefix = @top_build_prefix@
+top_builddir = @top_builddir@
+top_srcdir = @top_srcdir@
+varlibdir = @varlibdir@
+webdir = @webdir@
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+CLEANFILES = \
+ fping.plugin \
+ $(NULL)
+
+SUFFIXES = .in
+dist_plugins_SCRIPTS = \
+ fping.plugin \
+ $(NULL)
+
+dist_noinst_DATA = \
+ fping.plugin.in \
+ README.md \
+ $(NULL)
+
+dist_libconfig_DATA = \
+ fping.conf \
+ $(NULL)
+
+all: all-am
+
+.SUFFIXES:
+.SUFFIXES: .in
+$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(top_srcdir)/build/subst.inc $(am__configure_deps)
+ @for dep in $?; do \
+ case '$(am__configure_deps)' in \
+ *$$dep*) \
+ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \
+ && { if test -f $@; then exit 0; else break; fi; }; \
+ exit 1;; \
+ esac; \
+ done; \
+ echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu collectors/fping.plugin/Makefile'; \
+ $(am__cd) $(top_srcdir) && \
+ $(AUTOMAKE) --gnu collectors/fping.plugin/Makefile
+.PRECIOUS: Makefile
+Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
+ @case '$?' in \
+ *config.status*) \
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \
+ *) \
+ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \
+ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \
+ esac;
+$(top_srcdir)/build/subst.inc:
+
+$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+
+$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(am__aclocal_m4_deps):
+install-dist_pluginsSCRIPTS: $(dist_plugins_SCRIPTS)
+ @$(NORMAL_INSTALL)
+ @list='$(dist_plugins_SCRIPTS)'; test -n "$(pluginsdir)" || list=; \
+ if test -n "$$list"; then \
+ echo " $(MKDIR_P) '$(DESTDIR)$(pluginsdir)'"; \
+ $(MKDIR_P) "$(DESTDIR)$(pluginsdir)" || exit 1; \
+ fi; \
+ for p in $$list; do \
+ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
+ if test -f "$$d$$p"; then echo "$$d$$p"; echo "$$p"; else :; fi; \
+ done | \
+ sed -e 'p;s,.*/,,;n' \
+ -e 'h;s|.*|.|' \
+ -e 'p;x;s,.*/,,;$(transform)' | sed 'N;N;N;s,\n, ,g' | \
+ $(AWK) 'BEGIN { files["."] = ""; dirs["."] = 1; } \
+ { d=$$3; if (dirs[d] != 1) { print "d", d; dirs[d] = 1 } \
+ if ($$2 == $$4) { files[d] = files[d] " " $$1; \
+ if (++n[d] == $(am__install_max)) { \
+ print "f", d, files[d]; n[d] = 0; files[d] = "" } } \
+ else { print "f", d "/" $$4, $$1 } } \
+ END { for (d in files) print "f", d, files[d] }' | \
+ while read type dir files; do \
+ if test "$$dir" = .; then dir=; else dir=/$$dir; fi; \
+ test -z "$$files" || { \
+ echo " $(INSTALL_SCRIPT) $$files '$(DESTDIR)$(pluginsdir)$$dir'"; \
+ $(INSTALL_SCRIPT) $$files "$(DESTDIR)$(pluginsdir)$$dir" || exit $$?; \
+ } \
+ ; done
+
+uninstall-dist_pluginsSCRIPTS:
+ @$(NORMAL_UNINSTALL)
+ @list='$(dist_plugins_SCRIPTS)'; test -n "$(pluginsdir)" || exit 0; \
+ files=`for p in $$list; do echo "$$p"; done | \
+ sed -e 's,.*/,,;$(transform)'`; \
+ dir='$(DESTDIR)$(pluginsdir)'; $(am__uninstall_files_from_dir)
+install-dist_libconfigDATA: $(dist_libconfig_DATA)
+ @$(NORMAL_INSTALL)
+ @list='$(dist_libconfig_DATA)'; test -n "$(libconfigdir)" || list=; \
+ if test -n "$$list"; then \
+ echo " $(MKDIR_P) '$(DESTDIR)$(libconfigdir)'"; \
+ $(MKDIR_P) "$(DESTDIR)$(libconfigdir)" || exit 1; \
+ fi; \
+ for p in $$list; do \
+ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
+ echo "$$d$$p"; \
+ done | $(am__base_list) | \
+ while read files; do \
+ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(libconfigdir)'"; \
+ $(INSTALL_DATA) $$files "$(DESTDIR)$(libconfigdir)" || exit $$?; \
+ done
+
+uninstall-dist_libconfigDATA:
+ @$(NORMAL_UNINSTALL)
+ @list='$(dist_libconfig_DATA)'; test -n "$(libconfigdir)" || list=; \
+ files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
+ dir='$(DESTDIR)$(libconfigdir)'; $(am__uninstall_files_from_dir)
+tags TAGS:
+
+ctags CTAGS:
+
+cscope cscopelist:
+
+
+distdir: $(DISTFILES)
+ @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+ list='$(DISTFILES)'; \
+ dist_files=`for file in $$list; do echo $$file; done | \
+ sed -e "s|^$$srcdirstrip/||;t" \
+ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \
+ case $$dist_files in \
+ */*) $(MKDIR_P) `echo "$$dist_files" | \
+ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \
+ sort -u` ;; \
+ esac; \
+ for file in $$dist_files; do \
+ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \
+ if test -d $$d/$$file; then \
+ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \
+ if test -d "$(distdir)/$$file"; then \
+ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+ fi; \
+ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \
+ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \
+ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+ fi; \
+ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \
+ else \
+ test -f "$(distdir)/$$file" \
+ || cp -p $$d/$$file "$(distdir)/$$file" \
+ || exit 1; \
+ fi; \
+ done
+check-am: all-am
+check: check-am
+all-am: Makefile $(SCRIPTS) $(DATA)
+installdirs:
+ for dir in "$(DESTDIR)$(pluginsdir)" "$(DESTDIR)$(libconfigdir)"; do \
+ test -z "$$dir" || $(MKDIR_P) "$$dir"; \
+ done
+install: install-am
+install-exec: install-exec-am
+install-data: install-data-am
+uninstall: uninstall-am
+
+install-am: all-am
+ @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
+
+installcheck: installcheck-am
+install-strip:
+ if test -z '$(STRIP)'; then \
+ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+ install; \
+ else \
+ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+ "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \
+ fi
+mostlyclean-generic:
+
+clean-generic:
+ -test -z "$(CLEANFILES)" || rm -f $(CLEANFILES)
+
+distclean-generic:
+ -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
+ -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES)
+
+maintainer-clean-generic:
+ @echo "This command is intended for maintainers to use"
+ @echo "it deletes files that may require special tools to rebuild."
+ -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES)
+clean: clean-am
+
+clean-am: clean-generic mostlyclean-am
+
+distclean: distclean-am
+ -rm -f Makefile
+distclean-am: clean-am distclean-generic
+
+dvi: dvi-am
+
+dvi-am:
+
+html: html-am
+
+html-am:
+
+info: info-am
+
+info-am:
+
+install-data-am: install-dist_libconfigDATA \
+ install-dist_pluginsSCRIPTS
+
+install-dvi: install-dvi-am
+
+install-dvi-am:
+
+install-exec-am:
+
+install-html: install-html-am
+
+install-html-am:
+
+install-info: install-info-am
+
+install-info-am:
+
+install-man:
+
+install-pdf: install-pdf-am
+
+install-pdf-am:
+
+install-ps: install-ps-am
+
+install-ps-am:
+
+installcheck-am:
+
+maintainer-clean: maintainer-clean-am
+ -rm -f Makefile
+maintainer-clean-am: distclean-am maintainer-clean-generic
+
+mostlyclean: mostlyclean-am
+
+mostlyclean-am: mostlyclean-generic
+
+pdf: pdf-am
+
+pdf-am:
+
+ps: ps-am
+
+ps-am:
+
+uninstall-am: uninstall-dist_libconfigDATA \
+ uninstall-dist_pluginsSCRIPTS
+
+.MAKE: install-am install-strip
+
+.PHONY: all all-am check check-am clean clean-generic cscopelist-am \
+ ctags-am distclean distclean-generic distdir dvi dvi-am html \
+ html-am info info-am install install-am install-data \
+ install-data-am install-dist_libconfigDATA \
+ install-dist_pluginsSCRIPTS install-dvi install-dvi-am \
+ install-exec install-exec-am install-html install-html-am \
+ install-info install-info-am install-man install-pdf \
+ install-pdf-am install-ps install-ps-am install-strip \
+ installcheck installcheck-am installdirs maintainer-clean \
+ maintainer-clean-generic mostlyclean mostlyclean-generic pdf \
+ pdf-am ps ps-am tags-am uninstall uninstall-am \
+ uninstall-dist_libconfigDATA uninstall-dist_pluginsSCRIPTS
+
+.in:
+ if sed \
+ -e 's#[@]localstatedir_POST@#$(localstatedir)#g' \
+ -e 's#[@]sbindir_POST@#$(sbindir)#g' \
+ -e 's#[@]sysconfdir_POST@#$(sysconfdir)#g' \
+ -e 's#[@]pythondir_POST@#$(pythondir)#g' \
+ -e 's#[@]configdir_POST@#$(configdir)#g' \
+ -e 's#[@]libconfigdir_POST@#$(libconfigdir)#g' \
+ -e 's#[@]cachedir_POST@#$(cachedir)#g' \
+ $< > $@.tmp; then \
+ mv "$@.tmp" "$@"; \
+ else \
+ rm -f "$@.tmp"; \
+ false; \
+ fi
+
+# Tell versions [3.59,3.63) of GNU make to not export all variables.
+# Otherwise a system limit (for SysV at least) may be exceeded.
+.NOEXPORT:
diff --git a/collectors/fping.plugin/README.md b/collectors/fping.plugin/README.md
new file mode 100644
index 000000000..0554a7edc
--- /dev/null
+++ b/collectors/fping.plugin/README.md
@@ -0,0 +1,96 @@
+# fping.plugin
+
+The fping plugin monitors latency, packet loss and uptime for any number of network endpoints,
+by pinging them with `fping`.
+
+A recent version of `fping` is required (one that supports the `-N` option).
+The supplied plugin can install it by running:
+
+```sh
+/usr/libexec/netdata/plugins.d/fping.plugin install
+```
+
+The above will download, build and install the right version as `/usr/local/bin/fping`.
+
+Then you need to edit `/etc/netdata/fping.conf` (to edit it on your system run
+`/etc/netdata/edit-config fping.conf`) like this:
+
+```sh
+# uncomment the following line - it should already be there
+fping="/usr/local/bin/fping"
+
+# set here all the hosts you need to ping
+# we suggest using hostnames and putting their IPs in /etc/hosts
+hosts="host1 host2 host3"
+
+# override the chart update frequency - the default is inherited from netdata
+update_every=1
+
+# time in milliseconds (1 sec = 1000 ms) to ping the hosts
+# 200 = 5 pings per second
+ping_every=200
+
+# other fping options - these are the defaults
+fping_opts="-R -b 56 -i 1 -r 0 -t 5000"
+```
+
+## alarms
+
+netdata will automatically attach a few alarms for each host.
+Check the [latest versions of the fping alarms](https://github.com/netdata/netdata/blob/master/health/health.d/fping.conf)
+
+## Additional Tips
+
+### Customizing Amount of Pings Per Second
+
+For example, to update the chart every 10 seconds and use 2 pings every 10 seconds, use this:
+
+```sh
+# Chart Update Frequency (Time in Seconds)
+update_every=10
+
+# Time in Milliseconds (1 sec = 1000 ms) to Ping the Hosts
+# The Following Example Sends 1 Ping Every 5000 ms (2 Pings Per 10-Second Update)
+# Calculation Formula: ping_every = (update_every * 1000) / pings_per_update = (10 * 1000) / 2 = 5000
+ping_every=5000
+```
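+
+In general, the plugin sends `(update_every * 1000) / ping_every` pings per host on every iteration; the stock script defaults to 5 pings
+per iteration (`ping_every=$((update_every * 1000 / 5))`). As an illustrative sketch, for roughly 10 pings per second while keeping the
+default 1-second chart updates you could use:
+
+```sh
+# Chart Update Frequency (Time in Seconds)
+update_every=1
+
+# (1 * 1000) / 10 = 100 ms Between Pings, i.e. 10 Pings Per Second
+ping_every=100
+```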
+
+### Multiple fping Plugins With Different Settings
+
+You may need to run multiple fping plugins with different settings for different end points.
+For example, you may need to ping a few hosts 10 times per second, and others once per second.
+
+netdata allows you to add as many `fping` plugins as you like.
+
+Follow this procedure:
+
+**1. Create New fping Configuration File**
+
+
+```sh
+# Step Into Configuration Directory
+cd /etc/netdata
+
+# Copy Original fping Configuration File To New Configuration File
+cp fping.conf fping2.conf
+```
+
+Edit `fping2.conf` and set the options and the hosts you need for the second instance.
+
+**2. Soft Link Original fping Plugin to New Plugin File**
+
+```sh
+# Become root (If This Step Is Performed As A Non-Root User)
+sudo su
+
+# Step Into The Plugins Directory
+cd /usr/libexec/netdata/plugins.d
+
+# Link fping.plugin to fping2.plugin
+ln -s fping.plugin fping2.plugin
+```
+
+That's it. netdata will detect the new plugin and start it.
+
+You can give the new plugin any name you like.
+Just make sure the plugin and the configuration file have the same name.
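+
+Why the names must match: the plugin derives the configuration file it loads from its own file name (`plugin="${PROGRAM_NAME/.plugin/}"`
+in the script), so a link named `fping2.plugin` reads `fping2.conf`. As an illustrative check, you can run the linked plugin by hand and
+watch its log output for the configuration file it loads:
+
+```sh
+# run the second instance manually - it logs the fping2.conf file it loads
+/usr/libexec/netdata/plugins.d/fping2.plugin 1
+```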
diff --git a/collectors/fping.plugin/fping.conf b/collectors/fping.plugin/fping.conf
new file mode 100644
index 000000000..63a7f7acd
--- /dev/null
+++ b/collectors/fping.plugin/fping.conf
@@ -0,0 +1,44 @@
+# no need for shebang - this file is sourced from fping.plugin
+
+# fping.plugin requires a recent version of fping.
+#
+# You can get it on your system, by running:
+#
+# /usr/libexec/netdata/plugins.d/fping.plugin install
+
+# -----------------------------------------------------------------------------
+# configuration options
+
+# The fping binary to use. We need one that can output netdata-friendly info
+# (supporting: -N). If you have multiple versions, put the full path of the
+# right one here.
+
+#fping="/usr/local/bin/fping"
+
+
+# a space-separated list of hosts to fping
+# we suggest putting names here and the IPs of these names in /etc/hosts
+
+hosts=""
+
+
+# The update frequency of the chart - the default is inherited from netdata
+
+#update_every=2
+
+
+# The time in milliseconds (1 sec = 1000 ms) to ping the hosts
+# by default 5 pings per host per iteration
+# fping will not allow this to be below 20ms
+
+#ping_every="200"
+
+
+# other fping options - defaults:
+# -R = send packets with random data
+# -b 56 = the number of bytes per packet
+# -i 1 = 1 ms when sending packets to others hosts (switching hosts)
+# -r 0 = never retry packets
+# -t 5000 = per packet timeout at 5000 ms
+
+#fping_opts="-R -b 56 -i 1 -r 0 -t 5000"
diff --git a/collectors/fping.plugin/fping.plugin b/collectors/fping.plugin/fping.plugin
new file mode 100644
index 000000000..cf8f17e9a
--- /dev/null
+++ b/collectors/fping.plugin/fping.plugin
@@ -0,0 +1,200 @@
+#!/usr/bin/env bash
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# netdata
+# real-time performance and health monitoring, done right!
+# (C) 2017 Costa Tsaousis <costa@tsaousis.gr>
+# GPL v3+
+#
+# This plugin requires a recent version of fping.
+# You can compile it from source by running me with the option: install
+
+export PATH="${PATH}:/sbin:/usr/sbin:/usr/local/sbin"
+export LC_ALL=C
+
+if [ "${1}" = "install" ]
+ then
+ [ "${UID}" != 0 ] && echo >&2 "Please run me as root. This will install a single binary file: /usr/local/bin/fping." && exit 1
+
+ run() {
+ printf >&2 " > "
+ printf >&2 "%q " "${@}"
+ printf >&2 "\n"
+ "${@}" || exit 1
+ }
+
+ download() {
+ local curl="$(which curl 2>/dev/null || command -v curl 2>/dev/null)"
+ [ ! -z "${curl}" ] && run curl -s -L "${1}" && return 0
+
+ local wget="$(which wget 2>/dev/null || command -v wget 2>/dev/null)"
+ [ ! -z "${wget}" ] && run wget -q -O - "${1}" && return 0
+
+ echo >&2 "Cannot find 'curl' or 'wget' in this system." && exit 1
+ }
+
+ [ ! -d /usr/src ] && run mkdir -p /usr/src
+ [ ! -d /usr/local/bin ] && run mkdir -p /usr/local/bin
+
+ run cd /usr/src
+
+ if [ -d fping-4.0 ]
+ then
+ run rm -rf fping-4.0 || exit 1
+ fi
+
+ download 'https://github.com/schweikert/fping/releases/download/v4.0/fping-4.0.tar.gz' | run tar -zxvpf -
+ [ $? -ne 0 ] && exit 1
+ run cd fping-4.0 || exit 1
+
+ run ./configure --prefix=/usr/local
+ run make clean
+ run make
+ if [ -f /usr/local/bin/fping ]
+ then
+ run mv -f /usr/local/bin/fping /usr/local/bin/fping.old
+ fi
+ run mv src/fping /usr/local/bin/fping
+ run chown root:root /usr/local/bin/fping
+ run chmod 4755 /usr/local/bin/fping
+ echo >&2
+ echo >&2 "All done, you have a compatible fping now at /usr/local/bin/fping."
+ echo >&2
+
+ fping="$(which fping 2>/dev/null || command -v fping 2>/dev/null)"
+ if [ "${fping}" != "/usr/local/bin/fping" ]
+ then
+ echo >&2 "You have another fping installed at: ${fping}."
+ echo >&2 "Please set:"
+ echo >&2
+ echo >&2 " fping=\"/usr/local/bin/fping\""
+ echo >&2
+ echo >&2 "at /etc/netdata/fping.conf"
+ echo >&2
+ fi
+ exit 0
+fi
+
+# -----------------------------------------------------------------------------
+
+PROGRAM_NAME="$(basename "${0}")"
+
+logdate() {
+ date "+%Y-%m-%d %H:%M:%S"
+}
+
+log() {
+ local status="${1}"
+ shift
+
+ echo >&2 "$(logdate): ${PROGRAM_NAME}: ${status}: ${*}"
+
+}
+
+warning() {
+ log WARNING "${@}"
+}
+
+error() {
+ log ERROR "${@}"
+}
+
+info() {
+ log INFO "${@}"
+}
+
+fatal() {
+ log FATAL "${@}"
+ echo "DISABLE"
+ exit 1
+}
+
+debug=0
+debug() {
+ [ $debug -eq 1 ] && log DEBUG "${@}"
+}
+
+# -----------------------------------------------------------------------------
+
+# store in ${plugin} the name we run under
+# this allows us to copy/link fping.plugin under a different name
+# to have multiple fping plugins running with different settings
+plugin="${PROGRAM_NAME/.plugin/}"
+
+
+# -----------------------------------------------------------------------------
+
+# the frequency to send info to netdata
+# passed by netdata as the first parameter
+update_every="${1-1}"
+
+# the netdata configuration directory
+# passed by netdata as an environment variable
+[ -z "${NETDATA_USER_CONFIG_DIR}" ] && NETDATA_USER_CONFIG_DIR="/usr/local/etc/netdata"
+[ -z "${NETDATA_STOCK_CONFIG_DIR}" ] && NETDATA_STOCK_CONFIG_DIR="/usr/local/lib/netdata/conf.d"
+
+# -----------------------------------------------------------------------------
+# configuration options
+# can be overwritten at /etc/netdata/fping.conf
+
+# the fping binary to use
+# we need one that can output netdata friendly info (supporting: -N)
+# if you have multiple versions, put here the full filename of the right one
+fping="$( which fping 2>/dev/null || command -v fping 2>/dev/null )"
+
+# a space-separated list of hosts to fping
+# we suggest putting names here and the IPs of these names in /etc/hosts
+hosts=""
+
+# the time in milliseconds (1 sec = 1000 ms)
+# to ping the hosts - by default 5 pings per host per iteration
+ping_every="$((update_every * 1000 / 5))"
+
+# fping options
+fping_opts="-R -b 56 -i 1 -r 0 -t 5000"
+
+# -----------------------------------------------------------------------------
+# load the configuration files
+
+for CONFIG in "${NETDATA_STOCK_CONFIG_DIR}/${plugin}.conf" "${NETDATA_USER_CONFIG_DIR}/${plugin}.conf"
+do
+ if [ -f "${CONFIG}" ]
+ then
+ info "Loading config file '${CONFIG}'..."
+ source "${CONFIG}"
+ [ $? -ne 0 ] && error "Failed to load config file '${CONFIG}'."
+ else
+ warning "Cannot find file '${CONFIG}'."
+ fi
+done
+
+if [ -z "${hosts}" ]
+then
+ fatal "no hosts configured - nothing to do."
+fi
+
+if [ -z "${fping}" ]
+then
+ fatal "fping command is not found. Please set its full path in '${NETDATA_USER_CONFIG_DIR}/${plugin}.conf'"
+fi
+
+if [ ! -x "${fping}" ]
+then
+ fatal "fping command '${fping}' is not executable - cannot proceed."
+fi
+
+if [ ${ping_every} -lt 20 ]
+ then
+ warning "ping every was set to ${ping_every} but 20 is the minimum for non-root users. Setting it to 20 ms."
+ ping_every=20
+fi
+
+# the fping options we will use
+options=( -N -l -Q ${update_every} -p ${ping_every} ${fping_opts} ${hosts} )
+
+# execute fping
+info "starting fping: ${fping} ${options[*]}"
+exec "${fping}" "${options[@]}"
+
+# if we cannot execute fping, stop
+fatal "command '${fping} ${options[*]}' failed to be executed (returned code $?)."
diff --git a/collectors/fping.plugin/fping.plugin.in b/collectors/fping.plugin/fping.plugin.in
new file mode 100755
index 000000000..2c03e418e
--- /dev/null
+++ b/collectors/fping.plugin/fping.plugin.in
@@ -0,0 +1,200 @@
+#!/usr/bin/env bash
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# netdata
+# real-time performance and health monitoring, done right!
+# (C) 2017 Costa Tsaousis <costa@tsaousis.gr>
+# GPL v3+
+#
+# This plugin requires a recent version of fping.
+# You can compile it from source by running me with the option: install
+
+export PATH="${PATH}:/sbin:/usr/sbin:/usr/local/sbin"
+export LC_ALL=C
+
+if [ "${1}" = "install" ]
+ then
+ [ "${UID}" != 0 ] && echo >&2 "Please run me as root. This will install a single binary file: /usr/local/bin/fping." && exit 1
+
+ run() {
+ printf >&2 " > "
+ printf >&2 "%q " "${@}"
+ printf >&2 "\n"
+ "${@}" || exit 1
+ }
+
+ download() {
+ local curl="$(which curl 2>/dev/null || command -v curl 2>/dev/null)"
+ [ ! -z "${curl}" ] && run curl -s -L "${1}" && return 0
+
+ local wget="$(which wget 2>/dev/null || command -v wget 2>/dev/null)"
+ [ ! -z "${wget}" ] && run wget -q -O - "${1}" && return 0
+
+ echo >&2 "Cannot find 'curl' or 'wget' in this system." && exit 1
+ }
+
+ [ ! -d /usr/src ] && run mkdir -p /usr/src
+ [ ! -d /usr/local/bin ] && run mkdir -p /usr/local/bin
+
+ run cd /usr/src
+
+ if [ -d fping-4.0 ]
+ then
+ run rm -rf fping-4.0 || exit 1
+ fi
+
+ download 'https://github.com/schweikert/fping/releases/download/v4.0/fping-4.0.tar.gz' | run tar -zxvpf -
+ [ $? -ne 0 ] && exit 1
+ run cd fping-4.0 || exit 1
+
+ run ./configure --prefix=/usr/local
+ run make clean
+ run make
+ if [ -f /usr/local/bin/fping ]
+ then
+ run mv -f /usr/local/bin/fping /usr/local/bin/fping.old
+ fi
+ run mv src/fping /usr/local/bin/fping
+ run chown root:root /usr/local/bin/fping
+ run chmod 4755 /usr/local/bin/fping
+ echo >&2
+ echo >&2 "All done, you have a compatible fping now at /usr/local/bin/fping."
+ echo >&2
+
+ fping="$(which fping 2>/dev/null || command -v fping 2>/dev/null)"
+ if [ "${fping}" != "/usr/local/bin/fping" ]
+ then
+ echo >&2 "You have another fping installed at: ${fping}."
+ echo >&2 "Please set:"
+ echo >&2
+ echo >&2 " fping=\"/usr/local/bin/fping\""
+ echo >&2
+ echo >&2 "at /etc/netdata/fping.conf"
+ echo >&2
+ fi
+ exit 0
+fi
+
+# -----------------------------------------------------------------------------
+
+PROGRAM_NAME="$(basename "${0}")"
+
+logdate() {
+ date "+%Y-%m-%d %H:%M:%S"
+}
+
+log() {
+ local status="${1}"
+ shift
+
+ echo >&2 "$(logdate): ${PROGRAM_NAME}: ${status}: ${*}"
+
+}
+
+warning() {
+ log WARNING "${@}"
+}
+
+error() {
+ log ERROR "${@}"
+}
+
+info() {
+ log INFO "${@}"
+}
+
+fatal() {
+ log FATAL "${@}"
+ echo "DISABLE"
+ exit 1
+}
+
+debug=0
+debug() {
+ [ $debug -eq 1 ] && log DEBUG "${@}"
+}
+
+# -----------------------------------------------------------------------------
+
+# store in ${plugin} the name we run under
+# this allows us to copy/link fping.plugin under a different name
+# to have multiple fping plugins running with different settings
+plugin="${PROGRAM_NAME/.plugin/}"
+
+
+# -----------------------------------------------------------------------------
+
+# the frequency to send info to netdata
+# passed by netdata as the first parameter
+update_every="${1-1}"
+
+# the netdata configuration directory
+# passed by netdata as an environment variable
+[ -z "${NETDATA_USER_CONFIG_DIR}" ] && NETDATA_USER_CONFIG_DIR="@configdir_POST@"
+[ -z "${NETDATA_STOCK_CONFIG_DIR}" ] && NETDATA_STOCK_CONFIG_DIR="@libconfigdir_POST@"
+
+# -----------------------------------------------------------------------------
+# configuration options
+# can be overwritten at /etc/netdata/fping.conf
+
+# the fping binary to use
+# we need one that can output netdata-friendly info (i.e. one supporting the -N option)
+# if you have multiple versions, put here the full filename of the right one
+fping="$( which fping 2>/dev/null || command -v fping 2>/dev/null )"
+
+# a space separated list of hosts to fping
+# we suggest putting host names here and mapping them to IPs in /etc/hosts
+hosts=""
+
+# the interval, in milliseconds (1 sec = 1000 ms), between pings to each host
+# by default 5 pings per host per update_every iteration
+ping_every="$((update_every * 1000 / 5))"
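+# e.g. with the default update_every=1 this evaluates to 1000 / 5 = 200 ms between pings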
+
+# fping options
+fping_opts="-R -b 56 -i 1 -r 0 -t 5000"
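+
+# As an illustration only (the host names below are placeholders), a minimal
+# fping.conf could contain just:
+#
+#   hosts="dns1.example.com dns2.example.com 10.11.12.1"
+#
+# optionally also overriding fping, ping_every or fping_opts shown above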
+
+# -----------------------------------------------------------------------------
+# load the configuration files
+
+for CONFIG in "${NETDATA_STOCK_CONFIG_DIR}/${plugin}.conf" "${NETDATA_USER_CONFIG_DIR}/${plugin}.conf"
+do
+ if [ -f "${CONFIG}" ]
+ then
+ info "Loading config file '${CONFIG}'..."
+ source "${CONFIG}"
+ [ $? -ne 0 ] && error "Failed to load config file '${CONFIG}'."
+ else
+ warning "Cannot find file '${CONFIG}'."
+ fi
+done
+
+if [ -z "${hosts}" ]
+then
+ fatal "no hosts configured - nothing to do."
+fi
+
+if [ -z "${fping}" ]
+then
+ fatal "fping command not found. Please set its full path in '${NETDATA_USER_CONFIG_DIR}/${plugin}.conf'"
+fi
+
+if [ ! -x "${fping}" ]
+then
+ fatal "fping command '${fping}' is not executable - cannot proceed."
+fi
+
+if [ ${ping_every} -lt 20 ]
+ then
+ warning "ping_every was set to ${ping_every} ms, but 20 ms is the minimum for non-root users - setting it to 20 ms."
+ ping_every=20
+fi
+
+# the fping options we will use
+options=( -N -l -Q ${update_every} -p ${ping_every} ${fping_opts} ${hosts} )
+
+# execute fping
+info "starting fping: ${fping} ${options[*]}"
+exec "${fping}" "${options[@]}"
+
+# if we cannot execute fping, stop
+fatal "command '${fping} ${options[*]}' failed to be executed (returned code $?)."
diff --git a/collectors/freebsd.plugin/Makefile.am b/collectors/freebsd.plugin/Makefile.am
new file mode 100644
index 000000000..e80ec702d
--- /dev/null
+++ b/collectors/freebsd.plugin/Makefile.am
@@ -0,0 +1,5 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+AUTOMAKE_OPTIONS = subdir-objects
+
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
diff --git a/collectors/freebsd.plugin/Makefile.in b/collectors/freebsd.plugin/Makefile.in
new file mode 100644
index 000000000..c88b3d755
--- /dev/null
+++ b/collectors/freebsd.plugin/Makefile.in
@@ -0,0 +1,457 @@
+# Makefile.in generated by automake 1.14.1 from Makefile.am.
+# @configure_input@
+
+# Copyright (C) 1994-2013 Free Software Foundation, Inc.
+
+# This Makefile.in is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
+# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE.
+
+@SET_MAKE@
+
+# SPDX-License-Identifier: GPL-3.0-or-later
+VPATH = @srcdir@
+am__is_gnu_make = test -n '$(MAKEFILE_LIST)' && test -n '$(MAKELEVEL)'
+am__make_running_with_option = \
+ case $${target_option-} in \
+ ?) ;; \
+ *) echo "am__make_running_with_option: internal error: invalid" \
+ "target option '$${target_option-}' specified" >&2; \
+ exit 1;; \
+ esac; \
+ has_opt=no; \
+ sane_makeflags=$$MAKEFLAGS; \
+ if $(am__is_gnu_make); then \
+ sane_makeflags=$$MFLAGS; \
+ else \
+ case $$MAKEFLAGS in \
+ *\\[\ \ ]*) \
+ bs=\\; \
+ sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \
+ | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \
+ esac; \
+ fi; \
+ skip_next=no; \
+ strip_trailopt () \
+ { \
+ flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \
+ }; \
+ for flg in $$sane_makeflags; do \
+ test $$skip_next = yes && { skip_next=no; continue; }; \
+ case $$flg in \
+ *=*|--*) continue;; \
+ -*I) strip_trailopt 'I'; skip_next=yes;; \
+ -*I?*) strip_trailopt 'I';; \
+ -*O) strip_trailopt 'O'; skip_next=yes;; \
+ -*O?*) strip_trailopt 'O';; \
+ -*l) strip_trailopt 'l'; skip_next=yes;; \
+ -*l?*) strip_trailopt 'l';; \
+ -[dEDm]) skip_next=yes;; \
+ -[JT]) skip_next=yes;; \
+ esac; \
+ case $$flg in \
+ *$$target_option*) has_opt=yes; break;; \
+ esac; \
+ done; \
+ test $$has_opt = yes
+am__make_dryrun = (target_option=n; $(am__make_running_with_option))
+am__make_keepgoing = (target_option=k; $(am__make_running_with_option))
+pkgdatadir = $(datadir)/@PACKAGE@
+pkgincludedir = $(includedir)/@PACKAGE@
+pkglibdir = $(libdir)/@PACKAGE@
+pkglibexecdir = $(libexecdir)/@PACKAGE@
+am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd
+install_sh_DATA = $(install_sh) -c -m 644
+install_sh_PROGRAM = $(install_sh) -c
+install_sh_SCRIPT = $(install_sh) -c
+INSTALL_HEADER = $(INSTALL_DATA)
+transform = $(program_transform_name)
+NORMAL_INSTALL = :
+PRE_INSTALL = :
+POST_INSTALL = :
+NORMAL_UNINSTALL = :
+PRE_UNINSTALL = :
+POST_UNINSTALL = :
+build_triplet = @build@
+host_triplet = @host@
+subdir = collectors/freebsd.plugin
+DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/Makefile.am
+ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
+am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \
+ $(top_srcdir)/build/m4/ax_c__generic.m4 \
+ $(top_srcdir)/build/m4/ax_c_lto.m4 \
+ $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \
+ $(top_srcdir)/build/m4/ax_c_mallopt.m4 \
+ $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \
+ $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \
+ $(top_srcdir)/build/m4/ax_pthread.m4 \
+ $(top_srcdir)/build/m4/jemalloc.m4 \
+ $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac
+am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
+ $(ACLOCAL_M4)
+mkinstalldirs = $(install_sh) -d
+CONFIG_HEADER = $(top_builddir)/config.h
+CONFIG_CLEAN_FILES =
+CONFIG_CLEAN_VPATH_FILES =
+AM_V_P = $(am__v_P_@AM_V@)
+am__v_P_ = $(am__v_P_@AM_DEFAULT_V@)
+am__v_P_0 = false
+am__v_P_1 = :
+AM_V_GEN = $(am__v_GEN_@AM_V@)
+am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@)
+am__v_GEN_0 = @echo " GEN " $@;
+am__v_GEN_1 =
+AM_V_at = $(am__v_at_@AM_V@)
+am__v_at_ = $(am__v_at_@AM_DEFAULT_V@)
+am__v_at_0 = @
+am__v_at_1 =
+SOURCES =
+DIST_SOURCES =
+am__can_run_installinfo = \
+ case $$AM_UPDATE_INFO_DIR in \
+ n|no|NO) false;; \
+ *) (install-info --version) >/dev/null 2>&1;; \
+ esac
+am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP)
+DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
+ACLOCAL = @ACLOCAL@
+AMTAR = @AMTAR@
+AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@
+AUTOCONF = @AUTOCONF@
+AUTOHEADER = @AUTOHEADER@
+AUTOMAKE = @AUTOMAKE@
+AWK = @AWK@
+CC = @CC@
+CCDEPMODE = @CCDEPMODE@
+CFLAGS = @CFLAGS@
+CPP = @CPP@
+CPPFLAGS = @CPPFLAGS@
+CYGPATH_W = @CYGPATH_W@
+DEFS = @DEFS@
+DEPDIR = @DEPDIR@
+ECHO_C = @ECHO_C@
+ECHO_N = @ECHO_N@
+ECHO_T = @ECHO_T@
+EGREP = @EGREP@
+EXEEXT = @EXEEXT@
+GREP = @GREP@
+INSTALL = @INSTALL@
+INSTALL_DATA = @INSTALL_DATA@
+INSTALL_PROGRAM = @INSTALL_PROGRAM@
+INSTALL_SCRIPT = @INSTALL_SCRIPT@
+INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@
+IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@
+LDFLAGS = @LDFLAGS@
+LIBCAP_CFLAGS = @LIBCAP_CFLAGS@
+LIBCAP_LIBS = @LIBCAP_LIBS@
+LIBMNL_CFLAGS = @LIBMNL_CFLAGS@
+LIBMNL_LIBS = @LIBMNL_LIBS@
+LIBOBJS = @LIBOBJS@
+LIBS = @LIBS@
+LTLIBOBJS = @LTLIBOBJS@
+MAINT = @MAINT@
+MAKEINFO = @MAKEINFO@
+MATH_CFLAGS = @MATH_CFLAGS@
+MATH_LIBS = @MATH_LIBS@
+MKDIR_P = @MKDIR_P@
+NFACCT_CFLAGS = @NFACCT_CFLAGS@
+NFACCT_LIBS = @NFACCT_LIBS@
+OBJEXT = @OBJEXT@
+OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@
+OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@
+OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@
+OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@
+OPTIONAL_MATH_CLFAGS = @OPTIONAL_MATH_CLFAGS@
+OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@
+OPTIONAL_NFACCT_CLFAGS = @OPTIONAL_NFACCT_CLFAGS@
+OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@
+OPTIONAL_UUID_CLFAGS = @OPTIONAL_UUID_CLFAGS@
+OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@
+OPTIONAL_ZLIB_CLFAGS = @OPTIONAL_ZLIB_CLFAGS@
+OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@
+PACKAGE = @PACKAGE@
+PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@
+PACKAGE_NAME = @PACKAGE_NAME@
+PACKAGE_RPM_RELEASE = @PACKAGE_RPM_RELEASE@
+PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@
+PACKAGE_STRING = @PACKAGE_STRING@
+PACKAGE_TARNAME = @PACKAGE_TARNAME@
+PACKAGE_URL = @PACKAGE_URL@
+PACKAGE_VERSION = @PACKAGE_VERSION@
+PATH_SEPARATOR = @PATH_SEPARATOR@
+PKG_CONFIG = @PKG_CONFIG@
+PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@
+PKG_CONFIG_PATH = @PKG_CONFIG_PATH@
+PTHREAD_CC = @PTHREAD_CC@
+PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
+PTHREAD_LIBS = @PTHREAD_LIBS@
+SET_MAKE = @SET_MAKE@
+SHELL = @SHELL@
+SSE_CANDIDATE = @SSE_CANDIDATE@
+STRIP = @STRIP@
+UUID_CFLAGS = @UUID_CFLAGS@
+UUID_LIBS = @UUID_LIBS@
+VERSION = @VERSION@
+ZLIB_CFLAGS = @ZLIB_CFLAGS@
+ZLIB_LIBS = @ZLIB_LIBS@
+abs_builddir = @abs_builddir@
+abs_srcdir = @abs_srcdir@
+abs_top_builddir = @abs_top_builddir@
+abs_top_srcdir = @abs_top_srcdir@
+ac_ct_CC = @ac_ct_CC@
+am__include = @am__include@
+am__leading_dot = @am__leading_dot@
+am__quote = @am__quote@
+am__tar = @am__tar@
+am__untar = @am__untar@
+ax_pthread_config = @ax_pthread_config@
+bindir = @bindir@
+build = @build@
+build_alias = @build_alias@
+build_cpu = @build_cpu@
+build_os = @build_os@
+build_target = @build_target@
+build_vendor = @build_vendor@
+builddir = @builddir@
+cachedir = @cachedir@
+chartsdir = @chartsdir@
+configdir = @configdir@
+datadir = @datadir@
+datarootdir = @datarootdir@
+docdir = @docdir@
+dvidir = @dvidir@
+exec_prefix = @exec_prefix@
+has_jemalloc = @has_jemalloc@
+has_tcmalloc = @has_tcmalloc@
+host = @host@
+host_alias = @host_alias@
+host_cpu = @host_cpu@
+host_os = @host_os@
+host_vendor = @host_vendor@
+htmldir = @htmldir@
+includedir = @includedir@
+infodir = @infodir@
+install_sh = @install_sh@
+libconfigdir = @libconfigdir@
+libdir = @libdir@
+libexecdir = @libexecdir@
+localedir = @localedir@
+localstatedir = @localstatedir@
+logdir = @logdir@
+mandir = @mandir@
+mkdir_p = @mkdir_p@
+nodedir = @nodedir@
+oldincludedir = @oldincludedir@
+pdfdir = @pdfdir@
+pluginsdir = @pluginsdir@
+prefix = @prefix@
+program_transform_name = @program_transform_name@
+psdir = @psdir@
+pythondir = @pythondir@
+registrydir = @registrydir@
+sbindir = @sbindir@
+sharedstatedir = @sharedstatedir@
+srcdir = @srcdir@
+sysconfdir = @sysconfdir@
+target_alias = @target_alias@
+top_build_prefix = @top_build_prefix@
+top_builddir = @top_builddir@
+top_srcdir = @top_srcdir@
+varlibdir = @varlibdir@
+webdir = @webdir@
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+all: all-am
+
+.SUFFIXES:
+$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps)
+ @for dep in $?; do \
+ case '$(am__configure_deps)' in \
+ *$$dep*) \
+ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \
+ && { if test -f $@; then exit 0; else break; fi; }; \
+ exit 1;; \
+ esac; \
+ done; \
+ echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu collectors/freebsd.plugin/Makefile'; \
+ $(am__cd) $(top_srcdir) && \
+ $(AUTOMAKE) --gnu collectors/freebsd.plugin/Makefile
+.PRECIOUS: Makefile
+Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
+ @case '$?' in \
+ *config.status*) \
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \
+ *) \
+ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \
+ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \
+ esac;
+
+$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+
+$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(am__aclocal_m4_deps):
+tags TAGS:
+
+ctags CTAGS:
+
+cscope cscopelist:
+
+
+distdir: $(DISTFILES)
+ @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+ list='$(DISTFILES)'; \
+ dist_files=`for file in $$list; do echo $$file; done | \
+ sed -e "s|^$$srcdirstrip/||;t" \
+ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \
+ case $$dist_files in \
+ */*) $(MKDIR_P) `echo "$$dist_files" | \
+ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \
+ sort -u` ;; \
+ esac; \
+ for file in $$dist_files; do \
+ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \
+ if test -d $$d/$$file; then \
+ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \
+ if test -d "$(distdir)/$$file"; then \
+ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+ fi; \
+ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \
+ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \
+ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+ fi; \
+ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \
+ else \
+ test -f "$(distdir)/$$file" \
+ || cp -p $$d/$$file "$(distdir)/$$file" \
+ || exit 1; \
+ fi; \
+ done
+check-am: all-am
+check: check-am
+all-am: Makefile
+installdirs:
+install: install-am
+install-exec: install-exec-am
+install-data: install-data-am
+uninstall: uninstall-am
+
+install-am: all-am
+ @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
+
+installcheck: installcheck-am
+install-strip:
+ if test -z '$(STRIP)'; then \
+ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+ install; \
+ else \
+ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+ "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \
+ fi
+mostlyclean-generic:
+
+clean-generic:
+
+distclean-generic:
+ -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
+ -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES)
+
+maintainer-clean-generic:
+ @echo "This command is intended for maintainers to use"
+ @echo "it deletes files that may require special tools to rebuild."
+ -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES)
+clean: clean-am
+
+clean-am: clean-generic mostlyclean-am
+
+distclean: distclean-am
+ -rm -f Makefile
+distclean-am: clean-am distclean-generic
+
+dvi: dvi-am
+
+dvi-am:
+
+html: html-am
+
+html-am:
+
+info: info-am
+
+info-am:
+
+install-data-am:
+
+install-dvi: install-dvi-am
+
+install-dvi-am:
+
+install-exec-am:
+
+install-html: install-html-am
+
+install-html-am:
+
+install-info: install-info-am
+
+install-info-am:
+
+install-man:
+
+install-pdf: install-pdf-am
+
+install-pdf-am:
+
+install-ps: install-ps-am
+
+install-ps-am:
+
+installcheck-am:
+
+maintainer-clean: maintainer-clean-am
+ -rm -f Makefile
+maintainer-clean-am: distclean-am maintainer-clean-generic
+
+mostlyclean: mostlyclean-am
+
+mostlyclean-am: mostlyclean-generic
+
+pdf: pdf-am
+
+pdf-am:
+
+ps: ps-am
+
+ps-am:
+
+uninstall-am:
+
+.MAKE: install-am install-strip
+
+.PHONY: all all-am check check-am clean clean-generic cscopelist-am \
+ ctags-am distclean distclean-generic distdir dvi dvi-am html \
+ html-am info info-am install install-am install-data \
+ install-data-am install-dvi install-dvi-am install-exec \
+ install-exec-am install-html install-html-am install-info \
+ install-info-am install-man install-pdf install-pdf-am \
+ install-ps install-ps-am install-strip installcheck \
+ installcheck-am installdirs maintainer-clean \
+ maintainer-clean-generic mostlyclean mostlyclean-generic pdf \
+ pdf-am ps ps-am tags-am uninstall uninstall-am
+
+
+# Tell versions [3.59,3.63) of GNU make to not export all variables.
+# Otherwise a system limit (for SysV at least) may be exceeded.
+.NOEXPORT:
diff --git a/collectors/freebsd.plugin/freebsd_devstat.c b/collectors/freebsd.plugin/freebsd_devstat.c
new file mode 100644
index 000000000..10279aabc
--- /dev/null
+++ b/collectors/freebsd.plugin/freebsd_devstat.c
@@ -0,0 +1,780 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "plugin_freebsd.h"
+
+#include <sys/devicestat.h>
+
+struct disk {
+ char *name;
+ uint32_t hash;
+ size_t len;
+
+ // flags
+ int configured;
+ int enabled;
+ int updated;
+
+ int do_io;
+ int do_ops;
+ int do_qops;
+ int do_util;
+ int do_iotime;
+ int do_await;
+ int do_avagsz;
+ int do_svctm;
+
+
+ // data for differential charts
+
+ struct prev_dstat {
+ collected_number bytes_read;
+ collected_number bytes_write;
+ collected_number bytes_free;
+ collected_number operations_read;
+ collected_number operations_write;
+ collected_number operations_other;
+ collected_number operations_free;
+ collected_number duration_read_ms;
+ collected_number duration_write_ms;
+ collected_number duration_other_ms;
+ collected_number duration_free_ms;
+ collected_number busy_time_ms;
+ } prev_dstat;
+
+ // charts and dimensions
+
+ RRDSET *st_io;
+ RRDDIM *rd_io_in;
+ RRDDIM *rd_io_out;
+ RRDDIM *rd_io_free;
+
+ RRDSET *st_ops;
+ RRDDIM *rd_ops_in;
+ RRDDIM *rd_ops_out;
+ RRDDIM *rd_ops_other;
+ RRDDIM *rd_ops_free;
+
+ RRDSET *st_qops;
+ RRDDIM *rd_qops;
+
+ RRDSET *st_util;
+ RRDDIM *rd_util;
+
+ RRDSET *st_iotime;
+ RRDDIM *rd_iotime_in;
+ RRDDIM *rd_iotime_out;
+ RRDDIM *rd_iotime_other;
+ RRDDIM *rd_iotime_free;
+
+ RRDSET *st_await;
+ RRDDIM *rd_await_in;
+ RRDDIM *rd_await_out;
+ RRDDIM *rd_await_other;
+ RRDDIM *rd_await_free;
+
+ RRDSET *st_avagsz;
+ RRDDIM *rd_avagsz_in;
+ RRDDIM *rd_avagsz_out;
+ RRDDIM *rd_avagsz_free;
+
+ RRDSET *st_svctm;
+ RRDDIM *rd_svctm;
+
+ struct disk *next;
+};
+
+static struct disk *disks_root = NULL, *disks_last_used = NULL;
+
+static size_t disks_added = 0, disks_found = 0;
+
+static void disk_free(struct disk *dm) {
+ if (likely(dm->st_io))
+ rrdset_is_obsolete(dm->st_io);
+ if (likely(dm->st_ops))
+ rrdset_is_obsolete(dm->st_ops);
+ if (likely(dm->st_qops))
+ rrdset_is_obsolete(dm->st_qops);
+ if (likely(dm->st_util))
+ rrdset_is_obsolete(dm->st_util);
+ if (likely(dm->st_iotime))
+ rrdset_is_obsolete(dm->st_iotime);
+ if (likely(dm->st_await))
+ rrdset_is_obsolete(dm->st_await);
+ if (likely(dm->st_avagsz))
+ rrdset_is_obsolete(dm->st_avagsz);
+ if (likely(dm->st_svctm))
+ rrdset_is_obsolete(dm->st_svctm);
+
+ disks_added--;
+ freez(dm->name);
+ freez(dm);
+}
+
+static void disks_cleanup() {
+ if (likely(disks_found == disks_added)) return;
+
+ struct disk *dm = disks_root, *last = NULL;
+ while(dm) {
+ if (unlikely(!dm->updated)) {
+ // info("Removing disk '%s', linked after '%s'", dm->name, last?last->name:"ROOT");
+
+ if (disks_last_used == dm)
+ disks_last_used = last;
+
+ struct disk *t = dm;
+
+ if (dm == disks_root || !last)
+ disks_root = dm = dm->next;
+
+ else
+ last->next = dm = dm->next;
+
+ t->next = NULL;
+ disk_free(t);
+ }
+ else {
+ last = dm;
+ dm->updated = 0;
+ dm = dm->next;
+ }
+ }
+}
+
+static struct disk *get_disk(const char *name) {
+ struct disk *dm;
+
+ uint32_t hash = simple_hash(name);
+
+ // search it, from the last position to the end
+ for(dm = disks_last_used ; dm ; dm = dm->next) {
+ if (unlikely(hash == dm->hash && !strcmp(name, dm->name))) {
+ disks_last_used = dm->next;
+ return dm;
+ }
+ }
+
+ // search it from the beginning to the last position we used
+ for(dm = disks_root ; dm != disks_last_used ; dm = dm->next) {
+ if (unlikely(hash == dm->hash && !strcmp(name, dm->name))) {
+ disks_last_used = dm->next;
+ return dm;
+ }
+ }
+
+ // create a new one
+ dm = callocz(1, sizeof(struct disk));
+ dm->name = strdupz(name);
+ dm->hash = simple_hash(dm->name);
+ dm->len = strlen(dm->name);
+ disks_added++;
+
+ // link it to the end
+ if (disks_root) {
+ struct disk *e;
+ for(e = disks_root; e->next ; e = e->next) ;
+ e->next = dm;
+ }
+ else
+ disks_root = dm;
+
+ return dm;
+}
+
+// --------------------------------------------------------------------------------------------------------------------
+// kern.devstat
+
+int do_kern_devstat(int update_every, usec_t dt) {
+
+#define DEFAULT_EXCLUDED_DISKS ""
+#define CONFIG_SECTION_KERN_DEVSTAT "plugin:freebsd:kern.devstat"
+#define BINTIME_SCALE 5.42101086242752217003726400434970855712890625e-17 // this is 1000/2^64
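+// FreeBSD's struct bintime keeps the sub-second part of a timestamp as a 64-bit
+// fixed-point fraction, i.e. frac / 2^64 of one second. Multiplying frac by
+// 1000 / 2^64 (the constant above) therefore converts it straight to milliseconds,
+// so a duration becomes: duration_ms = sec * 1000 + frac * BINTIME_SCALE.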
+
+ static int enable_new_disks = -1;
+ static int enable_pass_devices = -1, do_system_io = -1, do_io = -1, do_ops = -1, do_qops = -1, do_util = -1,
+ do_iotime = -1, do_await = -1, do_avagsz = -1, do_svctm = -1;
+ static SIMPLE_PATTERN *excluded_disks = NULL;
+
+ if (unlikely(enable_new_disks == -1)) {
+ enable_new_disks = config_get_boolean_ondemand(CONFIG_SECTION_KERN_DEVSTAT,
+ "enable new disks detected at runtime", CONFIG_BOOLEAN_AUTO);
+
+ enable_pass_devices = config_get_boolean_ondemand(CONFIG_SECTION_KERN_DEVSTAT,
+ "performance metrics for pass devices", CONFIG_BOOLEAN_AUTO);
+
+ do_system_io = config_get_boolean_ondemand(CONFIG_SECTION_KERN_DEVSTAT, "total bandwidth for all disks",
+ CONFIG_BOOLEAN_YES);
+
+ do_io = config_get_boolean_ondemand(CONFIG_SECTION_KERN_DEVSTAT, "bandwidth for all disks",
+ CONFIG_BOOLEAN_AUTO);
+ do_ops = config_get_boolean_ondemand(CONFIG_SECTION_KERN_DEVSTAT, "operations for all disks",
+ CONFIG_BOOLEAN_AUTO);
+ do_qops = config_get_boolean_ondemand(CONFIG_SECTION_KERN_DEVSTAT, "queued operations for all disks",
+ CONFIG_BOOLEAN_AUTO);
+ do_util = config_get_boolean_ondemand(CONFIG_SECTION_KERN_DEVSTAT, "utilization percentage for all disks",
+ CONFIG_BOOLEAN_AUTO);
+ do_iotime = config_get_boolean_ondemand(CONFIG_SECTION_KERN_DEVSTAT, "i/o time for all disks",
+ CONFIG_BOOLEAN_AUTO);
+ do_await = config_get_boolean_ondemand(CONFIG_SECTION_KERN_DEVSTAT, "average completed i/o time for all disks",
+ CONFIG_BOOLEAN_AUTO);
+ do_avagsz = config_get_boolean_ondemand(CONFIG_SECTION_KERN_DEVSTAT, "average completed i/o bandwidth for all disks",
+ CONFIG_BOOLEAN_AUTO);
+ do_svctm = config_get_boolean_ondemand(CONFIG_SECTION_KERN_DEVSTAT, "average service time for all disks",
+ CONFIG_BOOLEAN_AUTO);
+
+ excluded_disks = simple_pattern_create(
+                config_get(CONFIG_SECTION_KERN_DEVSTAT, "disable by default disks matching", DEFAULT_EXCLUDED_DISKS)
+ , NULL
+ , SIMPLE_PATTERN_EXACT
+ );
+ }
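+
+    // As an illustration only (the disk pattern below is hypothetical), the options
+    // read above correspond to a netdata configuration section such as:
+    //
+    //   [plugin:freebsd:kern.devstat]
+    //       enable new disks detected at runtime = auto
+    //       performance metrics for pass devices = auto
+    //       disable by default disks matching = ada* pass*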
+
+ if (likely(do_system_io || do_io || do_ops || do_qops || do_util || do_iotime || do_await || do_avagsz || do_svctm)) {
+ static int mib_numdevs[3] = {0, 0, 0};
+ int numdevs;
+ int common_error = 0;
+
+ if (unlikely(GETSYSCTL_SIMPLE("kern.devstat.numdevs", mib_numdevs, numdevs))) {
+ common_error = 1;
+ } else {
+ static int mib_devstat[3] = {0, 0, 0};
+ static void *devstat_data = NULL;
+ static int old_numdevs = 0;
+
+ if (unlikely(numdevs != old_numdevs)) {
+ devstat_data = reallocz(devstat_data, sizeof(long) + sizeof(struct devstat) *
+                                                      numdevs); // there is a generation number before the devstat structures
+ old_numdevs = numdevs;
+ }
+ if (unlikely(GETSYSCTL_WSIZE("kern.devstat.all", mib_devstat, devstat_data,
+ sizeof(long) + sizeof(struct devstat) * numdevs))) {
+ common_error = 1;
+ } else {
+ struct devstat *dstat;
+ int i;
+ collected_number total_disk_kbytes_read = 0;
+ collected_number total_disk_kbytes_write = 0;
+
+ disks_found = 0;
+
+ dstat = devstat_data + sizeof(long); // skip generation number
+
+ for (i = 0; i < numdevs; i++) {
+ if (likely(do_system_io)) {
+ if (((dstat[i].device_type & DEVSTAT_TYPE_MASK) == DEVSTAT_TYPE_DIRECT) ||
+ ((dstat[i].device_type & DEVSTAT_TYPE_MASK) == DEVSTAT_TYPE_STORARRAY)) {
+ total_disk_kbytes_read += dstat[i].bytes[DEVSTAT_READ] / KILO_FACTOR;
+ total_disk_kbytes_write += dstat[i].bytes[DEVSTAT_WRITE] / KILO_FACTOR;
+ }
+ }
+
+ if (unlikely(!enable_pass_devices))
+ if ((dstat[i].device_type & DEVSTAT_TYPE_PASS) == DEVSTAT_TYPE_PASS)
+ continue;
+
+ if (((dstat[i].device_type & DEVSTAT_TYPE_MASK) == DEVSTAT_TYPE_DIRECT) ||
+ ((dstat[i].device_type & DEVSTAT_TYPE_MASK) == DEVSTAT_TYPE_STORARRAY)) {
+ char disk[DEVSTAT_NAME_LEN + MAX_INT_DIGITS + 1];
+ struct cur_dstat {
+ collected_number duration_read_ms;
+ collected_number duration_write_ms;
+ collected_number duration_other_ms;
+ collected_number duration_free_ms;
+ collected_number busy_time_ms;
+ } cur_dstat;
+
+ sprintf(disk, "%s%d", dstat[i].device_name, dstat[i].unit_number);
+
+ struct disk *dm = get_disk(disk);
+ dm->updated = 1;
+ disks_found++;
+
+ if(unlikely(!dm->configured)) {
+ char var_name[4096 + 1];
+
+ // this is the first time we see this disk
+
+ // remember we configured it
+ dm->configured = 1;
+
+ dm->enabled = enable_new_disks;
+
+ if (likely(dm->enabled))
+ dm->enabled = !simple_pattern_matches(excluded_disks, disk);
+
+ snprintfz(var_name, 4096, "%s:%s", CONFIG_SECTION_KERN_DEVSTAT, disk);
+ dm->enabled = config_get_boolean_ondemand(var_name, "enabled", dm->enabled);
+
+ dm->do_io = config_get_boolean_ondemand(var_name, "bandwidth", do_io);
+ dm->do_ops = config_get_boolean_ondemand(var_name, "operations", do_ops);
+ dm->do_qops = config_get_boolean_ondemand(var_name, "queued operations", do_qops);
+ dm->do_util = config_get_boolean_ondemand(var_name, "utilization percentage", do_util);
+ dm->do_iotime = config_get_boolean_ondemand(var_name, "i/o time", do_iotime);
+ dm->do_await = config_get_boolean_ondemand(var_name, "average completed i/o time",
+ do_await);
+ dm->do_avagsz = config_get_boolean_ondemand(var_name, "average completed i/o bandwidth",
+ do_avagsz);
+ dm->do_svctm = config_get_boolean_ondemand(var_name, "average service time", do_svctm);
+
+ // initialise data for differential charts
+
+ dm->prev_dstat.bytes_read = dstat[i].bytes[DEVSTAT_READ];
+ dm->prev_dstat.bytes_write = dstat[i].bytes[DEVSTAT_WRITE];
+ dm->prev_dstat.bytes_free = dstat[i].bytes[DEVSTAT_FREE];
+ dm->prev_dstat.operations_read = dstat[i].operations[DEVSTAT_READ];
+ dm->prev_dstat.operations_write = dstat[i].operations[DEVSTAT_WRITE];
+ dm->prev_dstat.operations_other = dstat[i].operations[DEVSTAT_NO_DATA];
+ dm->prev_dstat.operations_free = dstat[i].operations[DEVSTAT_FREE];
+ dm->prev_dstat.duration_read_ms = dstat[i].duration[DEVSTAT_READ].sec * 1000
+ + dstat[i].duration[DEVSTAT_READ].frac * BINTIME_SCALE;
+ dm->prev_dstat.duration_write_ms = dstat[i].duration[DEVSTAT_WRITE].sec * 1000
+ + dstat[i].duration[DEVSTAT_WRITE].frac * BINTIME_SCALE;
+ dm->prev_dstat.duration_other_ms = dstat[i].duration[DEVSTAT_NO_DATA].sec * 1000
+ + dstat[i].duration[DEVSTAT_NO_DATA].frac * BINTIME_SCALE;
+ dm->prev_dstat.duration_free_ms = dstat[i].duration[DEVSTAT_FREE].sec * 1000
+ + dstat[i].duration[DEVSTAT_FREE].frac * BINTIME_SCALE;
+ dm->prev_dstat.busy_time_ms = dstat[i].busy_time.sec * 1000
+ + dstat[i].busy_time.frac * BINTIME_SCALE;
+ }
+
+ cur_dstat.duration_read_ms = dstat[i].duration[DEVSTAT_READ].sec * 1000
+ + dstat[i].duration[DEVSTAT_READ].frac * BINTIME_SCALE;
+ cur_dstat.duration_write_ms = dstat[i].duration[DEVSTAT_WRITE].sec * 1000
+ + dstat[i].duration[DEVSTAT_WRITE].frac * BINTIME_SCALE;
+ cur_dstat.duration_other_ms = dstat[i].duration[DEVSTAT_NO_DATA].sec * 1000
+ + dstat[i].duration[DEVSTAT_NO_DATA].frac * BINTIME_SCALE;
+ cur_dstat.duration_free_ms = dstat[i].duration[DEVSTAT_FREE].sec * 1000
+ + dstat[i].duration[DEVSTAT_FREE].frac * BINTIME_SCALE;
+
+ cur_dstat.busy_time_ms = dstat[i].busy_time.sec * 1000 + dstat[i].busy_time.frac * BINTIME_SCALE;
+
+ // --------------------------------------------------------------------
+
+ if(dm->do_io == CONFIG_BOOLEAN_YES || (dm->do_io == CONFIG_BOOLEAN_AUTO &&
+ (dstat[i].bytes[DEVSTAT_READ] ||
+ dstat[i].bytes[DEVSTAT_WRITE] ||
+ dstat[i].bytes[DEVSTAT_FREE]))) {
+ if (unlikely(!dm->st_io)) {
+ dm->st_io = rrdset_create_localhost("disk",
+ disk,
+ NULL,
+ disk,
+ "disk.io",
+ "Disk I/O Bandwidth",
+ "kilobytes/s",
+ "freebsd.plugin",
+ "devstat",
+ NETDATA_CHART_PRIO_DISK_IO,
+ update_every,
+ RRDSET_TYPE_AREA
+ );
+
+ dm->rd_io_in = rrddim_add(dm->st_io, "reads", NULL, 1, KILO_FACTOR,
+ RRD_ALGORITHM_INCREMENTAL);
+ dm->rd_io_out = rrddim_add(dm->st_io, "writes", NULL, -1, KILO_FACTOR,
+ RRD_ALGORITHM_INCREMENTAL);
+ dm->rd_io_free = rrddim_add(dm->st_io, "frees", NULL, -1, KILO_FACTOR,
+ RRD_ALGORITHM_INCREMENTAL);
+ } else
+ rrdset_next(dm->st_io);
+
+ rrddim_set_by_pointer(dm->st_io, dm->rd_io_in, dstat[i].bytes[DEVSTAT_READ]);
+ rrddim_set_by_pointer(dm->st_io, dm->rd_io_out, dstat[i].bytes[DEVSTAT_WRITE]);
+ rrddim_set_by_pointer(dm->st_io, dm->rd_io_free, dstat[i].bytes[DEVSTAT_FREE]);
+ rrdset_done(dm->st_io);
+ }
+
+ // --------------------------------------------------------------------
+
+ if(dm->do_ops == CONFIG_BOOLEAN_YES || (dm->do_ops == CONFIG_BOOLEAN_AUTO &&
+ (dstat[i].operations[DEVSTAT_READ] ||
+ dstat[i].operations[DEVSTAT_WRITE] ||
+ dstat[i].operations[DEVSTAT_NO_DATA] ||
+ dstat[i].operations[DEVSTAT_FREE]))) {
+ if (unlikely(!dm->st_ops)) {
+ dm->st_ops = rrdset_create_localhost("disk_ops",
+ disk,
+ NULL,
+ disk,
+ "disk.ops",
+ "Disk Completed I/O Operations",
+ "operations/s",
+ "freebsd.plugin",
+ "devstat",
+ NETDATA_CHART_PRIO_DISK_OPS,
+ update_every,
+ RRDSET_TYPE_LINE
+ );
+
+ rrdset_flag_set(dm->st_ops, RRDSET_FLAG_DETAIL);
+
+ dm->rd_ops_in = rrddim_add(dm->st_ops, "reads", NULL, 1, 1,
+ RRD_ALGORITHM_INCREMENTAL);
+ dm->rd_ops_out = rrddim_add(dm->st_ops, "writes", NULL, -1, 1,
+ RRD_ALGORITHM_INCREMENTAL);
+ dm->rd_ops_other = rrddim_add(dm->st_ops, "other", NULL, 1, 1,
+ RRD_ALGORITHM_INCREMENTAL);
+ dm->rd_ops_free = rrddim_add(dm->st_ops, "frees", NULL, -1, 1,
+ RRD_ALGORITHM_INCREMENTAL);
+ } else
+ rrdset_next(dm->st_ops);
+
+ rrddim_set_by_pointer(dm->st_ops, dm->rd_ops_in, dstat[i].operations[DEVSTAT_READ]);
+ rrddim_set_by_pointer(dm->st_ops, dm->rd_ops_out, dstat[i].operations[DEVSTAT_WRITE]);
+ rrddim_set_by_pointer(dm->st_ops, dm->rd_ops_other, dstat[i].operations[DEVSTAT_NO_DATA]);
+ rrddim_set_by_pointer(dm->st_ops, dm->rd_ops_free, dstat[i].operations[DEVSTAT_FREE]);
+ rrdset_done(dm->st_ops);
+ }
+
+ // --------------------------------------------------------------------
+
+ if(dm->do_qops == CONFIG_BOOLEAN_YES || (dm->do_qops == CONFIG_BOOLEAN_AUTO &&
+ (dstat[i].start_count || dstat[i].end_count))) {
+ if (unlikely(!dm->st_qops)) {
+ dm->st_qops = rrdset_create_localhost("disk_qops",
+ disk,
+ NULL,
+ disk,
+ "disk.qops",
+ "Disk Current I/O Operations",
+ "operations",
+ "freebsd.plugin",
+ "devstat",
+ NETDATA_CHART_PRIO_DISK_QOPS,
+ update_every,
+ RRDSET_TYPE_LINE
+ );
+
+ rrdset_flag_set(dm->st_qops, RRDSET_FLAG_DETAIL);
+
+ dm->rd_qops = rrddim_add(dm->st_qops, "operations", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ } else
+ rrdset_next(dm->st_qops);
+
+ rrddim_set_by_pointer(dm->st_qops, dm->rd_qops, dstat[i].start_count - dstat[i].end_count);
+ rrdset_done(dm->st_qops);
+ }
+
+ // --------------------------------------------------------------------
+
+ if(dm->do_util == CONFIG_BOOLEAN_YES || (dm->do_util == CONFIG_BOOLEAN_AUTO &&
+ cur_dstat.busy_time_ms)) {
+ if (unlikely(!dm->st_util)) {
+ dm->st_util = rrdset_create_localhost("disk_util",
+ disk,
+ NULL,
+ disk,
+ "disk.util",
+ "Disk Utilization Time",
+ "% of time working",
+ "freebsd.plugin",
+ "devstat",
+ NETDATA_CHART_PRIO_DISK_UTIL,
+ update_every,
+ RRDSET_TYPE_AREA
+ );
+
+ rrdset_flag_set(dm->st_util, RRDSET_FLAG_DETAIL);
+
+ dm->rd_util = rrddim_add(dm->st_util, "utilization", NULL, 1, 10,
+ RRD_ALGORITHM_INCREMENTAL);
+ } else
+ rrdset_next(dm->st_util);
+
+ rrddim_set_by_pointer(dm->st_util, dm->rd_util, cur_dstat.busy_time_ms);
+ rrdset_done(dm->st_util);
+ }
+
+ // --------------------------------------------------------------------
+
+ if(dm->do_iotime == CONFIG_BOOLEAN_YES || (dm->do_iotime == CONFIG_BOOLEAN_AUTO &&
+ (cur_dstat.duration_read_ms ||
+ cur_dstat.duration_write_ms ||
+ cur_dstat.duration_other_ms ||
+ cur_dstat.duration_free_ms))) {
+ if (unlikely(!dm->st_iotime)) {
+ dm->st_iotime = rrdset_create_localhost("disk_iotime",
+ disk,
+ NULL,
+ disk,
+ "disk.iotime",
+ "Disk Total I/O Time",
+ "milliseconds/s",
+ "freebsd.plugin",
+ "devstat",
+ NETDATA_CHART_PRIO_DISK_IOTIME,
+ update_every,
+ RRDSET_TYPE_LINE
+ );
+
+ rrdset_flag_set(dm->st_iotime, RRDSET_FLAG_DETAIL);
+
+ dm->rd_iotime_in = rrddim_add(dm->st_iotime, "reads", NULL, 1, 1,
+ RRD_ALGORITHM_INCREMENTAL);
+ dm->rd_iotime_out = rrddim_add(dm->st_iotime, "writes", NULL, -1, 1,
+ RRD_ALGORITHM_INCREMENTAL);
+ dm->rd_iotime_other = rrddim_add(dm->st_iotime, "other", NULL, 1, 1,
+ RRD_ALGORITHM_INCREMENTAL);
+ dm->rd_iotime_free = rrddim_add(dm->st_iotime, "frees", NULL, -1, 1,
+ RRD_ALGORITHM_INCREMENTAL);
+ } else
+ rrdset_next(dm->st_iotime);
+
+ rrddim_set_by_pointer(dm->st_iotime, dm->rd_iotime_in, cur_dstat.duration_read_ms);
+ rrddim_set_by_pointer(dm->st_iotime, dm->rd_iotime_out, cur_dstat.duration_write_ms);
+ rrddim_set_by_pointer(dm->st_iotime, dm->rd_iotime_other, cur_dstat.duration_other_ms);
+ rrddim_set_by_pointer(dm->st_iotime, dm->rd_iotime_free, cur_dstat.duration_free_ms);
+ rrdset_done(dm->st_iotime);
+ }
+
+ // --------------------------------------------------------------------
+ // calculate differential charts
+ // only if this is not the first time we run
+
+ if (likely(dt)) {
+
+ // --------------------------------------------------------------------
+
+ if(dm->do_await == CONFIG_BOOLEAN_YES || (dm->do_await == CONFIG_BOOLEAN_AUTO &&
+ (dstat[i].operations[DEVSTAT_READ] ||
+ dstat[i].operations[DEVSTAT_WRITE] ||
+ dstat[i].operations[DEVSTAT_NO_DATA] ||
+ dstat[i].operations[DEVSTAT_FREE]))) {
+ if (unlikely(!dm->st_await)) {
+ dm->st_await = rrdset_create_localhost("disk_await",
+ disk,
+ NULL,
+ disk,
+ "disk.await",
+ "Average Completed I/O Operation Time",
+ "ms per operation",
+ "freebsd.plugin",
+ "devstat",
+ NETDATA_CHART_PRIO_DISK_AWAIT,
+ update_every,
+ RRDSET_TYPE_LINE
+ );
+
+ rrdset_flag_set(dm->st_await, RRDSET_FLAG_DETAIL);
+
+ dm->rd_await_in = rrddim_add(dm->st_await, "reads", NULL, 1, 1,
+ RRD_ALGORITHM_ABSOLUTE);
+ dm->rd_await_out = rrddim_add(dm->st_await, "writes", NULL, -1, 1,
+ RRD_ALGORITHM_ABSOLUTE);
+ dm->rd_await_other = rrddim_add(dm->st_await, "other", NULL, 1, 1,
+ RRD_ALGORITHM_ABSOLUTE);
+ dm->rd_await_free = rrddim_add(dm->st_await, "frees", NULL, -1, 1,
+ RRD_ALGORITHM_ABSOLUTE);
+ } else
+ rrdset_next(dm->st_await);
+
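+                        // await = (delta of summed I/O duration in ms) / (delta of completed operations);
+                        // intervals with no completed operations of a kind report 0 instead of dividing by zero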
+ rrddim_set_by_pointer(dm->st_await, dm->rd_await_in,
+ (dstat[i].operations[DEVSTAT_READ] -
+ dm->prev_dstat.operations_read) ?
+ (cur_dstat.duration_read_ms - dm->prev_dstat.duration_read_ms) /
+ (dstat[i].operations[DEVSTAT_READ] -
+ dm->prev_dstat.operations_read) :
+ 0);
+ rrddim_set_by_pointer(dm->st_await, dm->rd_await_out,
+ (dstat[i].operations[DEVSTAT_WRITE] -
+ dm->prev_dstat.operations_write) ?
+ (cur_dstat.duration_write_ms - dm->prev_dstat.duration_write_ms) /
+ (dstat[i].operations[DEVSTAT_WRITE] -
+ dm->prev_dstat.operations_write) :
+ 0);
+ rrddim_set_by_pointer(dm->st_await, dm->rd_await_other,
+ (dstat[i].operations[DEVSTAT_NO_DATA] -
+ dm->prev_dstat.operations_other) ?
+ (cur_dstat.duration_other_ms - dm->prev_dstat.duration_other_ms) /
+ (dstat[i].operations[DEVSTAT_NO_DATA] -
+ dm->prev_dstat.operations_other) :
+ 0);
+ rrddim_set_by_pointer(dm->st_await, dm->rd_await_free,
+ (dstat[i].operations[DEVSTAT_FREE] -
+ dm->prev_dstat.operations_free) ?
+ (cur_dstat.duration_free_ms - dm->prev_dstat.duration_free_ms) /
+ (dstat[i].operations[DEVSTAT_FREE] -
+ dm->prev_dstat.operations_free) :
+ 0);
+ rrdset_done(dm->st_await);
+ }
+
+ // --------------------------------------------------------------------
+
+ if(dm->do_avagsz == CONFIG_BOOLEAN_YES || (dm->do_avagsz == CONFIG_BOOLEAN_AUTO &&
+ (dstat[i].operations[DEVSTAT_READ] ||
+ dstat[i].operations[DEVSTAT_WRITE] ||
+ dstat[i].operations[DEVSTAT_FREE]))) {
+ if (unlikely(!dm->st_avagsz)) {
+ dm->st_avagsz = rrdset_create_localhost("disk_avgsz",
+ disk,
+ NULL,
+ disk,
+ "disk.avgsz",
+ "Average Completed I/O Operation Bandwidth",
+ "kilobytes per operation",
+ "freebsd.plugin",
+ "devstat",
+ NETDATA_CHART_PRIO_DISK_AVGSZ,
+ update_every,
+ RRDSET_TYPE_AREA
+ );
+
+ rrdset_flag_set(dm->st_avagsz, RRDSET_FLAG_DETAIL);
+
+ dm->rd_avagsz_in = rrddim_add(dm->st_avagsz, "reads", NULL, 1, KILO_FACTOR,
+ RRD_ALGORITHM_ABSOLUTE);
+ dm->rd_avagsz_out = rrddim_add(dm->st_avagsz, "writes", NULL, -1, KILO_FACTOR,
+ RRD_ALGORITHM_ABSOLUTE);
+ dm->rd_avagsz_free = rrddim_add(dm->st_avagsz, "frees", NULL, -1, KILO_FACTOR,
+ RRD_ALGORITHM_ABSOLUTE);
+ } else
+ rrdset_next(dm->st_avagsz);
+
+ rrddim_set_by_pointer(dm->st_avagsz, dm->rd_avagsz_in,
+ (dstat[i].operations[DEVSTAT_READ] -
+ dm->prev_dstat.operations_read) ?
+ (dstat[i].bytes[DEVSTAT_READ] - dm->prev_dstat.bytes_read) /
+ (dstat[i].operations[DEVSTAT_READ] -
+ dm->prev_dstat.operations_read) :
+ 0);
+ rrddim_set_by_pointer(dm->st_avagsz, dm->rd_avagsz_out,
+ (dstat[i].operations[DEVSTAT_WRITE] -
+ dm->prev_dstat.operations_write) ?
+ (dstat[i].bytes[DEVSTAT_WRITE] - dm->prev_dstat.bytes_write) /
+ (dstat[i].operations[DEVSTAT_WRITE] -
+ dm->prev_dstat.operations_write) :
+ 0);
+ rrddim_set_by_pointer(dm->st_avagsz, dm->rd_avagsz_free,
+ (dstat[i].operations[DEVSTAT_FREE] -
+ dm->prev_dstat.operations_free) ?
+ (dstat[i].bytes[DEVSTAT_FREE] - dm->prev_dstat.bytes_free) /
+ (dstat[i].operations[DEVSTAT_FREE] -
+ dm->prev_dstat.operations_free) :
+ 0);
+ rrdset_done(dm->st_avagsz);
+ }
+
+ // --------------------------------------------------------------------
+
+ if(dm->do_svctm == CONFIG_BOOLEAN_YES || (dm->do_svctm == CONFIG_BOOLEAN_AUTO &&
+ (dstat[i].operations[DEVSTAT_READ] ||
+ dstat[i].operations[DEVSTAT_WRITE] ||
+ dstat[i].operations[DEVSTAT_NO_DATA] ||
+ dstat[i].operations[DEVSTAT_FREE]))) {
+ if (unlikely(!dm->st_svctm)) {
+ dm->st_svctm = rrdset_create_localhost("disk_svctm",
+ disk,
+ NULL,
+ disk,
+ "disk.svctm",
+ "Average Service Time",
+ "ms per operation",
+ "freebsd.plugin",
+ "devstat",
+ NETDATA_CHART_PRIO_DISK_SVCTM,
+ update_every,
+ RRDSET_TYPE_LINE
+ );
+
+ rrdset_flag_set(dm->st_svctm, RRDSET_FLAG_DETAIL);
+
+ dm->rd_svctm = rrddim_add(dm->st_svctm, "svctm", NULL, 1, 1,
+ RRD_ALGORITHM_ABSOLUTE);
+ } else
+ rrdset_next(dm->st_svctm);
+
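+                        // svctm = (delta of busy time in ms) / (delta of all completed operations),
+                        // again falling back to 0 when no operations completed in this interval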
+ rrddim_set_by_pointer(dm->st_svctm, dm->rd_svctm,
+ ((dstat[i].operations[DEVSTAT_READ] - dm->prev_dstat.operations_read) +
+ (dstat[i].operations[DEVSTAT_WRITE] - dm->prev_dstat.operations_write) +
+ (dstat[i].operations[DEVSTAT_NO_DATA] - dm->prev_dstat.operations_other) +
+ (dstat[i].operations[DEVSTAT_FREE] - dm->prev_dstat.operations_free)) ?
+ (cur_dstat.busy_time_ms - dm->prev_dstat.busy_time_ms) /
+ ((dstat[i].operations[DEVSTAT_READ] - dm->prev_dstat.operations_read) +
+ (dstat[i].operations[DEVSTAT_WRITE] - dm->prev_dstat.operations_write) +
+ (dstat[i].operations[DEVSTAT_NO_DATA] - dm->prev_dstat.operations_other) +
+ (dstat[i].operations[DEVSTAT_FREE] - dm->prev_dstat.operations_free)) :
+ 0);
+ rrdset_done(dm->st_svctm);
+ }
+
+ // --------------------------------------------------------------------
+
+ dm->prev_dstat.bytes_read = dstat[i].bytes[DEVSTAT_READ];
+ dm->prev_dstat.bytes_write = dstat[i].bytes[DEVSTAT_WRITE];
+ dm->prev_dstat.bytes_free = dstat[i].bytes[DEVSTAT_FREE];
+ dm->prev_dstat.operations_read = dstat[i].operations[DEVSTAT_READ];
+ dm->prev_dstat.operations_write = dstat[i].operations[DEVSTAT_WRITE];
+ dm->prev_dstat.operations_other = dstat[i].operations[DEVSTAT_NO_DATA];
+ dm->prev_dstat.operations_free = dstat[i].operations[DEVSTAT_FREE];
+ dm->prev_dstat.duration_read_ms = cur_dstat.duration_read_ms;
+ dm->prev_dstat.duration_write_ms = cur_dstat.duration_write_ms;
+ dm->prev_dstat.duration_other_ms = cur_dstat.duration_other_ms;
+ dm->prev_dstat.duration_free_ms = cur_dstat.duration_free_ms;
+ dm->prev_dstat.busy_time_ms = cur_dstat.busy_time_ms;
+ }
+ }
+ }
+
+ // --------------------------------------------------------------------
+
+ if (likely(do_system_io)) {
+ static RRDSET *st = NULL;
+ static RRDDIM *rd_in = NULL, *rd_out = NULL;
+
+ if (unlikely(!st)) {
+ st = rrdset_create_localhost("system",
+ "io",
+ NULL,
+ "disk",
+ NULL,
+ "Disk I/O",
+ "kilobytes/s",
+ "freebsd.plugin",
+ "devstat",
+ NETDATA_CHART_PRIO_SYSTEM_IO,
+ update_every,
+ RRDSET_TYPE_AREA
+ );
+
+ rd_in = rrddim_add(st, "in", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_out = rrddim_add(st, "out", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ } else
+ rrdset_next(st);
+
+ rrddim_set_by_pointer(st, rd_in, total_disk_kbytes_read);
+ rrddim_set_by_pointer(st, rd_out, total_disk_kbytes_write);
+ rrdset_done(st);
+ }
+ }
+ }
+ if (unlikely(common_error)) {
+ do_system_io = 0;
+ error("DISABLED: system.io chart");
+ do_io = 0;
+ error("DISABLED: disk.* charts");
+ do_ops = 0;
+ error("DISABLED: disk_ops.* charts");
+ do_qops = 0;
+ error("DISABLED: disk_qops.* charts");
+ do_util = 0;
+ error("DISABLED: disk_util.* charts");
+ do_iotime = 0;
+ error("DISABLED: disk_iotime.* charts");
+ do_await = 0;
+ error("DISABLED: disk_await.* charts");
+ do_avagsz = 0;
+ error("DISABLED: disk_avgsz.* charts");
+ do_svctm = 0;
+ error("DISABLED: disk_svctm.* charts");
+ error("DISABLED: kern.devstat module");
+ return 1;
+ }
+ } else {
+ error("DISABLED: kern.devstat module");
+ return 1;
+ }
+
+ disks_cleanup();
+
+ return 0;
+}
diff --git a/collectors/freebsd.plugin/freebsd_getifaddrs.c b/collectors/freebsd.plugin/freebsd_getifaddrs.c
new file mode 100644
index 000000000..e15845857
--- /dev/null
+++ b/collectors/freebsd.plugin/freebsd_getifaddrs.c
@@ -0,0 +1,618 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "plugin_freebsd.h"
+
+#include <ifaddrs.h>
+
+struct cgroup_network_interface {
+ char *name;
+ uint32_t hash;
+ size_t len;
+
+ // flags
+ int configured;
+ int enabled;
+ int updated;
+
+ int do_bandwidth;
+ int do_packets;
+ int do_errors;
+ int do_drops;
+ int do_events;
+
+ // charts and dimensions
+
+ RRDSET *st_bandwidth;
+ RRDDIM *rd_bandwidth_in;
+ RRDDIM *rd_bandwidth_out;
+
+ RRDSET *st_packets;
+ RRDDIM *rd_packets_in;
+ RRDDIM *rd_packets_out;
+ RRDDIM *rd_packets_m_in;
+ RRDDIM *rd_packets_m_out;
+
+ RRDSET *st_errors;
+ RRDDIM *rd_errors_in;
+ RRDDIM *rd_errors_out;
+
+ RRDSET *st_drops;
+ RRDDIM *rd_drops_in;
+ RRDDIM *rd_drops_out;
+
+ RRDSET *st_events;
+ RRDDIM *rd_events_coll;
+
+ struct cgroup_network_interface *next;
+};
+
+static struct cgroup_network_interface *network_interfaces_root = NULL, *network_interfaces_last_used = NULL;
+
+static size_t network_interfaces_added = 0, network_interfaces_found = 0;
+
+static void network_interface_free(struct cgroup_network_interface *ifm) {
+ if (likely(ifm->st_bandwidth))
+ rrdset_is_obsolete(ifm->st_bandwidth);
+ if (likely(ifm->st_packets))
+ rrdset_is_obsolete(ifm->st_packets);
+ if (likely(ifm->st_errors))
+ rrdset_is_obsolete(ifm->st_errors);
+ if (likely(ifm->st_drops))
+ rrdset_is_obsolete(ifm->st_drops);
+ if (likely(ifm->st_events))
+ rrdset_is_obsolete(ifm->st_events);
+
+ network_interfaces_added--;
+ freez(ifm->name);
+ freez(ifm);
+}
+
+static void network_interfaces_cleanup() {
+ if (likely(network_interfaces_found == network_interfaces_added)) return;
+
+ struct cgroup_network_interface *ifm = network_interfaces_root, *last = NULL;
+ while(ifm) {
+ if (unlikely(!ifm->updated)) {
+ // info("Removing network interface '%s', linked after '%s'", ifm->name, last?last->name:"ROOT");
+
+ if (network_interfaces_last_used == ifm)
+ network_interfaces_last_used = last;
+
+ struct cgroup_network_interface *t = ifm;
+
+ if (ifm == network_interfaces_root || !last)
+ network_interfaces_root = ifm = ifm->next;
+
+ else
+ last->next = ifm = ifm->next;
+
+ t->next = NULL;
+ network_interface_free(t);
+ }
+ else {
+ last = ifm;
+ ifm->updated = 0;
+ ifm = ifm->next;
+ }
+ }
+}
+
+static struct cgroup_network_interface *get_network_interface(const char *name) {
+ struct cgroup_network_interface *ifm;
+
+ uint32_t hash = simple_hash(name);
+
+ // search it, from the last position to the end
+ for(ifm = network_interfaces_last_used ; ifm ; ifm = ifm->next) {
+ if (unlikely(hash == ifm->hash && !strcmp(name, ifm->name))) {
+ network_interfaces_last_used = ifm->next;
+ return ifm;
+ }
+ }
+
+ // search it from the beginning to the last position we used
+ for(ifm = network_interfaces_root ; ifm != network_interfaces_last_used ; ifm = ifm->next) {
+ if (unlikely(hash == ifm->hash && !strcmp(name, ifm->name))) {
+ network_interfaces_last_used = ifm->next;
+ return ifm;
+ }
+ }
+
+ // create a new one
+ ifm = callocz(1, sizeof(struct cgroup_network_interface));
+ ifm->name = strdupz(name);
+ ifm->hash = simple_hash(ifm->name);
+ ifm->len = strlen(ifm->name);
+ network_interfaces_added++;
+
+ // link it to the end
+ if (network_interfaces_root) {
+ struct cgroup_network_interface *e;
+ for(e = network_interfaces_root; e->next ; e = e->next) ;
+ e->next = ifm;
+ }
+ else
+ network_interfaces_root = ifm;
+
+ return ifm;
+}
+
+// --------------------------------------------------------------------------------------------------------------------
+// getifaddrs
+
+int do_getifaddrs(int update_every, usec_t dt) {
+ (void)dt;
+
+#define DEFAULT_EXCLUDED_INTERFACES "lo*"
+#define DEFAULT_PHYSICAL_INTERFACES "igb* ix* cxl* em* ixl* ixlv* bge* ixgbe*"
+#define CONFIG_SECTION_GETIFADDRS "plugin:freebsd:getifaddrs"
+
+ static int enable_new_interfaces = -1;
+ static int do_bandwidth_ipv4 = -1, do_bandwidth_ipv6 = -1, do_bandwidth = -1, do_packets = -1, do_bandwidth_net = -1, do_packets_net = -1,
+ do_errors = -1, do_drops = -1, do_events = -1;
+ static SIMPLE_PATTERN *excluded_interfaces = NULL, *physical_interfaces = NULL;
+
+ if (unlikely(enable_new_interfaces == -1)) {
+ enable_new_interfaces = config_get_boolean_ondemand(CONFIG_SECTION_GETIFADDRS,
+ "enable new interfaces detected at runtime",
+ CONFIG_BOOLEAN_AUTO);
+
+ do_bandwidth_net = config_get_boolean_ondemand(CONFIG_SECTION_GETIFADDRS, "total bandwidth for physical interfaces",
+ CONFIG_BOOLEAN_AUTO);
+ do_packets_net = config_get_boolean_ondemand(CONFIG_SECTION_GETIFADDRS, "total packets for physical interfaces",
+ CONFIG_BOOLEAN_AUTO);
+ do_bandwidth_ipv4 = config_get_boolean_ondemand(CONFIG_SECTION_GETIFADDRS, "total bandwidth for ipv4 interfaces",
+ CONFIG_BOOLEAN_AUTO);
+ do_bandwidth_ipv6 = config_get_boolean_ondemand(CONFIG_SECTION_GETIFADDRS, "total bandwidth for ipv6 interfaces",
+ CONFIG_BOOLEAN_AUTO);
+ do_bandwidth = config_get_boolean_ondemand(CONFIG_SECTION_GETIFADDRS, "bandwidth for all interfaces",
+ CONFIG_BOOLEAN_AUTO);
+ do_packets = config_get_boolean_ondemand(CONFIG_SECTION_GETIFADDRS, "packets for all interfaces",
+ CONFIG_BOOLEAN_AUTO);
+ do_errors = config_get_boolean_ondemand(CONFIG_SECTION_GETIFADDRS, "errors for all interfaces",
+ CONFIG_BOOLEAN_AUTO);
+ do_drops = config_get_boolean_ondemand(CONFIG_SECTION_GETIFADDRS, "drops for all interfaces",
+ CONFIG_BOOLEAN_AUTO);
+ do_events = config_get_boolean_ondemand(CONFIG_SECTION_GETIFADDRS, "collisions for all interfaces",
+ CONFIG_BOOLEAN_AUTO);
+
+ excluded_interfaces = simple_pattern_create(
+                config_get(CONFIG_SECTION_GETIFADDRS, "disable by default interfaces matching", DEFAULT_EXCLUDED_INTERFACES)
+ , NULL
+ , SIMPLE_PATTERN_EXACT
+ );
+ physical_interfaces = simple_pattern_create(
+ config_get(CONFIG_SECTION_GETIFADDRS, "set physical interfaces for system.net", DEFAULT_PHYSICAL_INTERFACES)
+ , NULL
+ , SIMPLE_PATTERN_EXACT
+ );
+ }
+
+ if (likely(do_bandwidth_ipv4 || do_bandwidth_ipv6 || do_bandwidth || do_packets || do_errors || do_bandwidth_net || do_packets_net ||
+ do_drops || do_events)) {
+ struct ifaddrs *ifap;
+
+ if (unlikely(getifaddrs(&ifap))) {
+ error("FREEBSD: getifaddrs() failed");
+ do_bandwidth_net = 0;
+ error("DISABLED: system.net chart");
+ do_packets_net = 0;
+ error("DISABLED: system.packets chart");
+ do_bandwidth_ipv4 = 0;
+ error("DISABLED: system.ipv4 chart");
+ do_bandwidth_ipv6 = 0;
+ error("DISABLED: system.ipv6 chart");
+ do_bandwidth = 0;
+ error("DISABLED: net.* charts");
+ do_packets = 0;
+ error("DISABLED: net_packets.* charts");
+ do_errors = 0;
+ error("DISABLED: net_errors.* charts");
+ do_drops = 0;
+ error("DISABLED: net_drops.* charts");
+ do_events = 0;
+ error("DISABLED: net_events.* charts");
+ error("DISABLED: getifaddrs module");
+ return 1;
+ } else {
+#define IFA_DATA(s) (((struct if_data *)ifa->ifa_data)->ifi_ ## s)
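+// e.g. IFA_DATA(ibytes) expands to ((struct if_data *)ifa->ifa_data)->ifi_ibytes,
+// the received-bytes counter that getifaddrs() exposes for the current interface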
+ struct ifaddrs *ifa;
+ struct iftot {
+ u_long ift_ibytes;
+ u_long ift_obytes;
+ u_long ift_ipackets;
+ u_long ift_opackets;
+ u_long ift_imcasts;
+ u_long ift_omcasts;
+ } iftot = {0, 0, 0, 0, 0, 0};
+
+ // --------------------------------------------------------------------
+
+ if (likely(do_bandwidth_net)) {
+
+ iftot.ift_ibytes = iftot.ift_obytes = 0;
+ for (ifa = ifap; ifa; ifa = ifa->ifa_next) {
+ if (ifa->ifa_addr->sa_family != AF_LINK)
+ continue;
+ if (!simple_pattern_matches(physical_interfaces, ifa->ifa_name))
+ continue;
+ iftot.ift_ibytes += IFA_DATA(ibytes);
+ iftot.ift_obytes += IFA_DATA(obytes);
+ }
+
+ static RRDSET *st = NULL;
+ static RRDDIM *rd_in = NULL, *rd_out = NULL;
+
+ if (unlikely(!st)) {
+ st = rrdset_create_localhost("system",
+ "net",
+ NULL,
+ "network",
+ NULL,
+ "Network Traffic",
+ "kilobits/s",
+ "freebsd.plugin",
+ "getifaddrs",
+ NETDATA_CHART_PRIO_SYSTEM_NET,
+ update_every,
+ RRDSET_TYPE_AREA
+ );
+
+ rd_in = rrddim_add(st, "InOctets", "received", 8, BITS_IN_A_KILOBIT, RRD_ALGORITHM_INCREMENTAL);
+ rd_out = rrddim_add(st, "OutOctets", "sent", -8, BITS_IN_A_KILOBIT, RRD_ALGORITHM_INCREMENTAL);
+ } else
+ rrdset_next(st);
+
+ rrddim_set_by_pointer(st, rd_in, iftot.ift_ibytes);
+ rrddim_set_by_pointer(st, rd_out, iftot.ift_obytes);
+ rrdset_done(st);
+ }
+
+ // --------------------------------------------------------------------
+
+ if (likely(do_packets_net)) {
+
+ iftot.ift_ipackets = iftot.ift_opackets = iftot.ift_imcasts = iftot.ift_omcasts = 0;
+ for (ifa = ifap; ifa; ifa = ifa->ifa_next) {
+ if (ifa->ifa_addr->sa_family != AF_LINK)
+ continue;
+ if (!simple_pattern_matches(physical_interfaces, ifa->ifa_name))
+ continue;
+ iftot.ift_ipackets += IFA_DATA(ipackets);
+ iftot.ift_opackets += IFA_DATA(opackets);
+ iftot.ift_imcasts += IFA_DATA(imcasts);
+ iftot.ift_omcasts += IFA_DATA(omcasts);
+ }
+
+ static RRDSET *st = NULL;
+ static RRDDIM *rd_packets_in = NULL, *rd_packets_out = NULL, *rd_packets_m_in = NULL, *rd_packets_m_out = NULL;
+
+ if (unlikely(!st)) {
+ st = rrdset_create_localhost("system",
+ "packets",
+ NULL,
+ "network",
+ NULL,
+ "Network Packets",
+ "packets/s",
+ "freebsd.plugin",
+ "getifaddrs",
+ NETDATA_CHART_PRIO_SYSTEM_PACKETS,
+ update_every,
+ RRDSET_TYPE_LINE
+ );
+
+ rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
+
+ rd_packets_in = rrddim_add(st, "received", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_packets_out = rrddim_add(st, "sent", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_packets_m_in = rrddim_add(st, "multicast_received", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_packets_m_out = rrddim_add(st, "multicast_sent", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ } else
+ rrdset_next(st);
+
+ rrddim_set_by_pointer(st, rd_packets_in, iftot.ift_ipackets);
+ rrddim_set_by_pointer(st, rd_packets_out, iftot.ift_opackets);
+ rrddim_set_by_pointer(st, rd_packets_m_in, iftot.ift_imcasts);
+ rrddim_set_by_pointer(st, rd_packets_m_out, iftot.ift_omcasts);
+ rrdset_done(st);
+ }
+
+ // --------------------------------------------------------------------
+
+ if (likely(do_bandwidth_ipv4)) {
+ iftot.ift_ibytes = iftot.ift_obytes = 0;
+ for (ifa = ifap; ifa; ifa = ifa->ifa_next) {
+ if (ifa->ifa_addr->sa_family != AF_INET)
+ continue;
+ iftot.ift_ibytes += IFA_DATA(ibytes);
+ iftot.ift_obytes += IFA_DATA(obytes);
+ }
+
+ static RRDSET *st = NULL;
+ static RRDDIM *rd_in = NULL, *rd_out = NULL;
+
+ if (unlikely(!st)) {
+ st = rrdset_create_localhost("system",
+ "ipv4",
+ NULL,
+ "network",
+ NULL,
+ "IPv4 Bandwidth",
+ "kilobits/s",
+ "freebsd.plugin",
+ "getifaddrs",
+ NETDATA_CHART_PRIO_SYSTEM_IPV4,
+ update_every,
+ RRDSET_TYPE_AREA
+ );
+
+ rd_in = rrddim_add(st, "InOctets", "received", 8, BITS_IN_A_KILOBIT, RRD_ALGORITHM_INCREMENTAL);
+ rd_out = rrddim_add(st, "OutOctets", "sent", -8, BITS_IN_A_KILOBIT, RRD_ALGORITHM_INCREMENTAL);
+ } else
+ rrdset_next(st);
+
+ rrddim_set_by_pointer(st, rd_in, iftot.ift_ibytes);
+ rrddim_set_by_pointer(st, rd_out, iftot.ift_obytes);
+ rrdset_done(st);
+ }
+
+ // --------------------------------------------------------------------
+
+ if (likely(do_bandwidth_ipv6)) {
+ iftot.ift_ibytes = iftot.ift_obytes = 0;
+ for (ifa = ifap; ifa; ifa = ifa->ifa_next) {
+ if (ifa->ifa_addr->sa_family != AF_INET6)
+ continue;
+ iftot.ift_ibytes += IFA_DATA(ibytes);
+ iftot.ift_obytes += IFA_DATA(obytes);
+ }
+
+ static RRDSET *st = NULL;
+ static RRDDIM *rd_in = NULL, *rd_out = NULL;
+
+ if (unlikely(!st)) {
+ st = rrdset_create_localhost("system",
+ "ipv6",
+ NULL,
+ "network",
+ NULL,
+ "IPv6 Bandwidth",
+ "kilobits/s",
+ "freebsd.plugin",
+ "getifaddrs",
+ NETDATA_CHART_PRIO_SYSTEM_IPV6,
+ update_every,
+ RRDSET_TYPE_AREA
+ );
+
+ rd_in = rrddim_add(st, "received", NULL, 8, BITS_IN_A_KILOBIT, RRD_ALGORITHM_INCREMENTAL);
+ rd_out = rrddim_add(st, "sent", NULL, -8, BITS_IN_A_KILOBIT, RRD_ALGORITHM_INCREMENTAL);
+ } else
+ rrdset_next(st);
+
+ rrddim_set_by_pointer(st, rd_in, iftot.ift_ibytes);
+ rrddim_set_by_pointer(st, rd_out, iftot.ift_obytes);
+ rrdset_done(st);
+ }
+
+ // --------------------------------------------------------------------
+
+ network_interfaces_found = 0;
+
+ for (ifa = ifap; ifa; ifa = ifa->ifa_next) {
+ if (ifa->ifa_addr->sa_family != AF_LINK)
+ continue;
+
+ struct cgroup_network_interface *ifm = get_network_interface(ifa->ifa_name);
+ ifm->updated = 1;
+ network_interfaces_found++;
+
+ if (unlikely(!ifm->configured)) {
+ char var_name[4096 + 1];
+
+ // this is the first time we see this network interface
+
+ // remember we configured it
+ ifm->configured = 1;
+
+ ifm->enabled = enable_new_interfaces;
+
+ if (likely(ifm->enabled))
+ ifm->enabled = !simple_pattern_matches(excluded_interfaces, ifa->ifa_name);
+
+ snprintfz(var_name, 4096, "%s:%s", CONFIG_SECTION_GETIFADDRS, ifa->ifa_name);
+ ifm->enabled = config_get_boolean_ondemand(var_name, "enabled", ifm->enabled);
+
+ if (unlikely(ifm->enabled == CONFIG_BOOLEAN_NO))
+ continue;
+
+ ifm->do_bandwidth = config_get_boolean_ondemand(var_name, "bandwidth", do_bandwidth);
+ ifm->do_packets = config_get_boolean_ondemand(var_name, "packets", do_packets);
+ ifm->do_errors = config_get_boolean_ondemand(var_name, "errors", do_errors);
+ ifm->do_drops = config_get_boolean_ondemand(var_name, "drops", do_drops);
+ ifm->do_events = config_get_boolean_ondemand(var_name, "events", do_events);
+ }
+
+ if (unlikely(!ifm->enabled))
+ continue;
+
+ // --------------------------------------------------------------------
+
+ if (ifm->do_bandwidth == CONFIG_BOOLEAN_YES || (ifm->do_bandwidth == CONFIG_BOOLEAN_AUTO &&
+ (IFA_DATA(ibytes) || IFA_DATA(obytes)))) {
+ if (unlikely(!ifm->st_bandwidth)) {
+ ifm->st_bandwidth = rrdset_create_localhost("net",
+ ifa->ifa_name,
+ NULL,
+ ifa->ifa_name,
+ "net.net",
+ "Bandwidth",
+ "kilobits/s",
+ "freebsd.plugin",
+ "getifaddrs",
+ NETDATA_CHART_PRIO_FIRST_NET_IFACE,
+ update_every,
+ RRDSET_TYPE_AREA
+ );
+
+ ifm->rd_bandwidth_in = rrddim_add(ifm->st_bandwidth, "received", NULL, 8, BITS_IN_A_KILOBIT, RRD_ALGORITHM_INCREMENTAL);
+ ifm->rd_bandwidth_out = rrddim_add(ifm->st_bandwidth, "sent", NULL, -8, BITS_IN_A_KILOBIT, RRD_ALGORITHM_INCREMENTAL);
+ } else
+ rrdset_next(ifm->st_bandwidth);
+
+ rrddim_set_by_pointer(ifm->st_bandwidth, ifm->rd_bandwidth_in, IFA_DATA(ibytes));
+ rrddim_set_by_pointer(ifm->st_bandwidth, ifm->rd_bandwidth_out, IFA_DATA(obytes));
+ rrdset_done(ifm->st_bandwidth);
+ }
+
+ // --------------------------------------------------------------------
+
+ if (ifm->do_packets == CONFIG_BOOLEAN_YES || (ifm->do_packets == CONFIG_BOOLEAN_AUTO &&
+ (IFA_DATA(ipackets) || IFA_DATA(opackets) || IFA_DATA(imcasts) || IFA_DATA(omcasts)))) {
+ if (unlikely(!ifm->st_packets)) {
+ ifm->st_packets = rrdset_create_localhost("net_packets",
+ ifa->ifa_name,
+ NULL,
+ ifa->ifa_name,
+ "net.packets",
+ "Packets",
+ "packets/s",
+ "freebsd.plugin",
+ "getifaddrs",
+ NETDATA_CHART_PRIO_FIRST_NET_PACKETS,
+ update_every,
+ RRDSET_TYPE_LINE
+ );
+
+ rrdset_flag_set(ifm->st_packets, RRDSET_FLAG_DETAIL);
+
+ ifm->rd_packets_in = rrddim_add(ifm->st_packets, "received", NULL, 1, 1,
+ RRD_ALGORITHM_INCREMENTAL);
+ ifm->rd_packets_out = rrddim_add(ifm->st_packets, "sent", NULL, -1, 1,
+ RRD_ALGORITHM_INCREMENTAL);
+ ifm->rd_packets_m_in = rrddim_add(ifm->st_packets, "multicast_received", NULL, 1, 1,
+ RRD_ALGORITHM_INCREMENTAL);
+ ifm->rd_packets_m_out = rrddim_add(ifm->st_packets, "multicast_sent", NULL, -1, 1,
+ RRD_ALGORITHM_INCREMENTAL);
+ } else
+ rrdset_next(ifm->st_packets);
+
+ rrddim_set_by_pointer(ifm->st_packets, ifm->rd_packets_in, IFA_DATA(ipackets));
+ rrddim_set_by_pointer(ifm->st_packets, ifm->rd_packets_out, IFA_DATA(opackets));
+ rrddim_set_by_pointer(ifm->st_packets, ifm->rd_packets_m_in, IFA_DATA(imcasts));
+ rrddim_set_by_pointer(ifm->st_packets, ifm->rd_packets_m_out, IFA_DATA(omcasts));
+ rrdset_done(ifm->st_packets);
+ }
+
+ // --------------------------------------------------------------------
+
+ if (ifm->do_errors == CONFIG_BOOLEAN_YES || (ifm->do_errors == CONFIG_BOOLEAN_AUTO &&
+ (IFA_DATA(ierrors) || IFA_DATA(oerrors)))) {
+ if (unlikely(!ifm->st_errors)) {
+ ifm->st_errors = rrdset_create_localhost("net_errors",
+ ifa->ifa_name,
+ NULL,
+ ifa->ifa_name,
+ "net.errors",
+ "Interface Errors",
+ "errors/s",
+ "freebsd.plugin",
+ "getifaddrs",
+ NETDATA_CHART_PRIO_FIRST_NET_ERRORS,
+ update_every,
+ RRDSET_TYPE_LINE
+ );
+
+ rrdset_flag_set(ifm->st_errors, RRDSET_FLAG_DETAIL);
+
+ ifm->rd_errors_in = rrddim_add(ifm->st_errors, "inbound", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ ifm->rd_errors_out = rrddim_add(ifm->st_errors, "outbound", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ } else
+ rrdset_next(ifm->st_errors);
+
+ rrddim_set_by_pointer(ifm->st_errors, ifm->rd_errors_in, IFA_DATA(ierrors));
+ rrddim_set_by_pointer(ifm->st_errors, ifm->rd_errors_out, IFA_DATA(oerrors));
+ rrdset_done(ifm->st_errors);
+ }
+ // --------------------------------------------------------------------
+
+ if (ifm->do_drops == CONFIG_BOOLEAN_YES || (ifm->do_drops == CONFIG_BOOLEAN_AUTO &&
+ (IFA_DATA(iqdrops)
+ #if __FreeBSD__ >= 11
+ || IFA_DATA(oqdrops)
+#endif
+ ))) {
+ if (unlikely(!ifm->st_drops)) {
+ ifm->st_drops = rrdset_create_localhost("net_drops",
+ ifa->ifa_name,
+ NULL,
+ ifa->ifa_name,
+ "net.drops",
+ "Interface Drops",
+ "drops/s",
+ "freebsd.plugin",
+ "getifaddrs",
+ NETDATA_CHART_PRIO_FIRST_NET_DROPS,
+ update_every,
+ RRDSET_TYPE_LINE
+ );
+
+ rrdset_flag_set(ifm->st_drops, RRDSET_FLAG_DETAIL);
+
+ ifm->rd_drops_in = rrddim_add(ifm->st_drops, "inbound", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+#if __FreeBSD__ >= 11
+ ifm->rd_drops_out = rrddim_add(ifm->st_drops, "outbound", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+#endif
+ } else
+ rrdset_next(ifm->st_drops);
+
+ rrddim_set_by_pointer(ifm->st_drops, ifm->rd_drops_in, IFA_DATA(iqdrops));
+#if __FreeBSD__ >= 11
+ rrddim_set_by_pointer(ifm->st_drops, ifm->rd_drops_out, IFA_DATA(oqdrops));
+#endif
+ rrdset_done(ifm->st_drops);
+ }
+
+ // --------------------------------------------------------------------
+
+ if (ifm->do_events == CONFIG_BOOLEAN_YES || (ifm->do_events == CONFIG_BOOLEAN_AUTO &&
+ IFA_DATA(collisions))) {
+ if (unlikely(!ifm->st_events)) {
+ ifm->st_events = rrdset_create_localhost("net_events",
+ ifa->ifa_name,
+ NULL,
+ ifa->ifa_name,
+ "net.events",
+ "Network Interface Events",
+ "events/s",
+ "freebsd.plugin",
+ "getifaddrs",
+ NETDATA_CHART_PRIO_FIRST_NET_EVENTS,
+ update_every,
+ RRDSET_TYPE_LINE
+ );
+
+ rrdset_flag_set(ifm->st_events, RRDSET_FLAG_DETAIL);
+
+ ifm->rd_events_coll = rrddim_add(ifm->st_events, "collisions", NULL, -1, 1,
+ RRD_ALGORITHM_INCREMENTAL);
+ } else
+ rrdset_next(ifm->st_events);
+
+ rrddim_set_by_pointer(ifm->st_events, ifm->rd_events_coll, IFA_DATA(collisions));
+ rrdset_done(ifm->st_events);
+ }
+ }
+
+ freeifaddrs(ifap);
+ }
+ } else {
+ error("DISABLED: getifaddrs module");
+ return 1;
+ }
+
+ network_interfaces_cleanup();
+
+ return 0;
+}
diff --git a/collectors/freebsd.plugin/freebsd_getmntinfo.c b/collectors/freebsd.plugin/freebsd_getmntinfo.c
new file mode 100644
index 000000000..c86f23166
--- /dev/null
+++ b/collectors/freebsd.plugin/freebsd_getmntinfo.c
@@ -0,0 +1,301 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "plugin_freebsd.h"
+
+#include <sys/mount.h>
+
+struct mount_point {
+ char *name;
+ uint32_t hash;
+ size_t len;
+
+ // flags
+ int configured;
+ int enabled;
+ int updated;
+
+ int do_space;
+ int do_inodes;
+
+ size_t collected; // the number of times this has been collected
+
+ // charts and dimensions
+
+ RRDSET *st_space;
+ RRDDIM *rd_space_used;
+ RRDDIM *rd_space_avail;
+ RRDDIM *rd_space_reserved;
+
+ RRDSET *st_inodes;
+ RRDDIM *rd_inodes_used;
+ RRDDIM *rd_inodes_avail;
+
+ struct mount_point *next;
+};
+
+static struct mount_point *mount_points_root = NULL, *mount_points_last_used = NULL;
+
+static size_t mount_points_added = 0, mount_points_found = 0;
+
+static void mount_point_free(struct mount_point *m) {
+ if (likely(m->st_space))
+ rrdset_is_obsolete(m->st_space);
+ if (likely(m->st_inodes))
+ rrdset_is_obsolete(m->st_inodes);
+
+ mount_points_added--;
+ freez(m->name);
+ freez(m);
+}
+
+static void mount_points_cleanup() {
+ if (likely(mount_points_found == mount_points_added)) return;
+
+ struct mount_point *m = mount_points_root, *last = NULL;
+ while(m) {
+ if (unlikely(!m->updated)) {
+ // info("Removing mount point '%s', linked after '%s'", m->name, last?last->name:"ROOT");
+
+ if (mount_points_last_used == m)
+ mount_points_last_used = last;
+
+ struct mount_point *t = m;
+
+ if (m == mount_points_root || !last)
+ mount_points_root = m = m->next;
+
+ else
+ last->next = m = m->next;
+
+ t->next = NULL;
+ mount_point_free(t);
+ }
+ else {
+ last = m;
+ m->updated = 0;
+ m = m->next;
+ }
+ }
+}
+
+static struct mount_point *get_mount_point(const char *name) {
+ struct mount_point *m;
+
+ uint32_t hash = simple_hash(name);
+
+ // search it, from the last position to the end
+ for(m = mount_points_last_used ; m ; m = m->next) {
+ if (unlikely(hash == m->hash && !strcmp(name, m->name))) {
+ mount_points_last_used = m->next;
+ return m;
+ }
+ }
+
+ // search it from the beginning to the last position we used
+ for(m = mount_points_root ; m != mount_points_last_used ; m = m->next) {
+ if (unlikely(hash == m->hash && !strcmp(name, m->name))) {
+ mount_points_last_used = m->next;
+ return m;
+ }
+ }
+
+ // create a new one
+ m = callocz(1, sizeof(struct mount_point));
+ m->name = strdupz(name);
+ m->hash = simple_hash(m->name);
+ m->len = strlen(m->name);
+ mount_points_added++;
+
+ // link it to the end
+ if (mount_points_root) {
+ struct mount_point *e;
+ for(e = mount_points_root; e->next ; e = e->next) ;
+ e->next = m;
+ }
+ else
+ mount_points_root = m;
+
+ return m;
+}
+
+// --------------------------------------------------------------------------------------------------------------------
+// getmntinfo
+
+int do_getmntinfo(int update_every, usec_t dt) {
+ (void)dt;
+
+#define DEFAULT_EXCLUDED_PATHS "/proc/*"
+// taken from gnulib/mountlist.c and shortened to FreeBSD related fstypes
+#define DEFAULT_EXCLUDED_FILESYSTEMS "autofs procfs subfs devfs none"
+#define CONFIG_SECTION_GETMNTINFO "plugin:freebsd:getmntinfo"
+
+ static int enable_new_mount_points = -1;
+ static int do_space = -1, do_inodes = -1;
+ static SIMPLE_PATTERN *excluded_mountpoints = NULL;
+ static SIMPLE_PATTERN *excluded_filesystems = NULL;
+
+ if (unlikely(enable_new_mount_points == -1)) {
+ enable_new_mount_points = config_get_boolean_ondemand(CONFIG_SECTION_GETMNTINFO,
+ "enable new mount points detected at runtime",
+ CONFIG_BOOLEAN_AUTO);
+
+ do_space = config_get_boolean_ondemand(CONFIG_SECTION_GETMNTINFO, "space usage for all disks", CONFIG_BOOLEAN_AUTO);
+ do_inodes = config_get_boolean_ondemand(CONFIG_SECTION_GETMNTINFO, "inodes usage for all disks", CONFIG_BOOLEAN_AUTO);
+
+ excluded_mountpoints = simple_pattern_create(
+ config_get(CONFIG_SECTION_GETMNTINFO, "exclude space metrics on paths",
+ DEFAULT_EXCLUDED_PATHS)
+ , NULL
+ , SIMPLE_PATTERN_EXACT
+ );
+
+ excluded_filesystems = simple_pattern_create(
+ config_get(CONFIG_SECTION_GETMNTINFO, "exclude space metrics on filesystems",
+ DEFAULT_EXCLUDED_FILESYSTEMS)
+ , NULL
+ , SIMPLE_PATTERN_EXACT
+ );
+ }
+
+ if (likely(do_space || do_inodes)) {
+ struct statfs *mntbuf;
+ int mntsize;
+
+ // there is no mount info in sysctl MIBs
+ if (unlikely(!(mntsize = getmntinfo(&mntbuf, MNT_NOWAIT)))) {
+ error("FREEBSD: getmntinfo() failed");
+ do_space = 0;
+ error("DISABLED: disk_space.* charts");
+ do_inodes = 0;
+ error("DISABLED: disk_inodes.* charts");
+ error("DISABLED: getmntinfo module");
+ return 1;
+ } else {
+ int i;
+
+ mount_points_found = 0;
+
+ for (i = 0; i < mntsize; i++) {
+ char title[4096 + 1];
+
+ struct mount_point *m = get_mount_point(mntbuf[i].f_mntonname);
+ m->updated = 1;
+ mount_points_found++;
+
+ if (unlikely(!m->configured)) {
+ char var_name[4096 + 1];
+
+ // this is the first time we see this filesystem
+
+ // remember we configured it
+ m->configured = 1;
+
+ m->enabled = enable_new_mount_points;
+
+ if (likely(m->enabled))
+ m->enabled = !(simple_pattern_matches(excluded_mountpoints, mntbuf[i].f_mntonname)
+ || simple_pattern_matches(excluded_filesystems, mntbuf[i].f_fstypename));
+
+ snprintfz(var_name, 4096, "%s:%s", CONFIG_SECTION_GETMNTINFO, mntbuf[i].f_mntonname);
+ m->enabled = config_get_boolean_ondemand(var_name, "enabled", m->enabled);
+
+ if (unlikely(m->enabled == CONFIG_BOOLEAN_NO))
+ continue;
+
+ m->do_space = config_get_boolean_ondemand(var_name, "space usage", do_space);
+ m->do_inodes = config_get_boolean_ondemand(var_name, "inodes usage", do_inodes);
+ }
+
+ if (unlikely(!m->enabled))
+ continue;
+
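+ // skip read-only mount points that have never produced data (e.g. static media), so no charts appear for them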
+ if (unlikely(mntbuf[i].f_flags & MNT_RDONLY && !m->collected))
+ continue;
+
+ // --------------------------------------------------------------------------
+
+ int rendered = 0;
+
+ if (m->do_space == CONFIG_BOOLEAN_YES || (m->do_space == CONFIG_BOOLEAN_AUTO && (mntbuf[i].f_blocks > 2))) {
+ if (unlikely(!m->st_space)) {
+ snprintfz(title, 4096, "Disk Space Usage for %s [%s]",
+ mntbuf[i].f_mntonname, mntbuf[i].f_mntfromname);
+ m->st_space = rrdset_create_localhost("disk_space",
+ mntbuf[i].f_mntonname,
+ NULL,
+ mntbuf[i].f_mntonname,
+ "disk.space",
+ title,
+ "GB",
+ "freebsd.plugin",
+ "getmntinfo",
+ NETDATA_CHART_PRIO_DISKSPACE_SPACE,
+ update_every,
+ RRDSET_TYPE_STACKED
+ );
+
+ m->rd_space_avail = rrddim_add(m->st_space, "avail", NULL,
+ mntbuf[i].f_bsize, GIGA_FACTOR, RRD_ALGORITHM_ABSOLUTE);
+ m->rd_space_used = rrddim_add(m->st_space, "used", NULL,
+ mntbuf[i].f_bsize, GIGA_FACTOR, RRD_ALGORITHM_ABSOLUTE);
+ m->rd_space_reserved = rrddim_add(m->st_space, "reserved_for_root", "reserved for root",
+ mntbuf[i].f_bsize, GIGA_FACTOR, RRD_ALGORITHM_ABSOLUTE);
+ } else
+ rrdset_next(m->st_space);
+
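+ // statfs(2) arithmetic: f_bavail is space usable by unprivileged users, f_bfree - f_bavail is reserved
+ // for root, f_blocks - f_bfree is used; all are in f_bsize blocks, scaled to GB by the dimension divisor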
+ rrddim_set_by_pointer(m->st_space, m->rd_space_avail, (collected_number) mntbuf[i].f_bavail);
+ rrddim_set_by_pointer(m->st_space, m->rd_space_used, (collected_number) (mntbuf[i].f_blocks -
+ mntbuf[i].f_bfree));
+ rrddim_set_by_pointer(m->st_space, m->rd_space_reserved, (collected_number) (mntbuf[i].f_bfree -
+ mntbuf[i].f_bavail));
+ rrdset_done(m->st_space);
+
+ rendered++;
+ }
+
+ // --------------------------------------------------------------------------
+
+ if (m->do_inodes == CONFIG_BOOLEAN_YES || (m->do_inodes == CONFIG_BOOLEAN_AUTO && (mntbuf[i].f_files > 1))) {
+ if (unlikely(!m->st_inodes)) {
+ snprintfz(title, 4096, "Disk Files (inodes) Usage for %s [%s]",
+ mntbuf[i].f_mntonname, mntbuf[i].f_mntfromname);
+ m->st_inodes = rrdset_create_localhost("disk_inodes",
+ mntbuf[i].f_mntonname,
+ NULL,
+ mntbuf[i].f_mntonname,
+ "disk.inodes",
+ title,
+ "Inodes",
+ "freebsd.plugin",
+ "getmntinfo",
+ NETDATA_CHART_PRIO_DISKSPACE_INODES,
+ update_every,
+ RRDSET_TYPE_STACKED
+ );
+
+ m->rd_inodes_avail = rrddim_add(m->st_inodes, "avail", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ m->rd_inodes_used = rrddim_add(m->st_inodes, "used", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ } else
+ rrdset_next(m->st_inodes);
+
+ rrddim_set_by_pointer(m->st_inodes, m->rd_inodes_avail, (collected_number) mntbuf[i].f_ffree);
+ rrddim_set_by_pointer(m->st_inodes, m->rd_inodes_used, (collected_number) (mntbuf[i].f_files -
+ mntbuf[i].f_ffree));
+ rrdset_done(m->st_inodes);
+
+ rendered++;
+ }
+
+ if (likely(rendered))
+ m->collected++;
+ }
+ }
+ } else {
+ error("DISABLED: getmntinfo module");
+ return 1;
+ }
+
+ mount_points_cleanup();
+
+ return 0;
+}
diff --git a/collectors/freebsd.plugin/freebsd_ipfw.c b/collectors/freebsd.plugin/freebsd_ipfw.c
new file mode 100644
index 000000000..c256da8b3
--- /dev/null
+++ b/collectors/freebsd.plugin/freebsd_ipfw.c
@@ -0,0 +1,372 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "plugin_freebsd.h"
+
+#include <netinet/ip_fw.h>
+
+#define FREE_MEM_THRESHOLD 10000 // number of unused chunks that trigger memory freeing
+
+#define COMMON_IPFW_ERROR() error("DISABLED: ipfw.packets chart"); \
+ error("DISABLED: ipfw.bytes chart"); \
+ error("DISABLED: ipfw.dyn_active chart"); \
+ error("DISABLED: ipfw.dyn_expired chart"); \
+ error("DISABLED: ipfw.mem chart");
+
+// --------------------------------------------------------------------------------------------------------------------
+// ipfw
+
+int do_ipfw(int update_every, usec_t dt) {
+ (void)dt;
+#if __FreeBSD__ >= 11
+
+ static int do_static = -1, do_dynamic = -1, do_mem = -1;
+
+ if (unlikely(do_static == -1)) {
+ do_static = config_get_boolean("plugin:freebsd:ipfw", "counters for static rules", 1);
+ do_dynamic = config_get_boolean("plugin:freebsd:ipfw", "number of dynamic rules", 1);
+ do_mem = config_get_boolean("plugin:freebsd:ipfw", "allocated memory", 1);
+ }
+
+ // variables for getting ipfw configuration
+
+ int error;
+ static int ipfw_socket = -1;
+ static ipfw_cfg_lheader *cfg = NULL;
+ ip_fw3_opheader *op3 = NULL;
+ static socklen_t *optlen = NULL, cfg_size = 0;
+
+ // variables for static rules handling
+
+ ipfw_obj_ctlv *ctlv = NULL;
+ ipfw_obj_tlv *rbase = NULL;
+ int rcnt = 0;
+
+ int n, seen;
+ struct ip_fw_rule *rule;
+ struct ip_fw_bcounter *cntr;
+ int c = 0;
+
+ char rule_num_str[24]; // large enough for "<rulenum>_<id>" with 32-bit ids
+
+ // variables for dynamic rules handling
+
+ caddr_t dynbase = NULL;
+ size_t dynsz = 0;
+ size_t readsz = sizeof(*cfg);
+ int ttype = 0;
+ ipfw_obj_tlv *tlv;
+ ipfw_dyn_rule *dyn_rule;
+ uint16_t rulenum, prev_rulenum = IPFW_DEFAULT_RULE;
+ unsigned srn, static_rules_num = 0;
+ static size_t dyn_rules_num_size = 0;
+
+ static struct dyn_rule_num {
+ uint16_t rule_num;
+ uint32_t active_rules;
+ uint32_t expired_rules;
+ } *dyn_rules_num = NULL;
+
+ uint32_t *dyn_rules_counter;
+
+ if (likely(do_static | do_dynamic | do_mem)) {
+
+ // initialize the smallest ipfw_cfg_lheader possible
+
+ if (unlikely((optlen == NULL) || (cfg == NULL))) {
+ optlen = reallocz(optlen, sizeof(socklen_t));
+ *optlen = cfg_size = 32;
+ cfg = reallocz(cfg, *optlen);
+ }
+
+ // get socket descriptor and initialize ipfw_cfg_lheader structure
+
+ if (unlikely(ipfw_socket == -1))
+ ipfw_socket = socket(AF_INET, SOCK_RAW, IPPROTO_RAW);
+ if (unlikely(ipfw_socket == -1)) {
+ error("FREEBSD: can't get socket for ipfw configuration");
+ error("FREEBSD: run netdata as root to get access to ipfw data");
+ COMMON_IPFW_ERROR();
+ return 1;
+ }
+
+ bzero(cfg, 32);
+ cfg->flags = IPFW_CFG_GET_STATIC | IPFW_CFG_GET_COUNTERS | IPFW_CFG_GET_STATES;
+ op3 = &cfg->opheader;
+ op3->opcode = IP_FW_XGET;
+
+ // probe the required ipfw configuration size first, then fetch the full configuration
+
+ *optlen = cfg_size;
+ error = getsockopt(ipfw_socket, IPPROTO_IP, IP_FW3, op3, optlen);
+ if (error)
+ if (errno != ENOMEM) {
+ error("FREEBSD: ipfw socket reading error");
+ COMMON_IPFW_ERROR();
+ return 1;
+ }
+ if ((cfg->size > cfg_size) || ((cfg_size - cfg->size) > sizeof(struct dyn_rule_num) * FREE_MEM_THRESHOLD)) {
+ *optlen = cfg_size = cfg->size;
+ cfg = reallocz(cfg, *optlen);
+ bzero(cfg, 32);
+ cfg->flags = IPFW_CFG_GET_STATIC | IPFW_CFG_GET_COUNTERS | IPFW_CFG_GET_STATES;
+ op3 = &cfg->opheader;
+ op3->opcode = IP_FW_XGET;
+ error = getsockopt(ipfw_socket, IPPROTO_IP, IP_FW3, op3, optlen);
+ if (error) {
+ error("FREEBSD: ipfw socket reading error");
+ COMMON_IPFW_ERROR();
+ return 1;
+ }
+ }
+
+ // go through static rules configuration structures
+
+ ctlv = (ipfw_obj_ctlv *) (cfg + 1);
+
+ if (cfg->flags & IPFW_CFG_GET_STATIC) {
+ /* We've requested static rules */
+ if (ctlv->head.type == IPFW_TLV_TBLNAME_LIST) {
+ readsz += ctlv->head.length;
+ ctlv = (ipfw_obj_ctlv *) ((caddr_t) ctlv +
+ ctlv->head.length);
+ }
+
+ if (ctlv->head.type == IPFW_TLV_RULE_LIST) {
+ rbase = (ipfw_obj_tlv *) (ctlv + 1);
+ rcnt = ctlv->count;
+ readsz += ctlv->head.length;
+ ctlv = (ipfw_obj_ctlv *) ((caddr_t) ctlv + ctlv->head.length);
+ }
+ }
+
+ if ((cfg->flags & IPFW_CFG_GET_STATES) && (readsz != *optlen)) {
+ /* We may have some dynamic states */
+ dynsz = *optlen - readsz;
+ /* Skip empty header */
+ if (dynsz != sizeof(ipfw_obj_ctlv))
+ dynbase = (caddr_t) ctlv;
+ else
+ dynsz = 0;
+ }
+
+ // --------------------------------------------------------------------
+
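+ // memory accounting: dynsz bytes of the returned buffer hold dynamic state entries,
+ // the remainder (*optlen - dynsz) is the static rule set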
+ if (likely(do_mem)) {
+ static RRDSET *st_mem = NULL;
+ static RRDDIM *rd_dyn_mem = NULL;
+ static RRDDIM *rd_stat_mem = NULL;
+
+ if (unlikely(!st_mem)) {
+ st_mem = rrdset_create_localhost("ipfw",
+ "mem",
+ NULL,
+ "memory allocated",
+ NULL,
+ "Memory allocated by rules",
+ "bytes",
+ "freebsd.plugin",
+ "ipfw",
+ NETDATA_CHART_PRIO_IPFW_MEM,
+ update_every,
+ RRDSET_TYPE_STACKED
+ );
+ rrdset_flag_set(st_mem, RRDSET_FLAG_DETAIL);
+
+ rd_dyn_mem = rrddim_add(st_mem, "dynamic", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ rd_stat_mem = rrddim_add(st_mem, "static", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ } else
+ rrdset_next(st_mem);
+
+ rrddim_set_by_pointer(st_mem, rd_dyn_mem, dynsz);
+ rrddim_set_by_pointer(st_mem, rd_stat_mem, *optlen - dynsz);
+ rrdset_done(st_mem);
+ }
+
+ // --------------------------------------------------------------------
+
+ static RRDSET *st_packets = NULL, *st_bytes = NULL;
+ RRDDIM *rd_packets = NULL, *rd_bytes = NULL;
+
+ if (likely(do_static || do_dynamic)) {
+ if (likely(do_static)) {
+ if (unlikely(!st_packets))
+ st_packets = rrdset_create_localhost("ipfw",
+ "packets",
+ NULL,
+ "static rules",
+ NULL,
+ "Packets",
+ "packets/s",
+ "freebsd.plugin",
+ "ipfw",
+ NETDATA_CHART_PRIO_IPFW_PACKETS,
+ update_every,
+ RRDSET_TYPE_STACKED
+ );
+ else
+ rrdset_next(st_packets);
+
+ if (unlikely(!st_bytes))
+ st_bytes = rrdset_create_localhost("ipfw",
+ "bytes",
+ NULL,
+ "static rules",
+ NULL,
+ "Bytes",
+ "bytes/s",
+ "freebsd.plugin",
+ "ipfw",
+ NETDATA_CHART_PRIO_IPFW_BYTES,
+ update_every,
+ RRDSET_TYPE_STACKED
+ );
+ else
+ rrdset_next(st_bytes);
+ }
+
+ for (n = seen = 0; n < rcnt; n++, rbase = (ipfw_obj_tlv *) ((caddr_t) rbase + rbase->length)) {
+ cntr = (struct ip_fw_bcounter *) (rbase + 1);
+ rule = (struct ip_fw_rule *) ((caddr_t) cntr + cntr->size);
+ if (rule->rulenum != prev_rulenum)
+ static_rules_num++;
+ if (rule->rulenum > IPFW_DEFAULT_RULE)
+ break;
+
+ if (likely(do_static)) {
+ sprintf(rule_num_str, "%d_%d", rule->rulenum, rule->id);
+
+ rd_packets = rrddim_find(st_packets, rule_num_str);
+ if (unlikely(!rd_packets))
+ rd_packets = rrddim_add(st_packets, rule_num_str, NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rrddim_set_by_pointer(st_packets, rd_packets, cntr->pcnt);
+
+ rd_bytes = rrddim_find(st_bytes, rule_num_str);
+ if (unlikely(!rd_bytes))
+ rd_bytes = rrddim_add(st_bytes, rule_num_str, NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rrddim_set_by_pointer(st_bytes, rd_bytes, cntr->bcnt);
+ }
+
+ c += rbase->length;
+ seen++;
+ }
+
+ if (likely(do_static)) {
+ rrdset_done(st_packets);
+ rrdset_done(st_bytes);
+ }
+ }
+
+ // --------------------------------------------------------------------
+
+ // go through dynamic rules configuration structures
+
+ if (likely(do_dynamic && (dynsz > 0))) {
+ if ((dyn_rules_num_size < sizeof(struct dyn_rule_num) * static_rules_num) ||
+ ((dyn_rules_num_size - sizeof(struct dyn_rule_num) * static_rules_num) >
+ sizeof(struct dyn_rule_num) * FREE_MEM_THRESHOLD)) {
+ dyn_rules_num_size = sizeof(struct dyn_rule_num) * static_rules_num;
+ dyn_rules_num = reallocz(dyn_rules_num, dyn_rules_num_size);
+ }
+ bzero(dyn_rules_num, sizeof(struct dyn_rule_num) * static_rules_num);
+ dyn_rules_num->rule_num = IPFW_DEFAULT_RULE;
+
+ if (dynsz > 0 && ctlv->head.type == IPFW_TLV_DYNSTATE_LIST) {
+ dynbase += sizeof(*ctlv);
+ dynsz -= sizeof(*ctlv);
+ ttype = IPFW_TLV_DYN_ENT;
+ }
+
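+ // walk the dynamic state TLVs: each ipfw_obj_tlv carries one ipfw_dyn_rule; its parent rule number is
+ // copied out with bcopy() and tallied as active (expire > 0) or expired in dyn_rules_num, an array
+ // terminated by an IPFW_DEFAULT_RULE entry that is pushed forward as new rule numbers are appended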
+ while (dynsz > 0) {
+ tlv = (ipfw_obj_tlv *) dynbase;
+ if (tlv->type != ttype)
+ break;
+
+ dyn_rule = (ipfw_dyn_rule *) (tlv + 1);
+ bcopy(&dyn_rule->rule, &rulenum, sizeof(rulenum));
+
+ for (srn = 0; srn < (static_rules_num - 1); srn++) {
+ if (dyn_rule->expire > 0)
+ dyn_rules_counter = &dyn_rules_num[srn].active_rules;
+ else
+ dyn_rules_counter = &dyn_rules_num[srn].expired_rules;
+ if (dyn_rules_num[srn].rule_num == rulenum) {
+ (*dyn_rules_counter)++;
+ break;
+ }
+ if (dyn_rules_num[srn].rule_num == IPFW_DEFAULT_RULE) {
+ dyn_rules_num[srn].rule_num = rulenum;
+ dyn_rules_num[srn + 1].rule_num = IPFW_DEFAULT_RULE;
+ (*dyn_rules_counter)++;
+ break;
+ }
+ }
+
+ dynsz -= tlv->length;
+ dynbase += tlv->length;
+ }
+
+ // --------------------------------------------------------------------
+
+ static RRDSET *st_active = NULL, *st_expired = NULL;
+ RRDDIM *rd_active = NULL, *rd_expired = NULL;
+
+ if (unlikely(!st_active))
+ st_active = rrdset_create_localhost("ipfw",
+ "active",
+ NULL,
+ "dynamic_rules",
+ NULL,
+ "Active rules",
+ "rules",
+ "freebsd.plugin",
+ "ipfw",
+ NETDATA_CHART_PRIO_IPFW_ACTIVE,
+ update_every,
+ RRDSET_TYPE_STACKED
+ );
+ else
+ rrdset_next(st_active);
+
+ if (unlikely(!st_expired))
+ st_expired = rrdset_create_localhost("ipfw",
+ "expired",
+ NULL,
+ "dynamic_rules",
+ NULL,
+ "Expired rules",
+ "rules",
+ "freebsd.plugin",
+ "ipfw",
+ NETDATA_CHART_PRIO_IPFW_EXPIRED,
+ update_every,
+ RRDSET_TYPE_STACKED
+ );
+ else
+ rrdset_next(st_expired);
+
+ for (srn = 0; (srn < (static_rules_num - 1)) && (dyn_rules_num[srn].rule_num != IPFW_DEFAULT_RULE); srn++) {
+ sprintf(rule_num_str, "%d", dyn_rules_num[srn].rule_num);
+
+ rd_active = rrddim_find(st_active, rule_num_str);
+ if (unlikely(!rd_active))
+ rd_active = rrddim_add(st_active, rule_num_str, NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ rrddim_set_by_pointer(st_active, rd_active, dyn_rules_num[srn].active_rules);
+
+ rd_expired = rrddim_find(st_expired, rule_num_str);
+ if (unlikely(!rd_expired))
+ rd_expired = rrddim_add(st_expired, rule_num_str, NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ rrddim_set_by_pointer(st_expired, rd_expired, dyn_rules_num[srn].expired_rules);
+ }
+
+ rrdset_done(st_active);
+ rrdset_done(st_expired);
+ }
+ }
+
+ return 0;
+#else
+ error("FREEBSD: ipfw charts supported for FreeBSD 11.0 and newer releases only");
+ COMMON_IPFW_ERROR();
+ return 1;
+#endif
+}
diff --git a/collectors/freebsd.plugin/freebsd_kstat_zfs.c b/collectors/freebsd.plugin/freebsd_kstat_zfs.c
new file mode 100644
index 000000000..93dfc320b
--- /dev/null
+++ b/collectors/freebsd.plugin/freebsd_kstat_zfs.c
@@ -0,0 +1,300 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "plugin_freebsd.h"
+#include "collectors/proc.plugin/zfs_common.h"
+
+extern struct arcstats arcstats;
+
+// --------------------------------------------------------------------------------------------------------------------
+// kstat.zfs.misc.arcstats
+
+int do_kstat_zfs_misc_arcstats(int update_every, usec_t dt) {
+ (void)dt;
+
+ unsigned long long l2_size;
+ size_t uint64_t_size = sizeof(uint64_t);
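+ // each int[5] member below caches the numeric sysctl MIB for the matching arcstats name,
+ // so the name lookup is expected to happen only once per metric via the GETSYSCTL_SIMPLE helper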
+ static struct mibs {
+ int hits[5];
+ int misses[5];
+ int demand_data_hits[5];
+ int demand_data_misses[5];
+ int demand_metadata_hits[5];
+ int demand_metadata_misses[5];
+ int prefetch_data_hits[5];
+ int prefetch_data_misses[5];
+ int prefetch_metadata_hits[5];
+ int prefetch_metadata_misses[5];
+ int mru_hits[5];
+ int mru_ghost_hits[5];
+ int mfu_hits[5];
+ int mfu_ghost_hits[5];
+ int deleted[5];
+ int mutex_miss[5];
+ int evict_skip[5];
+ int evict_not_enough[5];
+ int evict_l2_cached[5];
+ int evict_l2_eligible[5];
+ int evict_l2_ineligible[5];
+ int evict_l2_skip[5];
+ int hash_elements[5];
+ int hash_elements_max[5];
+ int hash_collisions[5];
+ int hash_chains[5];
+ int hash_chain_max[5];
+ int p[5];
+ int c[5];
+ int c_min[5];
+ int c_max[5];
+ int size[5];
+ int hdr_size[5];
+ int data_size[5];
+ int metadata_size[5];
+ int other_size[5];
+ int anon_size[5];
+ int anon_evictable_data[5];
+ int anon_evictable_metadata[5];
+ int mru_size[5];
+ int mru_evictable_data[5];
+ int mru_evictable_metadata[5];
+ int mru_ghost_size[5];
+ int mru_ghost_evictable_data[5];
+ int mru_ghost_evictable_metadata[5];
+ int mfu_size[5];
+ int mfu_evictable_data[5];
+ int mfu_evictable_metadata[5];
+ int mfu_ghost_size[5];
+ int mfu_ghost_evictable_data[5];
+ int mfu_ghost_evictable_metadata[5];
+ int l2_hits[5];
+ int l2_misses[5];
+ int l2_feeds[5];
+ int l2_rw_clash[5];
+ int l2_read_bytes[5];
+ int l2_write_bytes[5];
+ int l2_writes_sent[5];
+ int l2_writes_done[5];
+ int l2_writes_error[5];
+ int l2_writes_lock_retry[5];
+ int l2_evict_lock_retry[5];
+ int l2_evict_reading[5];
+ int l2_evict_l1cached[5];
+ int l2_free_on_write[5];
+ int l2_cdata_free_on_write[5];
+ int l2_abort_lowmem[5];
+ int l2_cksum_bad[5];
+ int l2_io_error[5];
+ int l2_size[5];
+ int l2_asize[5];
+ int l2_hdr_size[5];
+ int l2_compress_successes[5];
+ int l2_compress_zeros[5];
+ int l2_compress_failures[5];
+ int memory_throttle_count[5];
+ int duplicate_buffers[5];
+ int duplicate_buffers_size[5];
+ int duplicate_reads[5];
+ int memory_direct_count[5];
+ int memory_indirect_count[5];
+ int arc_no_grow[5];
+ int arc_tempreserve[5];
+ int arc_loaned_bytes[5];
+ int arc_prune[5];
+ int arc_meta_used[5];
+ int arc_meta_limit[5];
+ int arc_meta_max[5];
+ int arc_meta_min[5];
+ int arc_need_free[5];
+ int arc_sys_free[5];
+ } mibs;
+
+ arcstats.l2exist = -1;
+
+ if(unlikely(sysctlbyname("kstat.zfs.misc.arcstats.l2_size", &l2_size, &uint64_t_size, NULL, 0)))
+ return 0;
+
+ if(likely(l2_size))
+ arcstats.l2exist = 1;
+ else
+ arcstats.l2exist = 0;
+
+ GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.hits", mibs.hits, arcstats.hits);
+ GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.misses", mibs.misses, arcstats.misses);
+ GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.demand_data_hits", mibs.demand_data_hits, arcstats.demand_data_hits);
+ GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.demand_data_misses", mibs.demand_data_misses, arcstats.demand_data_misses);
+ GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.demand_metadata_hits", mibs.demand_metadata_hits, arcstats.demand_metadata_hits);
+ GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.demand_metadata_misses", mibs.demand_metadata_misses, arcstats.demand_metadata_misses);
+ GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.prefetch_data_hits", mibs.prefetch_data_hits, arcstats.prefetch_data_hits);
+ GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.prefetch_data_misses", mibs.prefetch_data_misses, arcstats.prefetch_data_misses);
+ GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.prefetch_metadata_hits", mibs.prefetch_metadata_hits, arcstats.prefetch_metadata_hits);
+ GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.prefetch_metadata_misses", mibs.prefetch_metadata_misses, arcstats.prefetch_metadata_misses);
+ GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.mru_hits", mibs.mru_hits, arcstats.mru_hits);
+ GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.mru_ghost_hits", mibs.mru_ghost_hits, arcstats.mru_ghost_hits);
+ GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.mfu_hits", mibs.mfu_hits, arcstats.mfu_hits);
+ GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.mfu_ghost_hits", mibs.mfu_ghost_hits, arcstats.mfu_ghost_hits);
+ GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.deleted", mibs.deleted, arcstats.deleted);
+ GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.mutex_miss", mibs.mutex_miss, arcstats.mutex_miss);
+ GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.evict_skip", mibs.evict_skip, arcstats.evict_skip);
+ // not used: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.evict_not_enough", mibs.evict_not_enough, arcstats.evict_not_enough);
+ // not used: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.evict_l2_cached", mibs.evict_l2_cached, arcstats.evict_l2_cached);
+ // not used: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.evict_l2_eligible", mibs.evict_l2_eligible, arcstats.evict_l2_eligible);
+ // not used: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.evict_l2_ineligible", mibs.evict_l2_ineligible, arcstats.evict_l2_ineligible);
+ // not used: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.evict_l2_skip", mibs.evict_l2_skip, arcstats.evict_l2_skip);
+ GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.hash_elements", mibs.hash_elements, arcstats.hash_elements);
+ GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.hash_elements_max", mibs.hash_elements_max, arcstats.hash_elements_max);
+ GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.hash_collisions", mibs.hash_collisions, arcstats.hash_collisions);
+ GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.hash_chains", mibs.hash_chains, arcstats.hash_chains);
+ GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.hash_chain_max", mibs.hash_chain_max, arcstats.hash_chain_max);
+ GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.p", mibs.p, arcstats.p);
+ GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.c", mibs.c, arcstats.c);
+ GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.c_min", mibs.c_min, arcstats.c_min);
+ GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.c_max", mibs.c_max, arcstats.c_max);
+ GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.size", mibs.size, arcstats.size);
+ // not used: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.hdr_size", mibs.hdr_size, arcstats.hdr_size);
+ // not used: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.data_size", mibs.data_size, arcstats.data_size);
+ // not used: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.metadata_size", mibs.metadata_size, arcstats.metadata_size);
+ // not used: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.other_size", mibs.other_size, arcstats.other_size);
+ // not used: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.anon_size", mibs.anon_size, arcstats.anon_size);
+ // not used: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.anon_evictable_data", mibs.anon_evictable_data, arcstats.anon_evictable_data);
+ // not used: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.anon_evictable_metadata", mibs.anon_evictable_metadata, arcstats.anon_evictable_metadata);
+ GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.mru_size", mibs.mru_size, arcstats.mru_size);
+ // not used: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.mru_evictable_data", mibs.mru_evictable_data, arcstats.mru_evictable_data);
+ // not used: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.mru_evictable_metadata", mibs.mru_evictable_metadata, arcstats.mru_evictable_metadata);
+ // not used: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.mru_ghost_size", mibs.mru_ghost_size, arcstats.mru_ghost_size);
+ // not used: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.mru_ghost_evictable_data", mibs.mru_ghost_evictable_data, arcstats.mru_ghost_evictable_data);
+ // not used: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.mru_ghost_evictable_metadata", mibs.mru_ghost_evictable_metadata, arcstats.mru_ghost_evictable_metadata);
+ GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.mfu_size", mibs.mfu_size, arcstats.mfu_size);
+ // not used: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.mfu_evictable_data", mibs.mfu_evictable_data, arcstats.mfu_evictable_data);
+ // not used: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.mfu_evictable_metadata", mibs.mfu_evictable_metadata, arcstats.mfu_evictable_metadata);
+ // not used: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.mfu_ghost_size", mibs.mfu_ghost_size, arcstats.mfu_ghost_size);
+ // not used: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.mfu_ghost_evictable_data", mibs.mfu_ghost_evictable_data, arcstats.mfu_ghost_evictable_data);
+ // not used: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.mfu_ghost_evictable_metadata", mibs.mfu_ghost_evictable_metadata, arcstats.mfu_ghost_evictable_metadata);
+ GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.l2_hits", mibs.l2_hits, arcstats.l2_hits);
+ GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.l2_misses", mibs.l2_misses, arcstats.l2_misses);
+ // not used: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.l2_feeds", mibs.l2_feeds, arcstats.l2_feeds);
+ // not used: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.l2_rw_clash", mibs.l2_rw_clash, arcstats.l2_rw_clash);
+ GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.l2_read_bytes", mibs.l2_read_bytes, arcstats.l2_read_bytes);
+ GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.l2_write_bytes", mibs.l2_write_bytes, arcstats.l2_write_bytes);
+ // not used: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.l2_writes_sent", mibs.l2_writes_sent, arcstats.l2_writes_sent);
+ // not used: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.l2_writes_done", mibs.l2_writes_done, arcstats.l2_writes_done);
+ // not used: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.l2_writes_error", mibs.l2_writes_error, arcstats.l2_writes_error);
+ // not used: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.l2_writes_lock_retry", mibs.l2_writes_lock_retry, arcstats.l2_writes_lock_retry);
+ // not used: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.l2_evict_lock_retry", mibs.l2_evict_lock_retry, arcstats.l2_evict_lock_retry);
+ // not used: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.l2_evict_reading", mibs.l2_evict_reading, arcstats.l2_evict_reading);
+ // not used: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.l2_evict_l1cached", mibs.l2_evict_l1cached, arcstats.l2_evict_l1cached);
+ // not used: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.l2_free_on_write", mibs.l2_free_on_write, arcstats.l2_free_on_write);
+ // missing mib: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.l2_cdata_free_on_write", mibs.l2_cdata_free_on_write, arcstats.l2_cdata_free_on_write);
+ // not used: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.l2_abort_lowmem", mibs.l2_abort_lowmem, arcstats.l2_abort_lowmem);
+ // not used: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.l2_cksum_bad", mibs.l2_cksum_bad, arcstats.l2_cksum_bad);
+ // not used: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.l2_io_error", mibs.l2_io_error, arcstats.l2_io_error);
+ GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.l2_size", mibs.l2_size, arcstats.l2_size);
+ GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.l2_asize", mibs.l2_asize, arcstats.l2_asize);
+ // not used: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.l2_hdr_size", mibs.l2_hdr_size, arcstats.l2_hdr_size);
+ // missing mib: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.l2_compress_successes", mibs.l2_compress_successes, arcstats.l2_compress_successes);
+ // missing mib: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.l2_compress_zeros", mibs.l2_compress_zeros, arcstats.l2_compress_zeros);
+ // missing mib: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.l2_compress_failures", mibs.l2_compress_failures, arcstats.l2_compress_failures);
+ GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.memory_throttle_count", mibs.memory_throttle_count, arcstats.memory_throttle_count);
+ // missing mib: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.duplicate_buffers", mibs.duplicate_buffers, arcstats.duplicate_buffers);
+ // missing mib: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.duplicate_buffers_size", mibs.duplicate_buffers_size, arcstats.duplicate_buffers_size);
+ // missing mib: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.duplicate_reads", mibs.duplicate_reads, arcstats.duplicate_reads);
+ // missing mib: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.memory_direct_count", mibs.memory_direct_count, arcstats.memory_direct_count);
+ // missing mib: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.memory_indirect_count", mibs.memory_indirect_count, arcstats.memory_indirect_count);
+ // missing mib: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.arc_no_grow", mibs.arc_no_grow, arcstats.arc_no_grow);
+ // missing mib: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.arc_tempreserve", mibs.arc_tempreserve, arcstats.arc_tempreserve);
+ // missing mib: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.arc_loaned_bytes", mibs.arc_loaned_bytes, arcstats.arc_loaned_bytes);
+ // missing mib: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.arc_prune", mibs.arc_prune, arcstats.arc_prune);
+ // not used: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.arc_meta_used", mibs.arc_meta_used, arcstats.arc_meta_used);
+ // not used: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.arc_meta_limit", mibs.arc_meta_limit, arcstats.arc_meta_limit);
+ // not used: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.arc_meta_max", mibs.arc_meta_max, arcstats.arc_meta_max);
+ // not used: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.arc_meta_min", mibs.arc_meta_min, arcstats.arc_meta_min);
+ // missing mib: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.arc_need_free", mibs.arc_need_free, arcstats.arc_need_free);
+ // missing mib: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.arc_sys_free", mibs.arc_sys_free, arcstats.arc_sys_free);
+
+ generate_charts_arcstats("freebsd", "zfs", update_every);
+ generate_charts_arc_summary("freebsd", "zfs", update_every);
+
+ return 0;
+}
+
+// --------------------------------------------------------------------------------------------------------------------
+// kstat.zfs.misc.zio_trim
+
+int do_kstat_zfs_misc_zio_trim(int update_every, usec_t dt) {
+ (void)dt;
+ static int mib_bytes[5] = {0, 0, 0, 0, 0}, mib_success[5] = {0, 0, 0, 0, 0},
+ mib_failed[5] = {0, 0, 0, 0, 0}, mib_unsupported[5] = {0, 0, 0, 0, 0};
+ uint64_t bytes, success, failed, unsupported;
+
+ if (unlikely(GETSYSCTL_SIMPLE("kstat.zfs.misc.zio_trim.bytes", mib_bytes, bytes) ||
+ GETSYSCTL_SIMPLE("kstat.zfs.misc.zio_trim.success", mib_success, success) ||
+ GETSYSCTL_SIMPLE("kstat.zfs.misc.zio_trim.failed", mib_failed, failed) ||
+ GETSYSCTL_SIMPLE("kstat.zfs.misc.zio_trim.unsupported", mib_unsupported, unsupported))) {
+ error("DISABLED: zfs.trim_bytes chart");
+ error("DISABLED: zfs.trim_success chart");
+ error("DISABLED: kstat.zfs.misc.zio_trim module");
+ return 1;
+ } else {
+
+ // --------------------------------------------------------------------
+
+ static RRDSET *st_bytes = NULL;
+ static RRDDIM *rd_bytes = NULL;
+
+ if (unlikely(!st_bytes)) {
+ st_bytes = rrdset_create_localhost(
+ "zfs",
+ "trim_bytes",
+ NULL,
+ "trim",
+ NULL,
+ "Successfully TRIMmed bytes",
+ "bytes",
+ "freebsd",
+ "zfs",
+ 2320,
+ update_every,
+ RRDSET_TYPE_LINE
+ );
+
+ rd_bytes = rrddim_add(st_bytes, "TRIMmed", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else rrdset_next(st_bytes);
+
+ rrddim_set_by_pointer(st_bytes, rd_bytes, bytes);
+ rrdset_done(st_bytes);
+
+ // --------------------------------------------------------------------
+
+ static RRDSET *st_requests = NULL;
+ static RRDDIM *rd_successful = NULL, *rd_failed = NULL, *rd_unsupported = NULL;
+
+ if (unlikely(!st_requests)) {
+ st_requests = rrdset_create_localhost(
+ "zfs",
+ "trim_requests",
+ NULL,
+ "trim",
+ NULL,
+ "TRIM requests",
+ "requests",
+ "freebsd",
+ "zfs",
+ 2321,
+ update_every,
+ RRDSET_TYPE_STACKED
+ );
+
+ rd_successful = rrddim_add(st_requests, "successful", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_failed = rrddim_add(st_requests, "failed", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_unsupported = rrddim_add(st_requests, "unsupported", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else rrdset_next(st_requests);
+
+ rrddim_set_by_pointer(st_requests, rd_successful, success);
+ rrddim_set_by_pointer(st_requests, rd_failed, failed);
+ rrddim_set_by_pointer(st_requests, rd_unsupported, unsupported);
+ rrdset_done(st_requests);
+
+ }
+
+ return 0;
+} \ No newline at end of file
diff --git a/collectors/freebsd.plugin/freebsd_sysctl.c b/collectors/freebsd.plugin/freebsd_sysctl.c
new file mode 100644
index 000000000..da5a351de
--- /dev/null
+++ b/collectors/freebsd.plugin/freebsd_sysctl.c
@@ -0,0 +1,3188 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "plugin_freebsd.h"
+
+#include <sys/vmmeter.h>
+#include <vm/vm_param.h>
+
+#define _KERNEL
+#include <sys/sem.h>
+#include <sys/shm.h>
+#include <sys/msg.h>
+#undef _KERNEL
+
+#include <net/netisr.h>
+
+#include <netinet/ip.h>
+#include <netinet/ip_var.h>
+#include <netinet/ip_icmp.h>
+#include <netinet/icmp_var.h>
+#include <netinet6/ip6_var.h>
+#include <netinet/icmp6.h>
+#include <netinet/tcp_var.h>
+#include <netinet/tcp_fsm.h>
+#include <netinet/udp.h>
+#include <netinet/udp_var.h>
+
+// --------------------------------------------------------------------------------------------------------------------
+// common definitions and variables
+
+int system_pagesize = PAGE_SIZE;
+int number_of_cpus = 1;
+#if __FreeBSD_version >= 1200029
+struct __vmmeter {
+ uint64_t v_swtch;
+ uint64_t v_trap;
+ uint64_t v_syscall;
+ uint64_t v_intr;
+ uint64_t v_soft;
+ uint64_t v_vm_faults;
+ uint64_t v_io_faults;
+ uint64_t v_cow_faults;
+ uint64_t v_cow_optim;
+ uint64_t v_zfod;
+ uint64_t v_ozfod;
+ uint64_t v_swapin;
+ uint64_t v_swapout;
+ uint64_t v_swappgsin;
+ uint64_t v_swappgsout;
+ uint64_t v_vnodein;
+ uint64_t v_vnodeout;
+ uint64_t v_vnodepgsin;
+ uint64_t v_vnodepgsout;
+ uint64_t v_intrans;
+ uint64_t v_reactivated;
+ uint64_t v_pdwakeups;
+ uint64_t v_pdpages;
+ uint64_t v_pdshortfalls;
+ uint64_t v_dfree;
+ uint64_t v_pfree;
+ uint64_t v_tfree;
+ uint64_t v_forks;
+ uint64_t v_vforks;
+ uint64_t v_rforks;
+ uint64_t v_kthreads;
+ uint64_t v_forkpages;
+ uint64_t v_vforkpages;
+ uint64_t v_rforkpages;
+ uint64_t v_kthreadpages;
+ u_int v_page_size;
+ u_int v_page_count;
+ u_int v_free_reserved;
+ u_int v_free_target;
+ u_int v_free_min;
+ u_int v_free_count;
+ u_int v_wire_count;
+ u_int v_active_count;
+ u_int v_inactive_target;
+ u_int v_inactive_count;
+ u_int v_laundry_count;
+ u_int v_pageout_free_min;
+ u_int v_interrupt_free_min;
+ u_int v_free_severe;
+};
+typedef struct __vmmeter vmmeter_t;
+#else
+typedef struct vmmeter vmmeter_t;
+#endif
+
+#if (__FreeBSD_version >= 1101516 && __FreeBSD_version < 1200000) || __FreeBSD_version >= 1200015
+#define NETDATA_COLLECT_LAUNDRY 1
+#endif
+
+// --------------------------------------------------------------------------------------------------------------------
+// FreeBSD plugin initialization
+
+int freebsd_plugin_init()
+{
+ system_pagesize = getpagesize();
+ if (system_pagesize <= 0) {
+ error("FREEBSD: can't get system page size");
+ return 1;
+ }
+
+ if (unlikely(GETSYSCTL_BY_NAME("kern.smp.cpus", number_of_cpus))) {
+ error("FREEBSD: can't get number of cpus");
+ return 1;
+ }
+
+ if (unlikely(!number_of_cpus)) {
+ error("FREEBSD: wrong number of cpus");
+ return 1;
+ }
+
+ return 0;
+}
+
+// --------------------------------------------------------------------------------------------------------------------
+// vm.loadavg
+
+// FreeBSD calculates load averages once every 5 seconds
+#define MIN_LOADAVG_UPDATE_EVERY 5
+
+int do_vm_loadavg(int update_every, usec_t dt){
+ static usec_t next_loadavg_dt = 0;
+
+ if (next_loadavg_dt <= dt) {
+ static int mib[2] = {0, 0};
+ struct loadavg sysload;
+
+ if (unlikely(GETSYSCTL_SIMPLE("vm.loadavg", mib, sysload))) {
+ error("DISABLED: system.load chart");
+ error("DISABLED: vm.loadavg module");
+ return 1;
+ } else {
+
+ // --------------------------------------------------------------------
+
+ static RRDSET *st = NULL;
+ static RRDDIM *rd_load1 = NULL, *rd_load2 = NULL, *rd_load3 = NULL;
+
+ if (unlikely(!st)) {
+ st = rrdset_create_localhost(
+ "system",
+ "load",
+ NULL,
+ "load",
+ NULL,
+ "System Load Average",
+ "load",
+ "freebsd.plugin",
+ "vm.loadavg",
+ NETDATA_CHART_PRIO_SYSTEM_LOAD,
+ (update_every < MIN_LOADAVG_UPDATE_EVERY) ?
+ MIN_LOADAVG_UPDATE_EVERY : update_every, RRDSET_TYPE_LINE
+ );
+ rd_load1 = rrddim_add(st, "load1", NULL, 1, 1000, RRD_ALGORITHM_ABSOLUTE);
+ rd_load2 = rrddim_add(st, "load5", NULL, 1, 1000, RRD_ALGORITHM_ABSOLUTE);
+ rd_load3 = rrddim_add(st, "load15", NULL, 1, 1000, RRD_ALGORITHM_ABSOLUTE);
+ } else
+ rrdset_next(st);
+
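+ // ldavg[] values are fixed-point, scaled by fscale; dividing by fscale and multiplying by 1000
+ // matches the dimensions' divisor of 1000, so the chart shows the load with three decimals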
+ rrddim_set_by_pointer(st, rd_load1, (collected_number) ((double) sysload.ldavg[0] / sysload.fscale * 1000));
+ rrddim_set_by_pointer(st, rd_load2, (collected_number) ((double) sysload.ldavg[1] / sysload.fscale * 1000));
+ rrddim_set_by_pointer(st, rd_load3, (collected_number) ((double) sysload.ldavg[2] / sysload.fscale * 1000));
+ rrdset_done(st);
+
+ next_loadavg_dt = st->update_every * USEC_PER_SEC;
+ }
+ }
+ else
+ next_loadavg_dt -= dt;
+
+ return 0;
+}
+
+// --------------------------------------------------------------------------------------------------------------------
+// vm.vmtotal
+
+int do_vm_vmtotal(int update_every, usec_t dt) {
+ (void)dt;
+ static int do_all_processes = -1, do_processes = -1, do_committed = -1;
+
+ if (unlikely(do_all_processes == -1)) {
+ do_all_processes = config_get_boolean("plugin:freebsd:vm.vmtotal", "enable total processes", 1);
+ do_processes = config_get_boolean("plugin:freebsd:vm.vmtotal", "processes running", 1);
+ do_committed = config_get_boolean("plugin:freebsd:vm.vmtotal", "committed memory", 1);
+ }
+
+ if (likely(do_all_processes | do_processes | do_committed)) {
+ static int mib[2] = {0, 0};
+ struct vmtotal vmtotal_data;
+
+ if (unlikely(GETSYSCTL_SIMPLE("vm.vmtotal", mib, vmtotal_data))) {
+ do_all_processes = 0;
+ error("DISABLED: system.active_processes chart");
+ do_processes = 0;
+ error("DISABLED: system.processes chart");
+ do_committed = 0;
+ error("DISABLED: mem.committed chart");
+ error("DISABLED: vm.vmtotal module");
+ return 1;
+ } else {
+
+ // --------------------------------------------------------------------
+
+ if (likely(do_all_processes)) {
+ static RRDSET *st = NULL;
+ static RRDDIM *rd = NULL;
+
+ if (unlikely(!st)) {
+ st = rrdset_create_localhost(
+ "system",
+ "active_processes",
+ NULL,
+ "processes",
+ NULL,
+ "System Active Processes",
+ "processes",
+ "freebsd.plugin",
+ "vm.vmtotal",
+ NETDATA_CHART_PRIO_SYSTEM_ACTIVE_PROCESSES,
+ update_every,
+ RRDSET_TYPE_LINE
+ );
+ rd = rrddim_add(st, "active", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ }
+ else rrdset_next(st);
+
+ rrddim_set_by_pointer(st, rd, (vmtotal_data.t_rq + vmtotal_data.t_dw + vmtotal_data.t_pw + vmtotal_data.t_sl + vmtotal_data.t_sw));
+ rrdset_done(st);
+ }
+
+ // --------------------------------------------------------------------
+
+ if (likely(do_processes)) {
+ static RRDSET *st = NULL;
+ static RRDDIM *rd_running = NULL, *rd_blocked = NULL;
+
+ if (unlikely(!st)) {
+ st = rrdset_create_localhost(
+ "system",
+ "processes",
+ NULL,
+ "processes",
+ NULL,
+ "System Processes",
+ "processes",
+ "freebsd.plugin",
+ "vm.vmtotal",
+ NETDATA_CHART_PRIO_SYSTEM_PROCESSES,
+ update_every,
+ RRDSET_TYPE_LINE
+ );
+
+ rd_running = rrddim_add(st, "running", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ rd_blocked = rrddim_add(st, "blocked", NULL, -1, 1, RRD_ALGORITHM_ABSOLUTE);
+ }
+ else rrdset_next(st);
+
+ rrddim_set_by_pointer(st, rd_running, vmtotal_data.t_rq);
+ rrddim_set_by_pointer(st, rd_blocked, (vmtotal_data.t_dw + vmtotal_data.t_pw));
+ rrdset_done(st);
+ }
+
+ // --------------------------------------------------------------------
+
+ if (likely(do_committed)) {
+ static RRDSET *st = NULL;
+ static RRDDIM *rd = NULL;
+
+ if (unlikely(!st)) {
+ st = rrdset_create_localhost(
+ "mem",
+ "committed",
+ NULL,
+ "system",
+ NULL,
+ "Committed (Allocated) Memory",
+ "MB",
+ "freebsd.plugin",
+ "vm.vmtotal",
+ NETDATA_CHART_PRIO_MEM_SYSTEM_COMMITTED,
+ update_every,
+ RRDSET_TYPE_AREA
+ );
+ rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
+
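+ // t_rm is reported in pages; the page-size multiplier and MEGA_FACTOR divisor turn it into MB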
+ rd = rrddim_add(st, "Committed_AS", NULL, system_pagesize, MEGA_FACTOR, RRD_ALGORITHM_ABSOLUTE);
+ }
+ else rrdset_next(st);
+
+ rrddim_set_by_pointer(st, rd, vmtotal_data.t_rm);
+ rrdset_done(st);
+ }
+ }
+ } else {
+ error("DISABLED: vm.vmtotal module");
+ return 1;
+ }
+
+ return 0;
+}
+
+// --------------------------------------------------------------------------------------------------------------------
+// kern.cp_time
+
+int do_kern_cp_time(int update_every, usec_t dt) {
+ (void)dt;
+
+ if (unlikely(CPUSTATES != 5)) {
+ error("FREEBSD: There are %d CPU states (5 was expected)", CPUSTATES);
+ error("DISABLED: system.cpu chart");
+ error("DISABLED: kern.cp_time module");
+ return 1;
+ } else {
+ static int mib[2] = {0, 0};
+ long cp_time[CPUSTATES];
+
+ if (unlikely(GETSYSCTL_SIMPLE("kern.cp_time", mib, cp_time))) {
+ error("DISABLED: system.cpu chart");
+ error("DISABLED: kern.cp_time module");
+ return 1;
+ } else {
+
+ // --------------------------------------------------------------------
+
+ static RRDSET *st = NULL;
+ static RRDDIM *rd_nice = NULL, *rd_system = NULL, *rd_user = NULL, *rd_interrupt = NULL, *rd_idle = NULL;
+
+ if (unlikely(!st)) {
+ st = rrdset_create_localhost(
+ "system",
+ "cpu",
+ NULL,
+ "cpu",
+ "system.cpu",
+ "Total CPU utilization",
+ "percentage",
+ "freebsd.plugin",
+ "kern.cp_time",
+ NETDATA_CHART_PRIO_SYSTEM_CPU,
+ update_every,
+ RRDSET_TYPE_STACKED
+ );
+
+ rd_nice = rrddim_add(st, "nice", NULL, 1, 1, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL);
+ rd_system = rrddim_add(st, "system", NULL, 1, 1, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL);
+ rd_user = rrddim_add(st, "user", NULL, 1, 1, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL);
+ rd_interrupt = rrddim_add(st, "interrupt", NULL, 1, 1, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL);
+ rd_idle = rrddim_add(st, "idle", NULL, 1, 1, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL);
+ rrddim_hide(st, "idle");
+ }
+ else rrdset_next(st);
+
+ rrddim_set_by_pointer(st, rd_nice, cp_time[1]);
+ rrddim_set_by_pointer(st, rd_system, cp_time[2]);
+ rrddim_set_by_pointer(st, rd_user, cp_time[0]);
+ rrddim_set_by_pointer(st, rd_interrupt, cp_time[3]);
+ rrddim_set_by_pointer(st, rd_idle, cp_time[4]);
+ rrdset_done(st);
+ }
+ }
+
+ return 0;
+}
+
+// --------------------------------------------------------------------------------------------------------------------
+// kern.cp_times
+
+int do_kern_cp_times(int update_every, usec_t dt) {
+ (void)dt;
+
+ if (unlikely(CPUSTATES != 5)) {
+ error("FREEBSD: There are %d CPU states (5 was expected)", CPUSTATES);
+ error("DISABLED: cpu.cpuXX charts");
+ error("DISABLED: kern.cp_times module");
+ return 1;
+ } else {
+ static int mib[2] = {0, 0};
+ long cp_time[CPUSTATES];
+ static long *pcpu_cp_time = NULL;
+ static int old_number_of_cpus = 0;
+
+ if(unlikely(number_of_cpus != old_number_of_cpus))
+ pcpu_cp_time = reallocz(pcpu_cp_time, sizeof(cp_time) * number_of_cpus);
+ if (unlikely(GETSYSCTL_WSIZE("kern.cp_times", mib, pcpu_cp_time, sizeof(cp_time) * number_of_cpus))) {
+ error("DISABLED: cpu.cpuXX charts");
+ error("DISABLED: kern.cp_times module");
+ return 1;
+ } else {
+
+ // --------------------------------------------------------------------
+
+ int i;
+ static struct cpu_chart {
+ char cpuid[MAX_INT_DIGITS + 4];
+ RRDSET *st;
+ RRDDIM *rd_user;
+ RRDDIM *rd_nice;
+ RRDDIM *rd_system;
+ RRDDIM *rd_interrupt;
+ RRDDIM *rd_idle;
+ } *all_cpu_charts = NULL;
+
+ if(unlikely(number_of_cpus > old_number_of_cpus)) {
+ all_cpu_charts = reallocz(all_cpu_charts, sizeof(struct cpu_chart) * number_of_cpus);
+ memset(&all_cpu_charts[old_number_of_cpus], 0, sizeof(struct cpu_chart) * (number_of_cpus - old_number_of_cpus));
+ }
+
+ for (i = 0; i < number_of_cpus; i++) {
+ if (unlikely(!all_cpu_charts[i].st)) {
+ snprintfz(all_cpu_charts[i].cpuid, MAX_INT_DIGITS, "cpu%d", i);
+ all_cpu_charts[i].st = rrdset_create_localhost(
+ "cpu",
+ all_cpu_charts[i].cpuid,
+ NULL,
+ "utilization",
+ "cpu.cpu",
+ "Core utilization",
+ "percentage",
+ "freebsd.plugin",
+ "kern.cp_times",
+ NETDATA_CHART_PRIO_CPU_PER_CORE,
+ update_every,
+ RRDSET_TYPE_STACKED
+ );
+
+ all_cpu_charts[i].rd_nice = rrddim_add(all_cpu_charts[i].st, "nice", NULL, 1, 1,
+ RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL);
+ all_cpu_charts[i].rd_system = rrddim_add(all_cpu_charts[i].st, "system", NULL, 1, 1,
+ RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL);
+ all_cpu_charts[i].rd_user = rrddim_add(all_cpu_charts[i].st, "user", NULL, 1, 1,
+ RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL);
+ all_cpu_charts[i].rd_interrupt = rrddim_add(all_cpu_charts[i].st, "interrupt", NULL, 1, 1,
+ RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL);
+ all_cpu_charts[i].rd_idle = rrddim_add(all_cpu_charts[i].st, "idle", NULL, 1, 1,
+ RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL);
+ rrddim_hide(all_cpu_charts[i].st, "idle");
+ } else rrdset_next(all_cpu_charts[i].st);
+
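+ // kern.cp_times returns CPUSTATES (5) counters per CPU, laid out consecutively
+ // in the order user, nice, system, interrupt, idle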
+ rrddim_set_by_pointer(all_cpu_charts[i].st, all_cpu_charts[i].rd_nice, pcpu_cp_time[i * 5 + 1]);
+ rrddim_set_by_pointer(all_cpu_charts[i].st, all_cpu_charts[i].rd_system, pcpu_cp_time[i * 5 + 2]);
+ rrddim_set_by_pointer(all_cpu_charts[i].st, all_cpu_charts[i].rd_user, pcpu_cp_time[i * 5 + 0]);
+ rrddim_set_by_pointer(all_cpu_charts[i].st, all_cpu_charts[i].rd_interrupt, pcpu_cp_time[i * 5 + 3]);
+ rrddim_set_by_pointer(all_cpu_charts[i].st, all_cpu_charts[i].rd_idle, pcpu_cp_time[i * 5 + 4]);
+ rrdset_done(all_cpu_charts[i].st);
+ }
+ }
+
+ old_number_of_cpus = number_of_cpus;
+ }
+
+ return 0;
+}
+
+// --------------------------------------------------------------------------------------------------------------------
+// dev.cpu.temperature
+
+int do_dev_cpu_temperature(int update_every, usec_t dt) {
+ (void)dt;
+
+ int i;
+ static int *mib = NULL;
+ static int *pcpu_temperature = NULL;
+ static int old_number_of_cpus = 0;
+ char char_mib[MAX_INT_DIGITS + 21];
+ char char_rd[MAX_INT_DIGITS + 9];
+
+ if (unlikely(number_of_cpus != old_number_of_cpus)) {
+ pcpu_temperature = reallocz(pcpu_temperature, sizeof(int) * number_of_cpus);
+ mib = reallocz(mib, sizeof(int) * number_of_cpus * 4);
+ if (unlikely(number_of_cpus > old_number_of_cpus))
+ memset(&mib[old_number_of_cpus * 4], 0, sizeof(RRDDIM) * (number_of_cpus - old_number_of_cpus));
+ }
+ for (i = 0; i < number_of_cpus; i++) {
+ if (unlikely(!(mib[i * 4])))
+ sprintf(char_mib, "dev.cpu.%d.temperature", i);
+ if (unlikely(getsysctl_simple(char_mib, &mib[i * 4], 4, &pcpu_temperature[i], sizeof(int)))) {
+ error("DISABLED: cpu.temperature chart");
+ error("DISABLED: dev.cpu.temperature module");
+ return 1;
+ }
+ }
+
+ // --------------------------------------------------------------------
+
+ static RRDSET *st;
+ static RRDDIM **rd_pcpu_temperature;
+
+ if (unlikely(number_of_cpus != old_number_of_cpus)) {
+ rd_pcpu_temperature = reallocz(rd_pcpu_temperature, sizeof(RRDDIM *) * number_of_cpus);
+ if (unlikely(number_of_cpus > old_number_of_cpus))
+ memset(&rd_pcpu_temperature[old_number_of_cpus], 0, sizeof(RRDDIM *) * (number_of_cpus - old_number_of_cpus));
+ }
+
+ if (unlikely(!st)) {
+ st = rrdset_create_localhost(
+ "cpu",
+ "temperature",
+ NULL,
+ "temperature",
+ "cpu.temperatute",
+ "Core temperature",
+ "Celsius",
+ "freebsd.plugin",
+ "dev.cpu.temperature",
+ NETDATA_CHART_PRIO_CPU_TEMPERATURE,
+ update_every,
+ RRDSET_TYPE_LINE
+ );
+ }
+ else rrdset_next(st);
+
+ for (i = 0; i < number_of_cpus; i++) {
+ if (unlikely(!rd_pcpu_temperature[i])) {
+ sprintf(char_rd, "cpu%d.temp", i);
+ rd_pcpu_temperature[i] = rrddim_add(st, char_rd, NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ }
+
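+ // dev.cpu.N.temperature is exposed in tenths of a Kelvin; convert to degrees Celsius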
+ rrddim_set_by_pointer(st, rd_pcpu_temperature[i], (collected_number) ((double)pcpu_temperature[i] / 10 - 273.15));
+ }
+
+ rrdset_done(st);
+
+ old_number_of_cpus = number_of_cpus;
+
+ return 0;
+}
+
+// --------------------------------------------------------------------------------------------------------------------
+// dev.cpu.0.freq
+
+int do_dev_cpu_0_freq(int update_every, usec_t dt) {
+ (void)dt;
+ static int mib[4] = {0, 0, 0, 0};
+ int cpufreq;
+
+ if (unlikely(GETSYSCTL_SIMPLE("dev.cpu.0.freq", mib, cpufreq))) {
+ error("DISABLED: cpu.scaling_cur_freq chart");
+ error("DISABLED: dev.cpu.0.freq module");
+ return 1;
+ } else {
+
+ // --------------------------------------------------------------------
+
+ static RRDSET *st = NULL;
+ static RRDDIM *rd = NULL;
+
+ if (unlikely(!st)) {
+ st = rrdset_create_localhost(
+ "cpu",
+ "scaling_cur_freq",
+ NULL,
+ "cpufreq",
+ NULL,
+ "Current CPU Scaling Frequency",
+ "MHz",
+ "freebsd.plugin",
+ "dev.cpu.0.freq",
+ NETDATA_CHART_PRIO_CPUFREQ_SCALING_CUR_FREQ,
+ update_every,
+ RRDSET_TYPE_LINE
+ );
+
+ rd = rrddim_add(st, "frequency", NULL, 1, 1000, RRD_ALGORITHM_ABSOLUTE);
+ }
+ else rrdset_next(st);
+
+ rrddim_set_by_pointer(st, rd, cpufreq);
+ rrdset_done(st);
+ }
+
+ return 0;
+}
+
+// --------------------------------------------------------------------------------------------------------------------
+// hw.intrcnt
+
+int do_hw_intcnt(int update_every, usec_t dt) {
+ (void)dt;
+ static int mib_hw_intrcnt[2] = {0, 0};
+ size_t intrcnt_size = 0;
+ unsigned long i;
+
+ if (unlikely(GETSYSCTL_SIZE("hw.intrcnt", mib_hw_intrcnt, intrcnt_size))) {
+ error("DISABLED: system.intr chart");
+ error("DISABLED: system.interrupts chart");
+ error("DISABLED: hw.intrcnt module");
+ return 1;
+ } else {
+ unsigned long nintr = 0;
+ static unsigned long old_nintr = 0;
+ static unsigned long *intrcnt = NULL;
+ unsigned long long totalintr = 0;
+
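+ // hw.intrcnt is an array of u_long counters, one per interrupt source; derive the number of sources from the returned size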
+ nintr = intrcnt_size / sizeof(u_long);
+ if (unlikely(nintr != old_nintr))
+ intrcnt = reallocz(intrcnt, nintr * sizeof(u_long));
+ if (unlikely(GETSYSCTL_WSIZE("hw.intrcnt", mib_hw_intrcnt, intrcnt, nintr * sizeof(u_long)))) {
+ error("DISABLED: system.intr chart");
+ error("DISABLED: system.interrupts chart");
+ error("DISABLED: hw.intrcnt module");
+ return 1;
+ } else {
+ for (i = 0; i < nintr; i++)
+ totalintr += intrcnt[i];
+
+ // --------------------------------------------------------------------
+
+ static RRDSET *st_intr = NULL;
+ static RRDDIM *rd_intr = NULL;
+
+ if (unlikely(!st_intr)) {
+ st_intr = rrdset_create_localhost(
+ "system",
+ "intr",
+ NULL,
+ "interrupts",
+ NULL,
+ "Total Hardware Interrupts",
+ "interrupts/s",
+ "freebsd.plugin",
+ "hw.intrcnt",
+ NETDATA_CHART_PRIO_SYSTEM_INTR,
+ update_every,
+ RRDSET_TYPE_LINE
+ );
+ rrdset_flag_set(st_intr, RRDSET_FLAG_DETAIL);
+
+ rd_intr = rrddim_add(st_intr, "interrupts", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ } else
+ rrdset_next(st_intr);
+
+ rrddim_set_by_pointer(st_intr, rd_intr, totalintr);
+ rrdset_done(st_intr);
+
+ // --------------------------------------------------------------------
+
+ size_t size;
+ static int mib_hw_intrnames[2] = {0, 0};
+ static char *intrnames = NULL;
+
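+ // hw.intrnames returns the matching source names packed as fixed-width records of MAXCOMLEN + 1 bytes each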
+ size = nintr * (MAXCOMLEN + 1);
+ if (unlikely(nintr != old_nintr))
+ intrnames = reallocz(intrnames, size);
+ if (unlikely(GETSYSCTL_WSIZE("hw.intrnames", mib_hw_intrnames, intrnames, size))) {
+ error("DISABLED: system.intr chart");
+ error("DISABLED: system.interrupts chart");
+ error("DISABLED: hw.intrcnt module");
+ return 1;
+ } else {
+
+ // --------------------------------------------------------------------
+
+ static RRDSET *st_interrupts = NULL;
+ void *p;
+
+ if (unlikely(!st_interrupts))
+ st_interrupts = rrdset_create_localhost(
+ "system",
+ "interrupts",
+ NULL,
+ "interrupts",
+ NULL,
+ "System interrupts",
+ "interrupts/s",
+ "freebsd.plugin",
+ "hw.intrcnt",
+ NETDATA_CHART_PRIO_SYSTEM_INTERRUPTS,
+ update_every,
+ RRDSET_TYPE_STACKED
+ );
+ else
+ rrdset_next(st_interrupts);
+
+ for (i = 0; i < nintr; i++) {
+ p = intrnames + i * (MAXCOMLEN + 1);
+ if (unlikely((intrcnt[i] != 0) && (*(char *) p != 0))) {
+ RRDDIM *rd_interrupts = rrddim_find(st_interrupts, p);
+
+ if (unlikely(!rd_interrupts))
+ rd_interrupts = rrddim_add(st_interrupts, p, NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+
+ rrddim_set_by_pointer(st_interrupts, rd_interrupts, intrcnt[i]);
+ }
+ }
+ rrdset_done(st_interrupts);
+ }
+ }
+
+ old_nintr = nintr;
+ }
+
+ return 0;
+}
+
+// --------------------------------------------------------------------------------------------------------------------
+// vm.stats.sys.v_intr
+
+int do_vm_stats_sys_v_intr(int update_every, usec_t dt) {
+ (void)dt;
+ static int mib[4] = {0, 0, 0, 0};
+ u_int int_number;
+
+ if (unlikely(GETSYSCTL_SIMPLE("vm.stats.sys.v_intr", mib, int_number))) {
+ error("DISABLED: system.dev_intr chart");
+ error("DISABLED: vm.stats.sys.v_intr module");
+ return 1;
+ } else {
+
+ // --------------------------------------------------------------------
+
+ static RRDSET *st = NULL;
+ static RRDDIM *rd = NULL;
+
+ if (unlikely(!st)) {
+ st = rrdset_create_localhost(
+ "system",
+ "dev_intr",
+ NULL,
+ "interrupts",
+ NULL,
+ "Device Interrupts",
+ "interrupts/s",
+ "freebsd.plugin",
+ "vm.stats.sys.v_intr",
+ NETDATA_CHART_PRIO_SYSTEM_DEV_INTR,
+ update_every,
+ RRDSET_TYPE_LINE
+ );
+
+ rd = rrddim_add(st, "interrupts", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else rrdset_next(st);
+
+ rrddim_set_by_pointer(st, rd, int_number);
+ rrdset_done(st);
+ }
+
+ return 0;
+}
+
+// --------------------------------------------------------------------------------------------------------------------
+// vm.stats.sys.v_soft
+
+int do_vm_stats_sys_v_soft(int update_every, usec_t dt) {
+ (void)dt;
+ static int mib[4] = {0, 0, 0, 0};
+ u_int soft_intr_number;
+
+ if (unlikely(GETSYSCTL_SIMPLE("vm.stats.sys.v_soft", mib, soft_intr_number))) {
+ error("DISABLED: system.dev_intr chart");
+ error("DISABLED: vm.stats.sys.v_soft module");
+ return 1;
+ } else {
+
+ // --------------------------------------------------------------------
+
+ static RRDSET *st = NULL;
+ static RRDDIM *rd = NULL;
+
+ if (unlikely(!st)) {
+ st = rrdset_create_localhost(
+ "system",
+ "soft_intr",
+ NULL,
+ "interrupts",
+ NULL,
+ "Software Interrupts",
+ "interrupts/s",
+ "freebsd.plugin",
+ "vm.stats.sys.v_soft",
+ NETDATA_CHART_PRIO_SYSTEM_SOFT_INTR,
+ update_every,
+ RRDSET_TYPE_LINE
+ );
+
+ rd = rrddim_add(st, "interrupts", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else rrdset_next(st);
+
+ rrddim_set_by_pointer(st, rd, soft_intr_number);
+ rrdset_done(st);
+ }
+
+ return 0;
+}
+
+// --------------------------------------------------------------------------------------------------------------------
+// vm.stats.sys.v_swtch
+
+int do_vm_stats_sys_v_swtch(int update_every, usec_t dt) {
+ (void)dt;
+ static int mib[4] = {0, 0, 0, 0};
+ u_int ctxt_number;
+
+ if (unlikely(GETSYSCTL_SIMPLE("vm.stats.sys.v_swtch", mib, ctxt_number))) {
+ error("DISABLED: system.ctxt chart");
+ error("DISABLED: vm.stats.sys.v_swtch module");
+ return 1;
+ } else {
+
+ // --------------------------------------------------------------------
+
+ static RRDSET *st = NULL;
+ static RRDDIM *rd = NULL;
+
+ if (unlikely(!st)) {
+ st = rrdset_create_localhost(
+ "system",
+ "ctxt",
+ NULL,
+ "processes",
+ NULL,
+ "CPU Context Switches",
+ "context switches/s",
+ "freebsd.plugin",
+ "vm.stats.sys.v_swtch",
+ NETDATA_CHART_PRIO_SYSTEM_CTXT,
+ update_every,
+ RRDSET_TYPE_LINE
+ );
+
+ rd = rrddim_add(st, "switches", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else rrdset_next(st);
+
+ rrddim_set_by_pointer(st, rd, ctxt_number);
+ rrdset_done(st);
+ }
+
+ return 0;
+}
+
+// --------------------------------------------------------------------------------------------------------------------
+// vm.stats.vm.v_forks
+
+int do_vm_stats_sys_v_forks(int update_every, usec_t dt) {
+ (void)dt;
+ static int mib[4] = {0, 0, 0, 0};
+ u_int forks_number;
+
+ if (unlikely(GETSYSCTL_SIMPLE("vm.stats.vm.v_forks", mib, forks_number))) {
+ error("DISABLED: system.forks chart");
+ error("DISABLED: vm.stats.sys.v_swtch module");
+ return 1;
+ } else {
+
+ // --------------------------------------------------------------------
+
+ static RRDSET *st = NULL;
+ static RRDDIM *rd = NULL;
+
+ if (unlikely(!st)) {
+ st = rrdset_create_localhost(
+ "system",
+ "forks",
+ NULL,
+ "processes",
+ NULL,
+ "Started Processes",
+ "processes/s",
+ "freebsd.plugin",
+ "vm.stats.sys.v_swtch",
+ NETDATA_CHART_PRIO_SYSTEM_FORKS,
+ update_every,
+ RRDSET_TYPE_LINE
+ );
+
+ rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
+
+ rd = rrddim_add(st, "started", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else rrdset_next(st);
+
+ rrddim_set_by_pointer(st, rd, forks_number);
+ rrdset_done(st);
+ }
+
+ return 0;
+}
+
+// --------------------------------------------------------------------------------------------------------------------
+// vm.swap_info
+
+int do_vm_swap_info(int update_every, usec_t dt) {
+ (void)dt;
+ static int mib[3] = {0, 0, 0};
+
+ if (unlikely(getsysctl_mib("vm.swap_info", mib, 2))) {
+ error("DISABLED: system.swap chart");
+ error("DISABLED: vm.swap_info module");
+ return 1;
+ } else {
+ int i;
+ struct xswdev xsw;
+ struct total_xsw {
+ collected_number bytes_used;
+ collected_number bytes_total;
+ } total_xsw = {0, 0};
+
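+ // vm.swap_info.N returns one struct xswdev per swap device; iterate indices until the kernel answers ENOENT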
+ for (i = 0; ; i++) {
+ size_t size;
+
+ mib[2] = i;
+ size = sizeof(xsw);
+ if (unlikely(sysctl(mib, 3, &xsw, &size, NULL, 0) == -1 )) {
+ if (unlikely(errno != ENOENT)) {
+ error("FREEBSD: sysctl(%s...) failed: %s", "vm.swap_info", strerror(errno));
+ error("DISABLED: system.swap chart");
+ error("DISABLED: vm.swap_info module");
+ return 1;
+ } else {
+ if (unlikely(size != sizeof(xsw))) {
+ error("FREEBSD: sysctl(%s...) expected %lu, got %lu", "vm.swap_info", (unsigned long)sizeof(xsw), (unsigned long)size);
+ error("DISABLED: system.swap chart");
+ error("DISABLED: vm.swap_info module");
+ return 1;
+ } else break;
+ }
+ }
+ total_xsw.bytes_used += xsw.xsw_used;
+ total_xsw.bytes_total += xsw.xsw_nblks;
+ }
+
+ // --------------------------------------------------------------------
+
+ static RRDSET *st = NULL;
+ static RRDDIM *rd_free = NULL, *rd_used = NULL;
+
+ if (unlikely(!st)) {
+ st = rrdset_create_localhost(
+ "system",
+ "swap",
+ NULL,
+ "swap",
+ NULL,
+ "System Swap",
+ "MB",
+ "freebsd.plugin",
+ "vm.swap_info",
+ NETDATA_CHART_PRIO_SYSTEM_SWAP,
+ update_every,
+ RRDSET_TYPE_STACKED
+ );
+
+ rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
+
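+ // xsw_used and xsw_nblks are counted in pages, hence the page-size multiplier and the MEGA_FACTOR divisor to plot MB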
+ rd_free = rrddim_add(st, "free", NULL, system_pagesize, MEGA_FACTOR, RRD_ALGORITHM_ABSOLUTE);
+ rd_used = rrddim_add(st, "used", NULL, system_pagesize, MEGA_FACTOR, RRD_ALGORITHM_ABSOLUTE);
+ }
+ else rrdset_next(st);
+
+ rrddim_set_by_pointer(st, rd_free, total_xsw.bytes_total - total_xsw.bytes_used);
+ rrddim_set_by_pointer(st, rd_used, total_xsw.bytes_used);
+ rrdset_done(st);
+ }
+
+ return 0;
+}
+
+// --------------------------------------------------------------------------------------------------------------------
+// system.ram
+
+int do_system_ram(int update_every, usec_t dt) {
+ (void)dt;
+ static int mib_active_count[4] = {0, 0, 0, 0}, mib_inactive_count[4] = {0, 0, 0, 0}, mib_wire_count[4] = {0, 0, 0, 0},
+ mib_cache_count[4] = {0, 0, 0, 0}, mib_laundry_count[4] = {0, 0, 0, 0}, mib_vfs_bufspace[2] = {0, 0},
+ mib_free_count[4] = {0, 0, 0, 0};
+ vmmeter_t vmmeter_data;
+ int vfs_bufspace_count;
+
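+ // vm.stats.vm.v_cache_count only exists on older kernels (see the __FreeBSD_version gate below), and the laundry counter is optional at build time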
+ if (unlikely(GETSYSCTL_SIMPLE("vm.stats.vm.v_active_count", mib_active_count, vmmeter_data.v_active_count) ||
+ GETSYSCTL_SIMPLE("vm.stats.vm.v_inactive_count", mib_inactive_count, vmmeter_data.v_inactive_count) ||
+ GETSYSCTL_SIMPLE("vm.stats.vm.v_wire_count", mib_wire_count, vmmeter_data.v_wire_count) ||
+#if __FreeBSD_version < 1200016
+ GETSYSCTL_SIMPLE("vm.stats.vm.v_cache_count", mib_cache_count, vmmeter_data.v_cache_count) ||
+#endif
+#if defined(NETDATA_COLLECT_LAUNDRY)
+ GETSYSCTL_SIMPLE("vm.stats.vm.v_laundry_count", mib_laundry_count, vmmeter_data.v_laundry_count) ||
+#endif
+ GETSYSCTL_SIMPLE("vfs.bufspace", mib_vfs_bufspace, vfs_bufspace_count) ||
+ GETSYSCTL_SIMPLE("vm.stats.vm.v_free_count", mib_free_count, vmmeter_data.v_free_count))) {
+ error("DISABLED: system.ram chart");
+ error("DISABLED: system.ram module");
+ return 1;
+ } else {
+
+ // --------------------------------------------------------------------
+
+ static RRDSET *st = NULL;
+ static RRDDIM *rd_free = NULL, *rd_active = NULL, *rd_inactive = NULL, *rd_wired = NULL,
+ *rd_cache = NULL, *rd_laundry = NULL, *rd_buffers = NULL;
+
+ if (unlikely(!st)) {
+ st = rrdset_create_localhost(
+ "system",
+ "ram",
+ NULL,
+ "ram",
+ NULL,
+ "System RAM",
+ "MB",
+ "freebsd.plugin",
+ "system.ram",
+ NETDATA_CHART_PRIO_SYSTEM_RAM,
+ update_every,
+ RRDSET_TYPE_STACKED
+ );
+
+ rd_free = rrddim_add(st, "free", NULL, system_pagesize, MEGA_FACTOR, RRD_ALGORITHM_ABSOLUTE);
+ rd_active = rrddim_add(st, "active", NULL, system_pagesize, MEGA_FACTOR, RRD_ALGORITHM_ABSOLUTE);
+ rd_inactive = rrddim_add(st, "inactive", NULL, system_pagesize, MEGA_FACTOR, RRD_ALGORITHM_ABSOLUTE);
+ rd_wired = rrddim_add(st, "wired", NULL, system_pagesize, MEGA_FACTOR, RRD_ALGORITHM_ABSOLUTE);
+#if __FreeBSD_version < 1200016
+ rd_cache = rrddim_add(st, "cache", NULL, system_pagesize, MEGA_FACTOR, RRD_ALGORITHM_ABSOLUTE);
+#endif
+#if defined(NETDATA_COLLECT_LAUNDRY)
+ rd_laundry = rrddim_add(st, "laundry", NULL, system_pagesize, MEGA_FACTOR, RRD_ALGORITHM_ABSOLUTE);
+#endif
+ rd_buffers = rrddim_add(st, "buffers", NULL, 1, MEGA_FACTOR, RRD_ALGORITHM_ABSOLUTE);
+ }
+ else rrdset_next(st);
+
+ rrddim_set_by_pointer(st, rd_free, vmmeter_data.v_free_count);
+ rrddim_set_by_pointer(st, rd_active, vmmeter_data.v_active_count);
+ rrddim_set_by_pointer(st, rd_inactive, vmmeter_data.v_inactive_count);
+ rrddim_set_by_pointer(st, rd_wired, vmmeter_data.v_wire_count);
+#if __FreeBSD_version < 1200016
+ rrddim_set_by_pointer(st, rd_cache, vmmeter_data.v_cache_count);
+#endif
+#if defined(NETDATA_COLLECT_LAUNDRY)
+ rrddim_set_by_pointer(st, rd_laundry, vmmeter_data.v_laundry_count);
+#endif
+ rrddim_set_by_pointer(st, rd_buffers, vfs_bufspace_count);
+ rrdset_done(st);
+ }
+
+ return 0;
+}
+
+// --------------------------------------------------------------------------------------------------------------------
+// vm.stats.vm.v_swappgs
+
+int do_vm_stats_sys_v_swappgs(int update_every, usec_t dt) {
+ (void)dt;
+ static int mib_swappgsin[4] = {0, 0, 0, 0}, mib_swappgsout[4] = {0, 0, 0, 0};
+ vmmeter_t vmmeter_data;
+
+ if (unlikely(GETSYSCTL_SIMPLE("vm.stats.vm.v_swappgsin", mib_swappgsin, vmmeter_data.v_swappgsin) ||
+ GETSYSCTL_SIMPLE("vm.stats.vm.v_swappgsout", mib_swappgsout, vmmeter_data.v_swappgsout))) {
+ error("DISABLED: system.swapio chart");
+ error("DISABLED: vm.stats.vm.v_swappgs module");
+ return 1;
+ } else {
+
+ // --------------------------------------------------------------------
+
+ static RRDSET *st = NULL;
+ static RRDDIM *rd_in = NULL, *rd_out = NULL;
+
+ if (unlikely(!st)) {
+ st = rrdset_create_localhost(
+ "system",
+ "swapio",
+ NULL,
+ "swap",
+ NULL,
+ "Swap I/O",
+ "kilobytes/s",
+ "freebsd.plugin",
+ "vm.stats.vm.v_swappgs",
+ NETDATA_CHART_PRIO_SYSTEM_SWAPIO,
+ update_every,
+ RRDSET_TYPE_AREA
+ );
+
+ rd_in = rrddim_add(st, "in", NULL, system_pagesize, KILO_FACTOR, RRD_ALGORITHM_INCREMENTAL);
+ rd_out = rrddim_add(st, "out", NULL, -system_pagesize, KILO_FACTOR, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else rrdset_next(st);
+
+ rrddim_set_by_pointer(st, rd_in, vmmeter_data.v_swappgsin);
+ rrddim_set_by_pointer(st, rd_out, vmmeter_data.v_swappgsout);
+ rrdset_done(st);
+ }
+
+ return 0;
+}
+
+// --------------------------------------------------------------------------------------------------------------------
+// vm.stats.vm.v_pgfaults
+
+int do_vm_stats_sys_v_pgfaults(int update_every, usec_t dt) {
+ (void)dt;
+ static int mib_vm_faults[4] = {0, 0, 0, 0}, mib_io_faults[4] = {0, 0, 0, 0}, mib_cow_faults[4] = {0, 0, 0, 0},
+ mib_cow_optim[4] = {0, 0, 0, 0}, mib_intrans[4] = {0, 0, 0, 0};
+ vmmeter_t vmmeter_data;
+
+ if (unlikely(GETSYSCTL_SIMPLE("vm.stats.vm.v_vm_faults", mib_vm_faults, vmmeter_data.v_vm_faults) ||
+ GETSYSCTL_SIMPLE("vm.stats.vm.v_io_faults", mib_io_faults, vmmeter_data.v_io_faults) ||
+ GETSYSCTL_SIMPLE("vm.stats.vm.v_cow_faults", mib_cow_faults, vmmeter_data.v_cow_faults) ||
+ GETSYSCTL_SIMPLE("vm.stats.vm.v_cow_optim", mib_cow_optim, vmmeter_data.v_cow_optim) ||
+ GETSYSCTL_SIMPLE("vm.stats.vm.v_intrans", mib_intrans, vmmeter_data.v_intrans))) {
+ error("DISABLED: mem.pgfaults chart");
+ error("DISABLED: vm.stats.vm.v_pgfaults module");
+ return 1;
+ } else {
+
+ // --------------------------------------------------------------------
+
+ static RRDSET *st = NULL;
+ static RRDDIM *rd_memory = NULL, *rd_io_requiring = NULL, *rd_cow = NULL,
+ *rd_cow_optimized = NULL, *rd_in_transit = NULL;
+
+ if (unlikely(!st)) {
+ st = rrdset_create_localhost(
+ "mem",
+ "pgfaults",
+ NULL,
+ "system",
+ NULL,
+ "Memory Page Faults",
+ "page faults/s",
+ "freebsd.plugin",
+ "vm.stats.vm.v_pgfaults",
+ NETDATA_CHART_PRIO_MEM_SYSTEM_PGFAULTS,
+ update_every,
+ RRDSET_TYPE_LINE
+ );
+
+ rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
+
+ rd_memory = rrddim_add(st, "memory", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_io_requiring = rrddim_add(st, "io_requiring", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_cow = rrddim_add(st, "cow", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_cow_optimized = rrddim_add(st, "cow_optimized", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_in_transit = rrddim_add(st, "in_transit", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else rrdset_next(st);
+
+ rrddim_set_by_pointer(st, rd_memory, vmmeter_data.v_vm_faults);
+ rrddim_set_by_pointer(st, rd_io_requiring, vmmeter_data.v_io_faults);
+ rrddim_set_by_pointer(st, rd_cow, vmmeter_data.v_cow_faults);
+ rrddim_set_by_pointer(st, rd_cow_optimized, vmmeter_data.v_cow_optim);
+ rrddim_set_by_pointer(st, rd_in_transit, vmmeter_data.v_intrans);
+ rrdset_done(st);
+ }
+
+ return 0;
+}
+
+// --------------------------------------------------------------------------------------------------------------------
+// kern.ipc.sem
+
+int do_kern_ipc_sem(int update_every, usec_t dt) {
+ (void)dt;
+ static int mib_semmni[3] = {0, 0, 0}, mib_sema[3] = {0, 0, 0};
+ struct ipc_sem {
+ int semmni;
+ collected_number sets;
+ collected_number semaphores;
+ } ipc_sem = {0, 0, 0};
+
+ if (unlikely(GETSYSCTL_SIMPLE("kern.ipc.semmni", mib_semmni, ipc_sem.semmni))) {
+ error("DISABLED: system.ipc_semaphores chart");
+ error("DISABLED: system.ipc_semaphore_arrays chart");
+ error("DISABLED: kern.ipc.sem module");
+ return 1;
+ } else {
+ static struct semid_kernel *ipc_sem_data = NULL;
+ static int old_semmni = 0;
+
+ if (unlikely(ipc_sem.semmni != old_semmni)) {
+ ipc_sem_data = reallocz(ipc_sem_data, sizeof(struct semid_kernel) * ipc_sem.semmni);
+ old_semmni = ipc_sem.semmni;
+ }
+ if (unlikely(GETSYSCTL_WSIZE("kern.ipc.sema", mib_sema, ipc_sem_data, sizeof(struct semid_kernel) * ipc_sem.semmni))) {
+ error("DISABLED: system.ipc_semaphores chart");
+ error("DISABLED: system.ipc_semaphore_arrays chart");
+ error("DISABLED: kern.ipc.sem module");
+ return 1;
+ } else {
+ int i;
+
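+ // a semaphore set is in use when SEM_ALLOC is set in its permissions; count the sets and sum their semaphores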
+ for (i = 0; i < ipc_sem.semmni; i++) {
+ if (unlikely(ipc_sem_data[i].u.sem_perm.mode & SEM_ALLOC)) {
+ ipc_sem.sets += 1;
+ ipc_sem.semaphores += ipc_sem_data[i].u.sem_nsems;
+ }
+ }
+
+ // --------------------------------------------------------------------
+
+ static RRDSET *st_semaphores = NULL, *st_semaphore_arrays = NULL;
+ static RRDDIM *rd_semaphores = NULL, *rd_semaphore_arrays = NULL;
+
+ if (unlikely(!st_semaphores)) {
+ st_semaphores = rrdset_create_localhost(
+ "system",
+ "ipc_semaphores",
+ NULL,
+ "ipc semaphores",
+ NULL,
+ "IPC Semaphores",
+ "semaphores",
+ "freebsd.plugin",
+ "kern.ipc.sem",
+ NETDATA_CHART_PRIO_SYSTEM_IPC_SEMAPHORES,
+ update_every,
+ RRDSET_TYPE_AREA
+ );
+
+ rd_semaphores = rrddim_add(st_semaphores, "semaphores", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ }
+ else rrdset_next(st_semaphores);
+
+ rrddim_set_by_pointer(st_semaphores, rd_semaphores, ipc_sem.semaphores);
+ rrdset_done(st_semaphores);
+
+ // --------------------------------------------------------------------
+
+ if (unlikely(!st_semaphore_arrays)) {
+ st_semaphore_arrays = rrdset_create_localhost(
+ "system",
+ "ipc_semaphore_arrays",
+ NULL,
+ "ipc semaphores",
+ NULL,
+ "IPC Semaphore Arrays",
+ "arrays",
+ "freebsd.plugin",
+ "kern.ipc.sem",
+ NETDATA_CHART_PRIO_SYSTEM_IPC_SEM_ARRAYS,
+ update_every,
+ RRDSET_TYPE_AREA
+ );
+
+ rd_semaphore_arrays = rrddim_add(st_semaphore_arrays, "arrays", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ }
+ else rrdset_next(st_semaphore_arrays);
+
+ rrddim_set_by_pointer(st_semaphore_arrays, rd_semaphore_arrays, ipc_sem.sets);
+ rrdset_done(st_semaphore_arrays);
+ }
+ }
+
+ return 0;
+}
+
+// --------------------------------------------------------------------------------------------------------------------
+// kern.ipc.shm
+
+int do_kern_ipc_shm(int update_every, usec_t dt) {
+ (void)dt;
+ static int mib_shmmni[3] = {0, 0, 0}, mib_shmsegs[3] = {0, 0, 0};
+ struct ipc_shm {
+ u_long shmmni;
+ collected_number segs;
+ collected_number segsize;
+ } ipc_shm = {0, 0, 0};
+
+ if (unlikely(GETSYSCTL_SIMPLE("kern.ipc.shmmni", mib_shmmni, ipc_shm.shmmni))) {
+ error("DISABLED: system.ipc_shared_mem_segs chart");
+ error("DISABLED: system.ipc_shared_mem_size chart");
+ error("DISABLED: kern.ipc.shmmodule");
+ return 1;
+ } else {
+ static struct shmid_kernel *ipc_shm_data = NULL;
+ static u_long old_shmmni = 0;
+
+ if (unlikely(ipc_shm.shmmni != old_shmmni)) {
+ ipc_shm_data = reallocz(ipc_shm_data, sizeof(struct shmid_kernel) * ipc_shm.shmmni);
+ old_shmmni = ipc_shm.shmmni;
+ }
+ if (unlikely(
+ GETSYSCTL_WSIZE("kern.ipc.shmsegs", mib_shmsegs, ipc_shm_data, sizeof(struct shmid_kernel) * ipc_shm.shmmni))) {
+ error("DISABLED: system.ipc_shared_mem_segs chart");
+ error("DISABLED: system.ipc_shared_mem_size chart");
+ error("DISABLED: kern.ipc.shmmodule");
+ return 1;
+ } else {
+ unsigned long i;
+
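+ // count segments whose mode has the allocated bit (0x0800) set; this is assumed to match the kernel's SHMSEG_ALLOCATED flag, which userland headers do not export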
+ for (i = 0; i < ipc_shm.shmmni; i++) {
+ if (unlikely(ipc_shm_data[i].u.shm_perm.mode & 0x0800)) {
+ ipc_shm.segs += 1;
+ ipc_shm.segsize += ipc_shm_data[i].u.shm_segsz;
+ }
+ }
+
+ // --------------------------------------------------------------------
+
+ static RRDSET *st_segs = NULL, *st_size = NULL;
+ static RRDDIM *rd_segments = NULL, *rd_allocated = NULL;
+
+ if (unlikely(!st_segs)) {
+ st_segs = rrdset_create_localhost(
+ "system",
+ "ipc_shared_mem_segs",
+ NULL,
+ "ipc shared memory",
+ NULL,
+ "IPC Shared Memory Segments",
+ "segments",
+ "freebsd.plugin",
+ "kern.ipc.shm",
+ NETDATA_CHART_PRIO_SYSTEM_IPC_SHARED_MEM_SEGS,
+ update_every,
+ RRDSET_TYPE_AREA
+ );
+
+ rd_segments = rrddim_add(st_segs, "segments", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ }
+ else rrdset_next(st_segs);
+
+ rrddim_set_by_pointer(st_segs, rd_segments, ipc_shm.segs);
+ rrdset_done(st_segs);
+
+ // --------------------------------------------------------------------
+
+ if (unlikely(!st_size)) {
+ st_size = rrdset_create_localhost(
+ "system",
+ "ipc_shared_mem_size",
+ NULL,
+ "ipc shared memory",
+ NULL,
+ "IPC Shared Memory Segments Size",
+ "kilobytes",
+ "freebsd.plugin",
+ "kern.ipc.shm",
+ NETDATA_CHART_PRIO_SYSTEM_IPC_SHARED_MEM_SIZE,
+ update_every,
+ RRDSET_TYPE_AREA
+ );
+
+ rd_allocated = rrddim_add(st_size, "allocated", NULL, 1, KILO_FACTOR, RRD_ALGORITHM_ABSOLUTE);
+ }
+ else rrdset_next(st_size);
+
+ rrddim_set_by_pointer(st_size, rd_allocated, ipc_shm.segsize);
+ rrdset_done(st_size);
+ }
+ }
+
+ return 0;
+}
+
+// --------------------------------------------------------------------------------------------------------------------
+// kern.ipc.msq
+
+int do_kern_ipc_msq(int update_every, usec_t dt) {
+ (void)dt;
+ static int mib_msgmni[3] = {0, 0, 0}, mib_msqids[3] = {0, 0, 0};
+ struct ipc_msq {
+ int msgmni;
+ collected_number queues;
+ collected_number messages;
+ collected_number usedsize;
+ collected_number allocsize;
+ } ipc_msq = {0, 0, 0, 0, 0};
+
+ if (unlikely(GETSYSCTL_SIMPLE("kern.ipc.msgmni", mib_msgmni, ipc_msq.msgmni))) {
+ error("DISABLED: system.ipc_msq_queues chart");
+ error("DISABLED: system.ipc_msq_messages chart");
+ error("DISABLED: system.ipc_msq_size chart");
+ error("DISABLED: kern.ipc.msg module");
+ return 1;
+ } else {
+ static struct msqid_kernel *ipc_msq_data = NULL;
+ static int old_msgmni = 0;
+
+ if (unlikely(ipc_msq.msgmni != old_msgmni)) {
+ ipc_msq_data = reallocz(ipc_msq_data, sizeof(struct msqid_kernel) * ipc_msq.msgmni);
+ old_msgmni = ipc_msq.msgmni;
+ }
+ if (unlikely(
+ GETSYSCTL_WSIZE("kern.ipc.msqids", mib_msqids, ipc_msq_data, sizeof(struct msqid_kernel) * ipc_msq.msgmni))) {
+ error("DISABLED: system.ipc_msq_queues chart");
+ error("DISABLED: system.ipc_msq_messages chart");
+ error("DISABLED: system.ipc_msq_size chart");
+ error("DISABLED: kern.ipc.msg module");
+ return 1;
+ } else {
+ int i;
+
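+ // a message queue is allocated when msg_qbytes is non-zero; accumulate queue, message and byte totals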
+ for (i = 0; i < ipc_msq.msgmni; i++) {
+ if (unlikely(ipc_msq_data[i].u.msg_qbytes != 0)) {
+ ipc_msq.queues += 1;
+ ipc_msq.messages += ipc_msq_data[i].u.msg_qnum;
+ ipc_msq.usedsize += ipc_msq_data[i].u.msg_cbytes;
+ ipc_msq.allocsize += ipc_msq_data[i].u.msg_qbytes;
+ }
+ }
+
+ // --------------------------------------------------------------------
+
+ static RRDSET *st_queues = NULL, *st_messages = NULL, *st_size = NULL;
+ static RRDDIM *rd_queues = NULL, *rd_messages = NULL, *rd_allocated = NULL, *rd_used = NULL;
+
+ if (unlikely(!st_queues)) {
+ st_queues = rrdset_create_localhost(
+ "system",
+ "ipc_msq_queues",
+ NULL,
+ "ipc message queues",
+ NULL,
+ "Number of IPC Message Queues",
+ "queues",
+ "freebsd.plugin",
+ "kern.ipc.msq",
+ NETDATA_CHART_PRIO_SYSTEM_IPC_MSQ_QUEUES,
+ update_every,
+ RRDSET_TYPE_AREA
+ );
+
+ rd_queues = rrddim_add(st_queues, "queues", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ }
+ else rrdset_next(st_queues);
+
+ rrddim_set_by_pointer(st_queues, rd_queues, ipc_msq.queues);
+ rrdset_done(st_queues);
+
+ // --------------------------------------------------------------------
+
+ if (unlikely(!st_messages)) {
+ st_messages = rrdset_create_localhost(
+ "system",
+ "ipc_msq_messages",
+ NULL,
+ "ipc message queues",
+ NULL,
+ "Number of Messages in IPC Message Queues",
+ "messages",
+ "freebsd.plugin",
+ "kern.ipc.msq",
+ NETDATA_CHART_PRIO_SYSTEM_IPC_MSQ_MESSAGES,
+ update_every,
+ RRDSET_TYPE_AREA
+ );
+
+ rd_messages = rrddim_add(st_messages, "messages", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ }
+ else rrdset_next(st_messages);
+
+ rrddim_set_by_pointer(st_messages, rd_messages, ipc_msq.messages);
+ rrdset_done(st_messages);
+
+ // --------------------------------------------------------------------
+
+ if (unlikely(!st_size)) {
+ st_size = rrdset_create_localhost(
+ "system",
+ "ipc_msq_size",
+ NULL,
+ "ipc message queues",
+ NULL,
+ "Size of IPC Message Queues",
+ "bytes",
+ "freebsd.plugin",
+ "kern.ipc.msq",
+ NETDATA_CHART_PRIO_SYSTEM_IPC_MSQ_SIZE,
+ update_every,
+ RRDSET_TYPE_LINE
+ );
+
+ rd_allocated = rrddim_add(st_size, "allocated", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ rd_used = rrddim_add(st_size, "used", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ }
+ else rrdset_next(st_size);
+
+ rrddim_set_by_pointer(st_size, rd_allocated, ipc_msq.allocsize);
+ rrddim_set_by_pointer(st_size, rd_used, ipc_msq.usedsize);
+ rrdset_done(st_size);
+ }
+ }
+
+ return 0;
+}
+
+// --------------------------------------------------------------------------------------------------------------------
+// uptime
+
+int do_uptime(int update_every, usec_t dt) {
+ (void)dt;
+ struct timespec up_time;
+
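+ // CLOCK_UPTIME reports the time elapsed since boot on FreeBSD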
+ clock_gettime(CLOCK_UPTIME, &up_time);
+
+ // --------------------------------------------------------------------
+
+ static RRDSET *st = NULL;
+ static RRDDIM *rd = NULL;
+
+ if(unlikely(!st)) {
+ st = rrdset_create_localhost(
+ "system",
+ "uptime",
+ NULL,
+ "uptime",
+ NULL,
+ "System Uptime",
+ "seconds",
+ "freebsd.plugin",
+ "uptime",
+ NETDATA_CHART_PRIO_SYSTEM_UPTIME,
+ update_every,
+ RRDSET_TYPE_LINE
+ );
+
+ rd = rrddim_add(st, "uptime", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ }
+ else rrdset_next(st);
+
+ rrddim_set_by_pointer(st, rd, up_time.tv_sec);
+ rrdset_done(st);
+
+ return 0;
+}
+
+// --------------------------------------------------------------------------------------------------------------------
+// net.isr
+
+int do_net_isr(int update_every, usec_t dt) {
+ (void)dt;
+ static int do_netisr = -1, do_netisr_per_core = -1;
+
+ if (unlikely(do_netisr == -1)) {
+ do_netisr = config_get_boolean("plugin:freebsd:net.isr", "netisr", 1);
+ do_netisr_per_core = config_get_boolean("plugin:freebsd:net.isr", "netisr per core", 1);
+ }
+
+ static int mib_workstream[3] = {0, 0, 0}, mib_work[3] = {0, 0, 0};
+ int common_error = 0;
+ size_t netisr_workstream_size = 0, netisr_work_size = 0;
+ unsigned long num_netisr_workstreams = 0, num_netisr_works = 0;
+ static struct sysctl_netisr_workstream *netisr_workstream = NULL;
+ static struct sysctl_netisr_work *netisr_work = NULL;
+ static struct netisr_stats {
+ collected_number dispatched;
+ collected_number hybrid_dispatched;
+ collected_number qdrops;
+ collected_number queued;
+ } *netisr_stats = NULL;
+
+ if (likely(do_netisr || do_netisr_per_core)) {
+ if (unlikely(GETSYSCTL_SIZE("net.isr.workstream", mib_workstream, netisr_workstream_size))) {
+ common_error = 1;
+ } else if (unlikely(GETSYSCTL_SIZE("net.isr.work", mib_work, netisr_work_size))) {
+ common_error = 1;
+ } else {
+ static size_t old_netisr_workstream_size = 0;
+
+ num_netisr_workstreams = netisr_workstream_size / sizeof(struct sysctl_netisr_workstream);
+ if (unlikely(netisr_workstream_size != old_netisr_workstream_size)) {
+ netisr_workstream = reallocz(netisr_workstream,
+ num_netisr_workstreams * sizeof(struct sysctl_netisr_workstream));
+ old_netisr_workstream_size = netisr_workstream_size;
+ }
+ if (unlikely(GETSYSCTL_WSIZE("net.isr.workstream", mib_workstream, netisr_workstream,
+ num_netisr_workstreams * sizeof(struct sysctl_netisr_workstream)))){
+ common_error = 1;
+ } else {
+ static size_t old_netisr_work_size = 0;
+
+ num_netisr_works = netisr_work_size / sizeof(struct sysctl_netisr_work);
+ if (unlikely(netisr_work_size != old_netisr_work_size)) {
+ netisr_work = reallocz(netisr_work, num_netisr_works * sizeof(struct sysctl_netisr_work));
+ old_netisr_work_size = netisr_work_size;
+ }
+ if (unlikely(GETSYSCTL_WSIZE("net.isr.work", mib_work, netisr_work,
+ num_netisr_works * sizeof(struct sysctl_netisr_work)))){
+ common_error = 1;
+ }
+ }
+ }
+ if (unlikely(common_error)) {
+ do_netisr = 0;
+ error("DISABLED: system.softnet_stat chart");
+ do_netisr_per_core = 0;
+ error("DISABLED: system.cpuX_softnet_stat chart");
+ common_error = 0;
+ error("DISABLED: net.isr module");
+ return 1;
+ } else {
+ unsigned long i, n;
+ int j;
+ static int old_number_of_cpus = 0;
+
+ if (unlikely(number_of_cpus != old_number_of_cpus)) {
+ netisr_stats = reallocz(netisr_stats, (number_of_cpus + 1) * sizeof(struct netisr_stats));
+ old_number_of_cpus = number_of_cpus;
+ }
+ memset(netisr_stats, 0, (number_of_cpus + 1) * sizeof(struct netisr_stats));
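+ // join each work entry to its workstream by wsid, accumulate per-CPU counters, and keep the system-wide totals in slot [number_of_cpus]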
+ for (i = 0; i < num_netisr_workstreams; i++) {
+ for (n = 0; n < num_netisr_works; n++) {
+ if (netisr_workstream[i].snws_wsid == netisr_work[n].snw_wsid) {
+ netisr_stats[netisr_workstream[i].snws_cpu].dispatched += netisr_work[n].snw_dispatched;
+ netisr_stats[netisr_workstream[i].snws_cpu].hybrid_dispatched += netisr_work[n].snw_hybrid_dispatched;
+ netisr_stats[netisr_workstream[i].snws_cpu].qdrops += netisr_work[n].snw_qdrops;
+ netisr_stats[netisr_workstream[i].snws_cpu].queued += netisr_work[n].snw_queued;
+ }
+ }
+ }
+ for (j = 0; j < number_of_cpus; j++) {
+ netisr_stats[number_of_cpus].dispatched += netisr_stats[j].dispatched;
+ netisr_stats[number_of_cpus].hybrid_dispatched += netisr_stats[j].hybrid_dispatched;
+ netisr_stats[number_of_cpus].qdrops += netisr_stats[j].qdrops;
+ netisr_stats[number_of_cpus].queued += netisr_stats[j].queued;
+ }
+ }
+ } else {
+ error("DISABLED: net.isr module");
+ return 1;
+ }
+
+ // --------------------------------------------------------------------
+
+ if (likely(do_netisr)) {
+ static RRDSET *st = NULL;
+ static RRDDIM *rd_dispatched = NULL, *rd_hybrid_dispatched = NULL, *rd_qdrops = NULL, *rd_queued = NULL;
+
+ if (unlikely(!st)) {
+ st = rrdset_create_localhost(
+ "system",
+ "softnet_stat",
+ NULL,
+ "softnet_stat",
+ NULL,
+ "System softnet_stat",
+ "events/s",
+ "freebsd.plugin",
+ "net.isr",
+ NETDATA_CHART_PRIO_SYSTEM_SOFTNET_STAT,
+ update_every,
+ RRDSET_TYPE_LINE
+ );
+
+ rd_dispatched = rrddim_add(st, "dispatched", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_hybrid_dispatched = rrddim_add(st, "hybrid_dispatched", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_qdrops = rrddim_add(st, "qdrops", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_queued = rrddim_add(st, "queued", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else rrdset_next(st);
+
+ rrddim_set_by_pointer(st, rd_dispatched, netisr_stats[number_of_cpus].dispatched);
+ rrddim_set_by_pointer(st, rd_hybrid_dispatched, netisr_stats[number_of_cpus].hybrid_dispatched);
+ rrddim_set_by_pointer(st, rd_qdrops, netisr_stats[number_of_cpus].qdrops);
+ rrddim_set_by_pointer(st, rd_queued, netisr_stats[number_of_cpus].queued);
+ rrdset_done(st);
+ }
+
+ // --------------------------------------------------------------------
+
+ if (likely(do_netisr_per_core)) {
+ static struct softnet_chart {
+ char netisr_cpuid[MAX_INT_DIGITS + 17];
+ RRDSET *st;
+ RRDDIM *rd_dispatched;
+ RRDDIM *rd_hybrid_dispatched;
+ RRDDIM *rd_qdrops;
+ RRDDIM *rd_queued;
+ } *all_softnet_charts = NULL;
+ static int old_number_of_cpus = 0;
+ int i;
+
+ if(unlikely(number_of_cpus > old_number_of_cpus)) {
+ all_softnet_charts = reallocz(all_softnet_charts, sizeof(struct softnet_chart) * number_of_cpus);
+ memset(&all_softnet_charts[old_number_of_cpus], 0, sizeof(struct softnet_chart) * (number_of_cpus - old_number_of_cpus));
+ old_number_of_cpus = number_of_cpus;
+ }
+
+ for (i = 0; i < number_of_cpus ;i++) {
+ snprintfz(all_softnet_charts[i].netisr_cpuid, MAX_INT_DIGITS + 17, "cpu%d_softnet_stat", i);
+
+ if (unlikely(!all_softnet_charts[i].st)) {
+ all_softnet_charts[i].st = rrdset_create_localhost(
+ "cpu",
+ all_softnet_charts[i].netisr_cpuid,
+ NULL,
+ "softnet_stat",
+ NULL,
+ "Per CPU netisr statistics",
+ "events/s",
+ "freebsd.plugin",
+ "net.isr",
+ NETDATA_CHART_PRIO_SOFTNET_PER_CORE + i,
+ update_every,
+ RRDSET_TYPE_LINE
+ );
+
+ all_softnet_charts[i].rd_dispatched = rrddim_add(all_softnet_charts[i].st, "dispatched",
+ NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ all_softnet_charts[i].rd_hybrid_dispatched = rrddim_add(all_softnet_charts[i].st, "hybrid_dispatched",
+ NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ all_softnet_charts[i].rd_qdrops = rrddim_add(all_softnet_charts[i].st, "qdrops",
+ NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ all_softnet_charts[i].rd_queued = rrddim_add(all_softnet_charts[i].st, "queued",
+ NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else rrdset_next(all_softnet_charts[i].st);
+
+ rrddim_set_by_pointer(all_softnet_charts[i].st, all_softnet_charts[i].rd_dispatched,
+ netisr_stats[i].dispatched);
+ rrddim_set_by_pointer(all_softnet_charts[i].st, all_softnet_charts[i].rd_hybrid_dispatched,
+ netisr_stats[i].hybrid_dispatched);
+ rrddim_set_by_pointer(all_softnet_charts[i].st, all_softnet_charts[i].rd_qdrops,
+ netisr_stats[i].qdrops);
+ rrddim_set_by_pointer(all_softnet_charts[i].st, all_softnet_charts[i].rd_queued,
+ netisr_stats[i].queued);
+ rrdset_done(all_softnet_charts[i].st);
+ }
+ }
+
+ return 0;
+}
+
+// --------------------------------------------------------------------------------------------------------------------
+// net.inet.tcp.states
+
+int do_net_inet_tcp_states(int update_every, usec_t dt) {
+ (void)dt;
+ static int mib[4] = {0, 0, 0, 0};
+ uint64_t tcps_states[TCP_NSTATES];
+
+ // see http://net-snmp.sourceforge.net/docs/mibs/tcp.html
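+ // net.inet.tcp.states returns a per-state connection count array; only TCPS_ESTABLISHED is charted here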
+ if (unlikely(GETSYSCTL_SIMPLE("net.inet.tcp.states", mib, tcps_states))) {
+ error("DISABLED: ipv4.tcpsock chart");
+ error("DISABLED: net.inet.tcp.states module");
+ return 1;
+ } else {
+
+ // --------------------------------------------------------------------
+
+ static RRDSET *st = NULL;
+ static RRDDIM *rd = NULL;
+
+ if (unlikely(!st)) {
+ st = rrdset_create_localhost(
+ "ipv4",
+ "tcpsock",
+ NULL,
+ "tcp",
+ NULL,
+ "IPv4 TCP Connections",
+ "active connections",
+ "freebsd.plugin",
+ "net.inet.tcp.states",
+ 2500,
+ update_every,
+ RRDSET_TYPE_LINE
+ );
+
+ rd = rrddim_add(st, "CurrEstab", "connections", 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ } else
+ rrdset_next(st);
+
+ rrddim_set_by_pointer(st, rd, tcps_states[TCPS_ESTABLISHED]);
+ rrdset_done(st);
+ }
+
+ return 0;
+}
+
+// --------------------------------------------------------------------------------------------------------------------
+// net.inet.tcp.stats
+
+int do_net_inet_tcp_stats(int update_every, usec_t dt) {
+ (void)dt;
+ static int do_tcp_packets = -1, do_tcp_errors = -1, do_tcp_handshake = -1, do_tcpext_connaborts = -1, do_tcpext_ofo = -1,
+ do_tcpext_syncookies = -1, do_tcpext_listen = -1, do_ecn = -1;
+
+ if (unlikely(do_tcp_packets == -1)) {
+ do_tcp_packets = config_get_boolean("plugin:freebsd:net.inet.tcp.stats", "ipv4 TCP packets", 1);
+ do_tcp_errors = config_get_boolean("plugin:freebsd:net.inet.tcp.stats", "ipv4 TCP errors", 1);
+ do_tcp_handshake = config_get_boolean("plugin:freebsd:net.inet.tcp.stats", "ipv4 TCP handshake issues", 1);
+ do_tcpext_connaborts = config_get_boolean_ondemand("plugin:freebsd:net.inet.tcp.stats", "TCP connection aborts",
+ CONFIG_BOOLEAN_AUTO);
+ do_tcpext_ofo = config_get_boolean_ondemand("plugin:freebsd:net.inet.tcp.stats", "TCP out-of-order queue",
+ CONFIG_BOOLEAN_AUTO);
+ do_tcpext_syncookies = config_get_boolean_ondemand("plugin:freebsd:net.inet.tcp.stats", "TCP SYN cookies",
+ CONFIG_BOOLEAN_AUTO);
+ do_tcpext_listen = config_get_boolean_ondemand("plugin:freebsd:net.inet.tcp.stats", "TCP listen issues",
+ CONFIG_BOOLEAN_AUTO);
+ do_ecn = config_get_boolean_ondemand("plugin:freebsd:net.inet.tcp.stats", "ECN packets",
+ CONFIG_BOOLEAN_AUTO);
+ }
+
+ // see http://net-snmp.sourceforge.net/docs/mibs/tcp.html
+ if (likely(do_tcp_packets || do_tcp_errors || do_tcp_handshake || do_tcpext_connaborts || do_tcpext_ofo ||
+ do_tcpext_syncookies || do_tcpext_listen || do_ecn)) {
+ static int mib[4] = {0, 0, 0, 0};
+ struct tcpstat tcpstat;
+
+ if (unlikely(GETSYSCTL_SIMPLE("net.inet.tcp.stats", mib, tcpstat))) {
+ do_tcp_packets = 0;
+ error("DISABLED: ipv4.tcppackets chart");
+ do_tcp_errors = 0;
+ error("DISABLED: ipv4.tcperrors chart");
+ do_tcp_handshake = 0;
+ error("DISABLED: ipv4.tcphandshake chart");
+ do_tcpext_connaborts = 0;
+ error("DISABLED: ipv4.tcpconnaborts chart");
+ do_tcpext_ofo = 0;
+ error("DISABLED: ipv4.tcpofo chart");
+ do_tcpext_syncookies = 0;
+ error("DISABLED: ipv4.tcpsyncookies chart");
+ do_tcpext_listen = 0;
+ error("DISABLED: ipv4.tcplistenissues chart");
+ do_ecn = 0;
+ error("DISABLED: ipv4.ecnpkts chart");
+ error("DISABLED: net.inet.tcp.stats module");
+ return 1;
+ } else {
+
+ // --------------------------------------------------------------------
+
+ if (likely(do_tcp_packets)) {
+ static RRDSET *st = NULL;
+ static RRDDIM *rd_in_segs = NULL, *rd_out_segs = NULL;
+
+ if (unlikely(!st)) {
+ st = rrdset_create_localhost(
+ "ipv4",
+ "tcppackets",
+ NULL,
+ "tcp",
+ NULL,
+ "IPv4 TCP Packets",
+ "packets/s",
+ "freebsd.plugin",
+ "net.inet.tcp.stats",
+ 2600,
+ update_every,
+ RRDSET_TYPE_LINE
+ );
+
+ rd_in_segs = rrddim_add(st, "InSegs", "received", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_out_segs = rrddim_add(st, "OutSegs", "sent", -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ } else
+ rrdset_next(st);
+
+ rrddim_set_by_pointer(st, rd_in_segs, tcpstat.tcps_rcvtotal);
+ rrddim_set_by_pointer(st, rd_out_segs, tcpstat.tcps_sndtotal);
+ rrdset_done(st);
+ }
+
+ // --------------------------------------------------------------------
+
+ if (likely(do_tcp_errors)) {
+ static RRDSET *st = NULL;
+ static RRDDIM *rd_in_errs = NULL, *rd_in_csum_errs = NULL, *rd_retrans_segs = NULL;
+
+ if (unlikely(!st)) {
+ st = rrdset_create_localhost(
+ "ipv4",
+ "tcperrors",
+ NULL,
+ "tcp",
+ NULL,
+ "IPv4 TCP Errors",
+ "packets/s",
+ "freebsd.plugin",
+ "net.inet.tcp.stats",
+ 2700,
+ update_every,
+ RRDSET_TYPE_LINE
+ );
+
+ rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
+
+ rd_in_errs = rrddim_add(st, "InErrs", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_in_csum_errs = rrddim_add(st, "InCsumErrors", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_retrans_segs = rrddim_add(st, "RetransSegs", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ } else
+ rrdset_next(st);
+
+#if __FreeBSD__ >= 11
+ rrddim_set_by_pointer(st, rd_in_errs, tcpstat.tcps_rcvbadoff + tcpstat.tcps_rcvreassfull +
+ tcpstat.tcps_rcvshort);
+#else
+ rrddim_set_by_pointer(st, rd_in_errs, tcpstat.tcps_rcvbadoff + tcpstat.tcps_rcvshort);
+#endif
+ rrddim_set_by_pointer(st, rd_in_csum_errs, tcpstat.tcps_rcvbadsum);
+ rrddim_set_by_pointer(st, rd_retrans_segs, tcpstat.tcps_sndrexmitpack);
+ rrdset_done(st);
+ }
+
+ // --------------------------------------------------------------------
+
+ if (likely(do_tcp_handshake)) {
+ static RRDSET *st = NULL;
+ static RRDDIM *rd_estab_resets = NULL, *rd_active_opens = NULL, *rd_passive_opens = NULL,
+ *rd_attempt_fails = NULL;
+
+ if (unlikely(!st)) {
+ st = rrdset_create_localhost(
+ "ipv4",
+ "tcphandshake",
+ NULL,
+ "tcp",
+ NULL,
+ "IPv4 TCP Handshake Issues",
+ "events/s",
+ "freebsd.plugin",
+ "net.inet.tcp.stats",
+ 2900,
+ update_every,
+ RRDSET_TYPE_LINE
+ );
+
+ rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
+
+ rd_estab_resets = rrddim_add(st, "EstabResets", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_active_opens = rrddim_add(st, "ActiveOpens", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_passive_opens = rrddim_add(st, "PassiveOpens", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_attempt_fails = rrddim_add(st, "AttemptFails", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ } else
+ rrdset_next(st);
+
+ rrddim_set_by_pointer(st, rd_estab_resets, tcpstat.tcps_drops);
+ rrddim_set_by_pointer(st, rd_active_opens, tcpstat.tcps_connattempt);
+ rrddim_set_by_pointer(st, rd_passive_opens, tcpstat.tcps_accepts);
+ rrddim_set_by_pointer(st, rd_attempt_fails, tcpstat.tcps_conndrops);
+ rrdset_done(st);
+ }
+
+ // --------------------------------------------------------------------
+
+ if (do_tcpext_connaborts == CONFIG_BOOLEAN_YES || (do_tcpext_connaborts == CONFIG_BOOLEAN_AUTO && (tcpstat.tcps_rcvpackafterwin || tcpstat.tcps_rcvafterclose || tcpstat.tcps_rcvmemdrop || tcpstat.tcps_persistdrop || tcpstat.tcps_finwait2_drops))) {
+ do_tcpext_connaborts = CONFIG_BOOLEAN_YES;
+
+ static RRDSET *st = NULL;
+ static RRDDIM *rd_on_data = NULL, *rd_on_close = NULL, *rd_on_memory = NULL,
+ *rd_on_timeout = NULL, *rd_on_linger = NULL;
+
+ if (unlikely(!st)) {
+ st = rrdset_create_localhost(
+ "ipv4",
+ "tcpconnaborts",
+ NULL,
+ "tcp",
+ NULL,
+ "TCP Connection Aborts",
+ "connections/s",
+ "freebsd.plugin",
+ "net.inet.tcp.stats",
+ 3010,
+ update_every,
+ RRDSET_TYPE_LINE
+ );
+
+ rd_on_data = rrddim_add(st, "TCPAbortOnData", "baddata", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_on_close = rrddim_add(st, "TCPAbortOnClose", "userclosed", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_on_memory = rrddim_add(st, "TCPAbortOnMemory", "nomemory", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_on_timeout = rrddim_add(st, "TCPAbortOnTimeout", "timeout", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_on_linger = rrddim_add(st, "TCPAbortOnLinger", "linger", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else rrdset_next(st);
+
+ rrddim_set_by_pointer(st, rd_on_data, tcpstat.tcps_rcvpackafterwin);
+ rrddim_set_by_pointer(st, rd_on_close, tcpstat.tcps_rcvafterclose);
+ rrddim_set_by_pointer(st, rd_on_memory, tcpstat.tcps_rcvmemdrop);
+ rrddim_set_by_pointer(st, rd_on_timeout, tcpstat.tcps_persistdrop);
+ rrddim_set_by_pointer(st, rd_on_linger, tcpstat.tcps_finwait2_drops);
+ rrdset_done(st);
+ }
+
+ // --------------------------------------------------------------------
+
+ if (do_tcpext_ofo == CONFIG_BOOLEAN_YES || (do_tcpext_ofo == CONFIG_BOOLEAN_AUTO && tcpstat.tcps_rcvoopack)) {
+ do_tcpext_ofo = CONFIG_BOOLEAN_YES;
+
+ static RRDSET *st = NULL;
+ static RRDDIM *rd_ofo_queue = NULL;
+
+ if (unlikely(!st)) {
+ st = rrdset_create_localhost(
+ "ipv4",
+ "tcpofo",
+ NULL,
+ "tcp",
+ NULL,
+ "TCP Out-Of-Order Queue",
+ "packets/s",
+ "freebsd.plugin",
+ "net.inet.tcp.stats",
+ 3050,
+ update_every,
+ RRDSET_TYPE_LINE
+ );
+
+ rd_ofo_queue = rrddim_add(st, "TCPOFOQueue", "inqueue", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else rrdset_next(st);
+
+ rrddim_set_by_pointer(st, rd_ofo_queue, tcpstat.tcps_rcvoopack);
+ rrdset_done(st);
+ }
+
+ // --------------------------------------------------------------------
+
+ if (do_tcpext_syncookies == CONFIG_BOOLEAN_YES || (do_tcpext_syncookies == CONFIG_BOOLEAN_AUTO && (tcpstat.tcps_sc_sendcookie || tcpstat.tcps_sc_recvcookie || tcpstat.tcps_sc_zonefail))) {
+ do_tcpext_syncookies = CONFIG_BOOLEAN_YES;
+
+ static RRDSET *st = NULL;
+ static RRDDIM *rd_recv = NULL, *rd_send = NULL, *rd_failed = NULL;
+
+ if (unlikely(!st)) {
+ st = rrdset_create_localhost(
+ "ipv4",
+ "tcpsyncookies",
+ NULL,
+ "tcp",
+ NULL,
+ "TCP SYN Cookies",
+ "packets/s",
+ "freebsd.plugin",
+ "net.inet.tcp.stats",
+ 3100,
+ update_every,
+ RRDSET_TYPE_LINE
+ );
+
+ rd_recv = rrddim_add(st, "SyncookiesRecv", "received", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_send = rrddim_add(st, "SyncookiesSent", "sent", -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_failed = rrddim_add(st, "SyncookiesFailed", "failed", -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else rrdset_next(st);
+
+ rrddim_set_by_pointer(st, rd_recv, tcpstat.tcps_sc_recvcookie);
+ rrddim_set_by_pointer(st, rd_send, tcpstat.tcps_sc_sendcookie);
+ rrddim_set_by_pointer(st, rd_failed, tcpstat.tcps_sc_zonefail);
+ rrdset_done(st);
+ }
+
+ // --------------------------------------------------------------------
+
+ if(do_tcpext_listen == CONFIG_BOOLEAN_YES || (do_tcpext_listen == CONFIG_BOOLEAN_AUTO && tcpstat.tcps_listendrop)) {
+ do_tcpext_listen = CONFIG_BOOLEAN_YES;
+
+ static RRDSET *st_listen = NULL;
+ static RRDDIM *rd_overflows = NULL;
+
+ if(unlikely(!st_listen)) {
+
+ st_listen = rrdset_create_localhost(
+ "ipv4",
+ "tcplistenissues",
+ NULL,
+ "tcp",
+ NULL,
+ "TCP Listen Socket Issues",
+ "packets/s",
+ "freebsd.plugin",
+ "net.inet.tcp.stats",
+ 3015,
+ update_every,
+ RRDSET_TYPE_LINE
+ );
+
+ rd_overflows = rrddim_add(st_listen, "ListenOverflows", "overflows", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else
+ rrdset_next(st_listen);
+
+ rrddim_set_by_pointer(st_listen, rd_overflows, tcpstat.tcps_listendrop);
+
+ rrdset_done(st_listen);
+ }
+
+ // --------------------------------------------------------------------
+
+ if (do_ecn == CONFIG_BOOLEAN_YES || (do_ecn == CONFIG_BOOLEAN_AUTO && (tcpstat.tcps_ecn_ce || tcpstat.tcps_ecn_ect0 || tcpstat.tcps_ecn_ect1))) {
+ do_ecn = CONFIG_BOOLEAN_YES;
+
+ static RRDSET *st = NULL;
+ static RRDDIM *rd_ce = NULL, *rd_no_ect = NULL, *rd_ect0 = NULL, *rd_ect1 = NULL;
+
+ if (unlikely(!st)) {
+ st = rrdset_create_localhost(
+ "ipv4",
+ "ecnpkts",
+ NULL,
+ "ecn",
+ NULL,
+ "IPv4 ECN Statistics",
+ "packets/s",
+ "freebsd.plugin",
+ "net.inet.tcp.stats",
+ 8700,
+ update_every,
+ RRDSET_TYPE_LINE
+ );
+
+ rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
+
+ rd_ce = rrddim_add(st, "InCEPkts", "CEP", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_no_ect = rrddim_add(st, "InNoECTPkts", "NoECTP", -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_ect0 = rrddim_add(st, "InECT0Pkts", "ECTP0", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_ect1 = rrddim_add(st, "InECT1Pkts", "ECTP1", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else rrdset_next(st);
+
+ rrddim_set_by_pointer(st, rd_ce, tcpstat.tcps_ecn_ce);
+ rrddim_set_by_pointer(st, rd_no_ect, tcpstat.tcps_ecn_ce - (tcpstat.tcps_ecn_ect0 +
+ tcpstat.tcps_ecn_ect1));
+ rrddim_set_by_pointer(st, rd_ect0, tcpstat.tcps_ecn_ect0);
+ rrddim_set_by_pointer(st, rd_ect1, tcpstat.tcps_ecn_ect1);
+ rrdset_done(st);
+ }
+
+ }
+ } else {
+ error("DISABLED: net.inet.tcp.stats module");
+ return 1;
+ }
+
+ return 0;
+}
+
+// --------------------------------------------------------------------------------------------------------------------
+// net.inet.udp.stats
+
+int do_net_inet_udp_stats(int update_every, usec_t dt) {
+ (void)dt;
+ static int do_udp_packets = -1, do_udp_errors = -1;
+
+ if (unlikely(do_udp_packets == -1)) {
+ do_udp_packets = config_get_boolean("plugin:freebsd:net.inet.udp.stats", "ipv4 UDP packets", 1);
+ do_udp_errors = config_get_boolean("plugin:freebsd:net.inet.udp.stats", "ipv4 UDP errors", 1);
+ }
+
+ // see http://net-snmp.sourceforge.net/docs/mibs/udp.html
+ if (likely(do_udp_packets || do_udp_errors)) {
+ static int mib[4] = {0, 0, 0, 0};
+ struct udpstat udpstat;
+
+ if (unlikely(GETSYSCTL_SIMPLE("net.inet.udp.stats", mib, udpstat))) {
+ do_udp_packets = 0;
+ error("DISABLED: ipv4.udppackets chart");
+ do_udp_errors = 0;
+ error("DISABLED: ipv4.udperrors chart");
+ error("DISABLED: net.inet.udp.stats module");
+ return 1;
+ } else {
+
+ // --------------------------------------------------------------------
+
+ if (likely(do_udp_packets)) {
+ static RRDSET *st = NULL;
+ static RRDDIM *rd_in = NULL, *rd_out = NULL;
+
+ if (unlikely(!st)) {
+ st = rrdset_create_localhost(
+ "ipv4",
+ "udppackets",
+ NULL,
+ "udp",
+ NULL,
+ "IPv4 UDP Packets",
+ "packets/s",
+ "freebsd.plugin",
+ "net.inet.udp.stats",
+ 2601,
+ update_every,
+ RRDSET_TYPE_LINE
+ );
+
+ rd_in = rrddim_add(st, "InDatagrams", "received", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_out = rrddim_add(st, "OutDatagrams", "sent", -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ } else
+ rrdset_next(st);
+
+ rrddim_set_by_pointer(st, rd_in, udpstat.udps_ipackets);
+ rrddim_set_by_pointer(st, rd_out, udpstat.udps_opackets);
+ rrdset_done(st);
+ }
+
+ // --------------------------------------------------------------------
+
+ if (likely(do_udp_errors)) {
+ static RRDSET *st = NULL;
+ static RRDDIM *rd_in_errors = NULL, *rd_no_ports = NULL, *rd_recv_buf_errors = NULL,
+ *rd_in_csum_errors = NULL, *rd_ignored_multi = NULL;
+
+ if (unlikely(!st)) {
+ st = rrdset_create_localhost(
+ "ipv4",
+ "udperrors",
+ NULL,
+ "udp",
+ NULL,
+ "IPv4 UDP Errors",
+ "events/s",
+ "freebsd.plugin",
+ "net.inet.udp.stats",
+ 2701,
+ update_every,
+ RRDSET_TYPE_LINE
+ );
+
+ rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
+
+ rd_in_errors = rrddim_add(st, "InErrors", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_no_ports = rrddim_add(st, "NoPorts", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_recv_buf_errors = rrddim_add(st, "RcvbufErrors", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_in_csum_errors = rrddim_add(st, "InCsumErrors", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_ignored_multi = rrddim_add(st, "IgnoredMulti", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ } else
+ rrdset_next(st);
+
+ rrddim_set_by_pointer(st, rd_in_errors, udpstat.udps_hdrops + udpstat.udps_badlen);
+ rrddim_set_by_pointer(st, rd_no_ports, udpstat.udps_noport);
+ rrddim_set_by_pointer(st, rd_recv_buf_errors, udpstat.udps_fullsock);
+ rrddim_set_by_pointer(st, rd_in_csum_errors, udpstat.udps_badsum + udpstat.udps_nosum);
+ rrddim_set_by_pointer(st, rd_ignored_multi, udpstat.udps_filtermcast);
+ rrdset_done(st);
+ }
+ }
+ } else {
+ error("DISABLED: net.inet.udp.stats module");
+ return 1;
+ }
+
+ return 0;
+}
+
+// --------------------------------------------------------------------------------------------------------------------
+// net.inet.icmp.stats
+
+int do_net_inet_icmp_stats(int update_every, usec_t dt) {
+ (void)dt;
+ static int do_icmp_packets = -1, do_icmp_errors = -1, do_icmpmsg = -1;
+
+ if (unlikely(do_icmp_packets == -1)) {
+ do_icmp_packets = config_get_boolean("plugin:freebsd:net.inet.icmp.stats", "ipv4 ICMP packets", 1);
+ do_icmp_errors = config_get_boolean("plugin:freebsd:net.inet.icmp.stats", "ipv4 ICMP errors", 1);
+ do_icmpmsg = config_get_boolean("plugin:freebsd:net.inet.icmp.stats", "ipv4 ICMP messages", 1);
+ }
+
+ if (likely(do_icmp_packets || do_icmp_errors || do_icmpmsg)) {
+ static int mib[4] = {0, 0, 0, 0};
+ struct icmpstat icmpstat;
+ int i;
+ struct icmp_total {
+ u_long msgs_in;
+ u_long msgs_out;
+ } icmp_total = {0, 0};
+
+ if (unlikely(GETSYSCTL_SIMPLE("net.inet.icmp.stats", mib, icmpstat))) {
+ do_icmp_packets = 0;
+ error("DISABLED: ipv4.icmp chart");
+ do_icmp_errors = 0;
+ error("DISABLED: ipv4.icmp_errors chart");
+ do_icmpmsg = 0;
+ error("DISABLED: ipv4.icmpmsg chart");
+ error("DISABLED: net.inet.icmp.stats module");
+ return 1;
+ } else {
+ for (i = 0; i <= ICMP_MAXTYPE; i++) {
+ icmp_total.msgs_in += icmpstat.icps_inhist[i];
+ icmp_total.msgs_out += icmpstat.icps_outhist[i];
+ }
+ icmp_total.msgs_in += icmpstat.icps_badcode + icmpstat.icps_badlen + icmpstat.icps_checksum + icmpstat.icps_tooshort;
+
+ // --------------------------------------------------------------------
+
+ if (likely(do_icmp_packets)) {
+ static RRDSET *st = NULL;
+ static RRDDIM *rd_in = NULL, *rd_out = NULL;
+
+ if (unlikely(!st)) {
+ st = rrdset_create_localhost(
+ "ipv4"
+ , "icmp"
+ , NULL
+ , "icmp"
+ , NULL
+ , "IPv4 ICMP Packets"
+ , "packets/s"
+ , "freebsd.plugin"
+ , "net.inet.icmp.stats"
+ , 2602
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ rd_in = rrddim_add(st, "InMsgs", "received", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_out = rrddim_add(st, "OutMsgs", "sent", -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ } else
+ rrdset_next(st);
+
+ rrddim_set_by_pointer(st, rd_in, icmp_total.msgs_in);
+ rrddim_set_by_pointer(st, rd_out, icmp_total.msgs_out);
+
+ rrdset_done(st);
+ }
+
+ // --------------------------------------------------------------------
+
+ if (likely(do_icmp_errors)) {
+ static RRDSET *st = NULL;
+ static RRDDIM *rd_in = NULL, *rd_out = NULL, *rd_in_csum = NULL;
+
+ if (unlikely(!st)) {
+ st = rrdset_create_localhost(
+ "ipv4"
+ , "icmp_errors"
+ , NULL
+ , "icmp"
+ , NULL
+ , "IPv4 ICMP Errors"
+ , "packets/s"
+ , "freebsd.plugin"
+ , "net.inet.icmp.stats"
+ , 2603
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ rd_in = rrddim_add(st, "InErrors", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_out = rrddim_add(st, "OutErrors", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_in_csum = rrddim_add(st, "InCsumErrors", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ } else
+ rrdset_next(st);
+
+ rrddim_set_by_pointer(st, rd_in, icmpstat.icps_badcode + icmpstat.icps_badlen +
+ icmpstat.icps_checksum + icmpstat.icps_tooshort);
+ rrddim_set_by_pointer(st, rd_out, icmpstat.icps_error);
+ rrddim_set_by_pointer(st, rd_in_csum, icmpstat.icps_checksum);
+
+ rrdset_done(st);
+ }
+
+ // --------------------------------------------------------------------
+
+ if (likely(do_icmpmsg)) {
+ static RRDSET *st = NULL;
+ static RRDDIM *rd_in_reps = NULL, *rd_out_reps = NULL, *rd_in = NULL, *rd_out = NULL;
+
+ if (unlikely(!st)) {
+ st = rrdset_create_localhost(
+ "ipv4"
+ , "icmpmsg"
+ , NULL
+ , "icmp"
+ , NULL
+ , "IPv4 ICMP Messages"
+ , "packets/s"
+ , "freebsd.plugin"
+ , "net.inet.icmp.stats"
+ , 2604
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ rd_in_reps = rrddim_add(st, "InEchoReps", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_out_reps = rrddim_add(st, "OutEchoReps", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_in = rrddim_add(st, "InEchos", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_out = rrddim_add(st, "OutEchos", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ } else
+ rrdset_next(st);
+
+ rrddim_set_by_pointer(st, rd_in_reps, icmpstat.icps_inhist[ICMP_ECHOREPLY]);
+ rrddim_set_by_pointer(st, rd_out_reps, icmpstat.icps_outhist[ICMP_ECHOREPLY]);
+ rrddim_set_by_pointer(st, rd_in, icmpstat.icps_inhist[ICMP_ECHO]);
+ rrddim_set_by_pointer(st, rd_out, icmpstat.icps_outhist[ICMP_ECHO]);
+
+ rrdset_done(st);
+ }
+ }
+ } else {
+ error("DISABLED: net.inet.icmp.stats module");
+ return 1;
+ }
+
+ return 0;
+}
+
+// --------------------------------------------------------------------------------------------------------------------
+// net.inet.ip.stats
+
+int do_net_inet_ip_stats(int update_every, usec_t dt) {
+ (void)dt;
+ static int do_ip_packets = -1, do_ip_fragsout = -1, do_ip_fragsin = -1, do_ip_errors = -1;
+
+ if (unlikely(do_ip_packets == -1)) {
+ do_ip_packets = config_get_boolean("plugin:freebsd:net.inet.ip.stats", "ipv4 packets", 1);
+ do_ip_fragsout = config_get_boolean("plugin:freebsd:net.inet.ip.stats", "ipv4 fragments sent", 1);
+ do_ip_fragsin = config_get_boolean("plugin:freebsd:net.inet.ip.stats", "ipv4 fragments assembly", 1);
+ do_ip_errors = config_get_boolean("plugin:freebsd:net.inet.ip.stats", "ipv4 errors", 1);
+ }
+
+ // see also http://net-snmp.sourceforge.net/docs/mibs/ip.html
+ if (likely(do_ip_packets || do_ip_fragsout || do_ip_fragsin || do_ip_errors)) {
+ static int mib[4] = {0, 0, 0, 0};
+ struct ipstat ipstat;
+
+ if (unlikely(GETSYSCTL_SIMPLE("net.inet.ip.stats", mib, ipstat))) {
+ do_ip_packets = 0;
+ error("DISABLED: ipv4.packets chart");
+ do_ip_fragsout = 0;
+ error("DISABLED: ipv4.fragsout chart");
+ do_ip_fragsin = 0;
+ error("DISABLED: ipv4.fragsin chart");
+ do_ip_errors = 0;
+ error("DISABLED: ipv4.errors chart");
+ error("DISABLED: net.inet.ip.stats module");
+ return 1;
+ } else {
+
+ // --------------------------------------------------------------------
+
+ if (likely(do_ip_packets)) {
+ static RRDSET *st = NULL;
+ static RRDDIM *rd_in_receives = NULL, *rd_out_requests = NULL, *rd_forward_datagrams = NULL,
+ *rd_in_delivers = NULL;
+
+ if (unlikely(!st)) {
+ st = rrdset_create_localhost(
+ "ipv4",
+ "packets",
+ NULL,
+ "packets",
+ NULL,
+ "IPv4 Packets",
+ "packets/s",
+ "freebsd.plugin",
+ "net.inet.ip.stats",
+ 3000,
+ update_every,
+ RRDSET_TYPE_LINE
+ );
+
+ rd_in_receives = rrddim_add(st, "InReceives", "received", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_out_requests = rrddim_add(st, "OutRequests", "sent", -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_forward_datagrams = rrddim_add(st, "ForwDatagrams", "forwarded", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_in_delivers = rrddim_add(st, "InDelivers", "delivered", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ } else
+ rrdset_next(st);
+
+ rrddim_set_by_pointer(st, rd_in_receives, ipstat.ips_total);
+ rrddim_set_by_pointer(st, rd_out_requests, ipstat.ips_localout);
+ rrddim_set_by_pointer(st, rd_forward_datagrams, ipstat.ips_forward);
+ rrddim_set_by_pointer(st, rd_in_delivers, ipstat.ips_delivered);
+ rrdset_done(st);
+ }
+
+ // --------------------------------------------------------------------
+
+ if (likely(do_ip_fragsout)) {
+ static RRDSET *st = NULL;
+ static RRDDIM *rd_ok = NULL, *rd_fails = NULL, *rd_created = NULL;
+
+ if (unlikely(!st)) {
+ st = rrdset_create_localhost(
+ "ipv4",
+ "fragsout",
+ NULL,
+ "fragments",
+ NULL,
+ "IPv4 Fragments Sent",
+ "packets/s",
+ "freebsd.plugin",
+ "net.inet.ip.stats",
+ 3010,
+ update_every,
+ RRDSET_TYPE_LINE
+ );
+
+ rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
+
+ rd_ok = rrddim_add(st, "FragOKs", "ok", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_fails = rrddim_add(st, "FragFails", "failed", -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_created = rrddim_add(st, "FragCreates", "created", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ } else
+ rrdset_next(st);
+
+ rrddim_set_by_pointer(st, rd_ok, ipstat.ips_fragmented);
+ rrddim_set_by_pointer(st, rd_fails, ipstat.ips_cantfrag);
+ rrddim_set_by_pointer(st, rd_created, ipstat.ips_ofragments);
+ rrdset_done(st);
+ }
+
+ // --------------------------------------------------------------------
+
+ if (likely(do_ip_fragsin)) {
+ static RRDSET *st = NULL;
+ static RRDDIM *rd_ok = NULL, *rd_failed = NULL, *rd_all = NULL;
+
+ if (unlikely(!st)) {
+ st = rrdset_create_localhost(
+ "ipv4",
+ "fragsin",
+ NULL,
+ "fragments",
+ NULL,
+ "IPv4 Fragments Reassembly",
+ "packets/s",
+ "freebsd.plugin",
+ "net.inet.ip.stats",
+ 3011,
+ update_every,
+ RRDSET_TYPE_LINE
+ );
+
+ rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
+
+ rd_ok = rrddim_add(st, "ReasmOKs", "ok", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_failed = rrddim_add(st, "ReasmFails", "failed", -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_all = rrddim_add(st, "ReasmReqds", "all", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ } else
+ rrdset_next(st);
+
+ rrddim_set_by_pointer(st, rd_ok, ipstat.ips_fragments);
+ rrddim_set_by_pointer(st, rd_failed, ipstat.ips_fragdropped);
+ rrddim_set_by_pointer(st, rd_all, ipstat.ips_reassembled);
+ rrdset_done(st);
+ }
+
+ // --------------------------------------------------------------------
+
+ if (likely(do_ip_errors)) {
+ static RRDSET *st = NULL;
+ static RRDDIM *rd_in_discards = NULL, *rd_out_discards = NULL,
+ *rd_in_hdr_errors = NULL, *rd_out_no_routes = NULL,
+ *rd_in_addr_errors = NULL, *rd_in_unknown_protos = NULL;
+
+ if (unlikely(!st)) {
+ st = rrdset_create_localhost(
+ "ipv4",
+ "errors",
+ NULL,
+ "errors",
+ NULL,
+ "IPv4 Errors",
+ "packets/s",
+ "freebsd.plugin",
+ "net.inet.ip.stats",
+ 3002,
+ update_every,
+ RRDSET_TYPE_LINE
+ );
+
+ rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
+
+ rd_in_discards = rrddim_add(st, "InDiscards", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_out_discards = rrddim_add(st, "OutDiscards", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_in_hdr_errors = rrddim_add(st, "InHdrErrors", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_out_no_routes = rrddim_add(st, "OutNoRoutes", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_in_addr_errors = rrddim_add(st, "InAddrErrors", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_in_unknown_protos = rrddim_add(st, "InUnknownProtos", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ } else
+ rrdset_next(st);
+
+ rrddim_set_by_pointer(st, rd_in_discards, ipstat.ips_badsum + ipstat.ips_tooshort +
+ ipstat.ips_toosmall + ipstat.ips_toolong);
+ rrddim_set_by_pointer(st, rd_out_discards, ipstat.ips_odropped);
+ rrddim_set_by_pointer(st, rd_in_hdr_errors, ipstat.ips_badhlen + ipstat.ips_badlen +
+ ipstat.ips_badoptions + ipstat.ips_badvers);
+ rrddim_set_by_pointer(st, rd_out_no_routes, ipstat.ips_noroute);
+ rrddim_set_by_pointer(st, rd_in_addr_errors, ipstat.ips_badaddr);
+ rrddim_set_by_pointer(st, rd_in_unknown_protos, ipstat.ips_noproto);
+ rrdset_done(st);
+ }
+ }
+ } else {
+ error("DISABLED: net.inet.ip.stats module");
+ return 1;
+ }
+
+ return 0;
+}
+
+// --------------------------------------------------------------------------------------------------------------------
+// net.inet6.ip6.stats
+
+int do_net_inet6_ip6_stats(int update_every, usec_t dt) {
+ (void)dt;
+ static int do_ip6_packets = -1, do_ip6_fragsout = -1, do_ip6_fragsin = -1, do_ip6_errors = -1;
+
+ if (unlikely(do_ip6_packets == -1)) {
+ do_ip6_packets = config_get_boolean_ondemand("plugin:freebsd:net.inet6.ip6.stats", "ipv6 packets",
+ CONFIG_BOOLEAN_AUTO);
+ do_ip6_fragsout = config_get_boolean_ondemand("plugin:freebsd:net.inet6.ip6.stats", "ipv6 fragments sent",
+ CONFIG_BOOLEAN_AUTO);
+ do_ip6_fragsin = config_get_boolean_ondemand("plugin:freebsd:net.inet6.ip6.stats", "ipv6 fragments assembly",
+ CONFIG_BOOLEAN_AUTO);
+ do_ip6_errors = config_get_boolean_ondemand("plugin:freebsd:net.inet6.ip6.stats", "ipv6 errors",
+ CONFIG_BOOLEAN_AUTO);
+ }
+
+ if (likely(do_ip6_packets || do_ip6_fragsout || do_ip6_fragsin || do_ip6_errors)) {
+ static int mib[4] = {0, 0, 0, 0};
+ struct ip6stat ip6stat;
+
+ if (unlikely(GETSYSCTL_SIMPLE("net.inet6.ip6.stats", mib, ip6stat))) {
+ do_ip6_packets = 0;
+ error("DISABLED: ipv6.packets chart");
+ do_ip6_fragsout = 0;
+ error("DISABLED: ipv6.fragsout chart");
+ do_ip6_fragsin = 0;
+ error("DISABLED: ipv6.fragsin chart");
+ do_ip6_errors = 0;
+ error("DISABLED: ipv6.errors chart");
+ error("DISABLED: net.inet6.ip6.stats module");
+ return 1;
+ } else {
+
+ // --------------------------------------------------------------------
+
+ if (do_ip6_packets == CONFIG_BOOLEAN_YES || (do_ip6_packets == CONFIG_BOOLEAN_AUTO &&
+ (ip6stat.ip6s_localout || ip6stat.ip6s_total ||
+ ip6stat.ip6s_forward || ip6stat.ip6s_delivered))) {
+ do_ip6_packets = CONFIG_BOOLEAN_YES;
+
+ static RRDSET *st = NULL;
+ static RRDDIM *rd_received = NULL, *rd_sent = NULL, *rd_forwarded = NULL, *rd_delivers = NULL;
+
+ if (unlikely(!st)) {
+ st = rrdset_create_localhost(
+ "ipv6",
+ "packets",
+ NULL,
+ "packets",
+ NULL,
+ "IPv6 Packets",
+ "packets/s",
+ "freebsd.plugin",
+ "net.inet6.ip6.stats",
+ 3000,
+ update_every,
+ RRDSET_TYPE_LINE
+ );
+
+ rd_received = rrddim_add(st, "received", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_sent = rrddim_add(st, "sent", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_forwarded = rrddim_add(st, "forwarded", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_delivers = rrddim_add(st, "delivers", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ } else
+ rrdset_next(st);
+
+ rrddim_set_by_pointer(st, rd_sent, ip6stat.ip6s_localout);
+ rrddim_set_by_pointer(st, rd_received, ip6stat.ip6s_total);
+ rrddim_set_by_pointer(st, rd_forwarded, ip6stat.ip6s_forward);
+ rrddim_set_by_pointer(st, rd_delivers, ip6stat.ip6s_delivered);
+ rrdset_done(st);
+ }
+
+ // --------------------------------------------------------------------
+
+ if (do_ip6_fragsout == CONFIG_BOOLEAN_YES || (do_ip6_fragsout == CONFIG_BOOLEAN_AUTO &&
+ (ip6stat.ip6s_fragmented || ip6stat.ip6s_cantfrag ||
+ ip6stat.ip6s_ofragments))) {
+ do_ip6_fragsout = CONFIG_BOOLEAN_YES;
+
+ static RRDSET *st = NULL;
+ static RRDDIM *rd_ok = NULL, *rd_failed = NULL, *rd_all = NULL;
+
+ if (unlikely(!st)) {
+ st = rrdset_create_localhost(
+ "ipv6",
+ "fragsout",
+ NULL,
+ "fragments",
+ NULL,
+ "IPv6 Fragments Sent",
+ "packets/s",
+ "freebsd.plugin",
+ "net.inet6.ip6.stats",
+ 3010,
+ update_every,
+ RRDSET_TYPE_LINE
+ );
+
+ rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
+
+ rd_ok = rrddim_add(st, "ok", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_failed = rrddim_add(st, "failed", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_all = rrddim_add(st, "all", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ } else
+ rrdset_next(st);
+
+ rrddim_set_by_pointer(st, rd_ok, ip6stat.ip6s_fragmented);
+ rrddim_set_by_pointer(st, rd_failed, ip6stat.ip6s_cantfrag);
+ rrddim_set_by_pointer(st, rd_all, ip6stat.ip6s_ofragments);
+ rrdset_done(st);
+ }
+
+ // --------------------------------------------------------------------
+
+ if (do_ip6_fragsin == CONFIG_BOOLEAN_YES || (do_ip6_fragsin == CONFIG_BOOLEAN_AUTO &&
+ (ip6stat.ip6s_reassembled || ip6stat.ip6s_fragdropped ||
+ ip6stat.ip6s_fragtimeout || ip6stat.ip6s_fragments))) {
+ do_ip6_fragsin = CONFIG_BOOLEAN_YES;
+
+ static RRDSET *st = NULL;
+ static RRDDIM *rd_ok = NULL, *rd_failed = NULL, *rd_timeout = NULL, *rd_all = NULL;
+
+ if (unlikely(!st)) {
+ st = rrdset_create_localhost(
+ "ipv6",
+ "fragsin",
+ NULL,
+ "fragments",
+ NULL,
+ "IPv6 Fragments Reassembly",
+ "packets/s",
+ "freebsd.plugin",
+ "net.inet6.ip6.stats",
+ 3011,
+ update_every,
+ RRDSET_TYPE_LINE
+ );
+
+ rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
+
+ rd_ok = rrddim_add(st, "ok", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_failed = rrddim_add(st, "failed", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_timeout = rrddim_add(st, "timeout", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_all = rrddim_add(st, "all", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ } else
+ rrdset_next(st);
+
+ rrddim_set_by_pointer(st, rd_ok, ip6stat.ip6s_reassembled);
+ rrddim_set_by_pointer(st, rd_failed, ip6stat.ip6s_fragdropped);
+ rrddim_set_by_pointer(st, rd_timeout, ip6stat.ip6s_fragtimeout);
+ rrddim_set_by_pointer(st, rd_all, ip6stat.ip6s_fragments);
+ rrdset_done(st);
+ }
+
+ // --------------------------------------------------------------------
+
+ if (do_ip6_errors == CONFIG_BOOLEAN_YES || (do_ip6_errors == CONFIG_BOOLEAN_AUTO && (
+ ip6stat.ip6s_toosmall ||
+ ip6stat.ip6s_odropped ||
+ ip6stat.ip6s_badoptions ||
+ ip6stat.ip6s_badvers ||
+ ip6stat.ip6s_exthdrtoolong ||
+ ip6stat.ip6s_sources_none ||
+ ip6stat.ip6s_tooshort ||
+ ip6stat.ip6s_cantforward ||
+ ip6stat.ip6s_noroute))) {
+ do_ip6_errors = CONFIG_BOOLEAN_YES;
+
+ static RRDSET *st = NULL;
+ static RRDDIM *rd_in_discards = NULL, *rd_out_discards = NULL,
+ *rd_in_hdr_errors = NULL, *rd_in_addr_errors = NULL, *rd_in_truncated_pkts = NULL,
+ *rd_in_no_routes = NULL, *rd_out_no_routes = NULL;
+
+ if (unlikely(!st)) {
+ st = rrdset_create_localhost(
+ "ipv6",
+ "errors",
+ NULL,
+ "errors",
+ NULL,
+ "IPv6 Errors",
+ "packets/s",
+ "freebsd.plugin",
+ "net.inet6.ip6.stats",
+ 3002,
+ update_every,
+ RRDSET_TYPE_LINE
+ );
+
+ rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
+
+ rd_in_discards = rrddim_add(st, "InDiscards", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_out_discards = rrddim_add(st, "OutDiscards", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_in_hdr_errors = rrddim_add(st, "InHdrErrors", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_in_addr_errors = rrddim_add(st, "InAddrErrors", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_in_truncated_pkts = rrddim_add(st, "InTruncatedPkts", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_in_no_routes = rrddim_add(st, "InNoRoutes", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_out_no_routes = rrddim_add(st, "OutNoRoutes", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ } else
+ rrdset_next(st);
+
+ rrddim_set_by_pointer(st, rd_in_discards, ip6stat.ip6s_toosmall);
+ rrddim_set_by_pointer(st, rd_out_discards, ip6stat.ip6s_odropped);
+ rrddim_set_by_pointer(st, rd_in_hdr_errors, ip6stat.ip6s_badoptions + ip6stat.ip6s_badvers +
+ ip6stat.ip6s_exthdrtoolong);
+ rrddim_set_by_pointer(st, rd_in_addr_errors, ip6stat.ip6s_sources_none);
+ rrddim_set_by_pointer(st, rd_in_truncated_pkts, ip6stat.ip6s_tooshort);
+ rrddim_set_by_pointer(st, rd_in_no_routes, ip6stat.ip6s_cantforward);
+ rrddim_set_by_pointer(st, rd_out_no_routes, ip6stat.ip6s_noroute);
+ rrdset_done(st);
+ }
+ }
+ } else {
+ error("DISABLED: net.inet6.ip6.stats module");
+ return 1;
+ }
+
+ return 0;
+}
+
+// --------------------------------------------------------------------------------------------------------------------
+// net.inet6.icmp6.stats
+
+int do_net_inet6_icmp6_stats(int update_every, usec_t dt) {
+ (void)dt;
+ static int do_icmp6 = -1, do_icmp6_redir = -1, do_icmp6_errors = -1, do_icmp6_echos = -1, do_icmp6_router = -1,
+ do_icmp6_neighbor = -1, do_icmp6_types = -1;
+
+ if (unlikely(do_icmp6 == -1)) {
+ do_icmp6 = config_get_boolean_ondemand("plugin:freebsd:net.inet6.icmp6.stats", "icmp",
+ CONFIG_BOOLEAN_AUTO);
+ do_icmp6_redir = config_get_boolean_ondemand("plugin:freebsd:net.inet6.icmp6.stats", "icmp redirects",
+ CONFIG_BOOLEAN_AUTO);
+ do_icmp6_errors = config_get_boolean_ondemand("plugin:freebsd:net.inet6.icmp6.stats", "icmp errors",
+ CONFIG_BOOLEAN_AUTO);
+ do_icmp6_echos = config_get_boolean_ondemand("plugin:freebsd:net.inet6.icmp6.stats", "icmp echos",
+ CONFIG_BOOLEAN_AUTO);
+ do_icmp6_router = config_get_boolean_ondemand("plugin:freebsd:net.inet6.icmp6.stats", "icmp router",
+ CONFIG_BOOLEAN_AUTO);
+ do_icmp6_neighbor = config_get_boolean_ondemand("plugin:freebsd:net.inet6.icmp6.stats", "icmp neighbor",
+ CONFIG_BOOLEAN_AUTO);
+ do_icmp6_types = config_get_boolean_ondemand("plugin:freebsd:net.inet6.icmp6.stats", "icmp types",
+ CONFIG_BOOLEAN_AUTO);
+ }
+
+ if (likely(do_icmp6 || do_icmp6_redir || do_icmp6_errors || do_icmp6_echos || do_icmp6_router || do_icmp6_neighbor || do_icmp6_types)) {
+ static int mib[4] = {0, 0, 0, 0};
+ struct icmp6stat icmp6stat;
+
+ if (unlikely(GETSYSCTL_SIMPLE("net.inet6.icmp6.stats", mib, icmp6stat))) {
+ do_icmp6 = 0;
+ error("DISABLED: ipv6.icmp chart");
+ do_icmp6_redir = 0;
+ error("DISABLED: ipv6.icmpredir chart");
+ do_icmp6_errors = 0;
+ error("DISABLED: ipv6.icmperrors chart");
+ do_icmp6_echos = 0;
+ error("DISABLED: ipv6.icmpechos chart");
+ do_icmp6_router = 0;
+ error("DISABLED: ipv6.icmprouter chart");
+ do_icmp6_neighbor = 0;
+ error("DISABLED: ipv6.icmpneighbor chart");
+ do_icmp6_types = 0;
+ error("DISABLED: ipv6.icmptypes chart");
+ error("DISABLED: net.inet6.icmp6.stats module");
+ return 1;
+ } else {
+ int i;
+ struct icmp6_total {
+ u_long msgs_in;
+ u_long msgs_out;
+ } icmp6_total = {0, 0};
+
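+            // total ICMPv6 traffic: sum the per-type histograms; malformed packets also count as received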
+ for (i = 0; i <= ICMP6_MAXTYPE; i++) {
+ icmp6_total.msgs_in += icmp6stat.icp6s_inhist[i];
+ icmp6_total.msgs_out += icmp6stat.icp6s_outhist[i];
+ }
+ icmp6_total.msgs_in += icmp6stat.icp6s_badcode + icmp6stat.icp6s_badlen + icmp6stat.icp6s_checksum + icmp6stat.icp6s_tooshort;
+
+ // --------------------------------------------------------------------
+
+ if (do_icmp6 == CONFIG_BOOLEAN_YES || (do_icmp6 == CONFIG_BOOLEAN_AUTO && (icmp6_total.msgs_in || icmp6_total.msgs_out))) {
+ do_icmp6 = CONFIG_BOOLEAN_YES;
+
+ static RRDSET *st = NULL;
+ static RRDDIM *rd_received = NULL, *rd_sent = NULL;
+
+ if (unlikely(!st)) {
+ st = rrdset_create_localhost(
+ "ipv6",
+ "icmp",
+ NULL,
+ "icmp",
+ NULL,
+ "IPv6 ICMP Messages",
+ "messages/s",
+ "freebsd.plugin",
+ "net.inet6.icmp6.stats",
+ 10000,
+ update_every,
+ RRDSET_TYPE_LINE
+ );
+
+ rd_received = rrddim_add(st, "received", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_sent = rrddim_add(st, "sent", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ } else
+ rrdset_next(st);
+
+                rrddim_set_by_pointer(st, rd_received, icmp6_total.msgs_in);
+                rrddim_set_by_pointer(st, rd_sent, icmp6_total.msgs_out);
+
+ rrdset_done(st);
+ }
+
+ // --------------------------------------------------------------------
+
+ if (do_icmp6_redir == CONFIG_BOOLEAN_YES || (do_icmp6_redir == CONFIG_BOOLEAN_AUTO && (icmp6stat.icp6s_inhist[ND_REDIRECT] || icmp6stat.icp6s_outhist[ND_REDIRECT]))) {
+ do_icmp6_redir = CONFIG_BOOLEAN_YES;
+
+ static RRDSET *st = NULL;
+ static RRDDIM *rd_received = NULL, *rd_sent = NULL;
+
+ if (unlikely(!st)) {
+ st = rrdset_create_localhost(
+ "ipv6",
+ "icmpredir",
+ NULL,
+ "icmp",
+ NULL,
+ "IPv6 ICMP Redirects",
+ "redirects/s",
+ "freebsd.plugin",
+ "net.inet6.icmp6.stats",
+ 10050,
+ update_every,
+ RRDSET_TYPE_LINE
+ );
+
+ rd_received = rrddim_add(st, "received", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_sent = rrddim_add(st, "sent", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ } else
+ rrdset_next(st);
+
+                rrddim_set_by_pointer(st, rd_received, icmp6stat.icp6s_inhist[ND_REDIRECT]);
+                rrddim_set_by_pointer(st, rd_sent, icmp6stat.icp6s_outhist[ND_REDIRECT]);
+ rrdset_done(st);
+ }
+
+ // --------------------------------------------------------------------
+
+ if (do_icmp6_errors == CONFIG_BOOLEAN_YES || (do_icmp6_errors == CONFIG_BOOLEAN_AUTO && (
+ icmp6stat.icp6s_badcode ||
+ icmp6stat.icp6s_badlen ||
+ icmp6stat.icp6s_checksum ||
+ icmp6stat.icp6s_tooshort ||
+ icmp6stat.icp6s_error ||
+ icmp6stat.icp6s_inhist[ICMP6_DST_UNREACH] ||
+ icmp6stat.icp6s_inhist[ICMP6_TIME_EXCEEDED] ||
+ icmp6stat.icp6s_inhist[ICMP6_PARAM_PROB] ||
+ icmp6stat.icp6s_outhist[ICMP6_DST_UNREACH] ||
+ icmp6stat.icp6s_outhist[ICMP6_TIME_EXCEEDED] ||
+ icmp6stat.icp6s_outhist[ICMP6_PARAM_PROB]))) {
+ do_icmp6_errors = CONFIG_BOOLEAN_YES;
+
+ static RRDSET *st = NULL;
+ static RRDDIM *rd_in_errors = NULL, *rd_out_errors = NULL, *rd_in_csum_errors = NULL,
+ *rd_in_dest_unreachs = NULL, *rd_in_pkt_too_bigs = NULL, *rd_in_time_excds = NULL,
+ *rd_in_parm_problems = NULL, *rd_out_dest_unreachs = NULL, *rd_out_time_excds = NULL,
+ *rd_out_parm_problems = NULL;
+
+ if (unlikely(!st)) {
+ st = rrdset_create_localhost(
+ "ipv6",
+ "icmperrors",
+ NULL, "icmp",
+ NULL,
+ "IPv6 ICMP Errors",
+ "errors/s",
+ "freebsd.plugin",
+ "net.inet6.icmp6.stats",
+ 10100,
+ update_every,
+ RRDSET_TYPE_LINE
+ );
+
+ rd_in_errors = rrddim_add(st, "InErrors", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_out_errors = rrddim_add(st, "OutErrors", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_in_csum_errors = rrddim_add(st, "InCsumErrors", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_in_dest_unreachs = rrddim_add(st, "InDestUnreachs", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_in_pkt_too_bigs = rrddim_add(st, "InPktTooBigs", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_in_time_excds = rrddim_add(st, "InTimeExcds", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_in_parm_problems = rrddim_add(st, "InParmProblems", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_out_dest_unreachs = rrddim_add(st, "OutDestUnreachs", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_out_time_excds = rrddim_add(st, "OutTimeExcds", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_out_parm_problems = rrddim_add(st, "OutParmProblems", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ } else
+ rrdset_next(st);
+
+ rrddim_set_by_pointer(st, rd_in_errors, icmp6stat.icp6s_badcode + icmp6stat.icp6s_badlen +
+ icmp6stat.icp6s_checksum + icmp6stat.icp6s_tooshort);
+ rrddim_set_by_pointer(st, rd_out_errors, icmp6stat.icp6s_error);
+ rrddim_set_by_pointer(st, rd_in_csum_errors, icmp6stat.icp6s_checksum);
+ rrddim_set_by_pointer(st, rd_in_dest_unreachs, icmp6stat.icp6s_inhist[ICMP6_DST_UNREACH]);
+ rrddim_set_by_pointer(st, rd_in_pkt_too_bigs, icmp6stat.icp6s_badlen);
+ rrddim_set_by_pointer(st, rd_in_time_excds, icmp6stat.icp6s_inhist[ICMP6_TIME_EXCEEDED]);
+ rrddim_set_by_pointer(st, rd_in_parm_problems, icmp6stat.icp6s_inhist[ICMP6_PARAM_PROB]);
+ rrddim_set_by_pointer(st, rd_out_dest_unreachs, icmp6stat.icp6s_outhist[ICMP6_DST_UNREACH]);
+ rrddim_set_by_pointer(st, rd_out_time_excds, icmp6stat.icp6s_outhist[ICMP6_TIME_EXCEEDED]);
+ rrddim_set_by_pointer(st, rd_out_parm_problems, icmp6stat.icp6s_outhist[ICMP6_PARAM_PROB]);
+ rrdset_done(st);
+ }
+
+ // --------------------------------------------------------------------
+
+ if (do_icmp6_echos == CONFIG_BOOLEAN_YES || (do_icmp6_echos == CONFIG_BOOLEAN_AUTO && (
+ icmp6stat.icp6s_inhist[ICMP6_ECHO_REQUEST] ||
+ icmp6stat.icp6s_outhist[ICMP6_ECHO_REQUEST] ||
+ icmp6stat.icp6s_inhist[ICMP6_ECHO_REPLY] ||
+ icmp6stat.icp6s_outhist[ICMP6_ECHO_REPLY]))) {
+ do_icmp6_echos = CONFIG_BOOLEAN_YES;
+
+ static RRDSET *st = NULL;
+ static RRDDIM *rd_in = NULL, *rd_out = NULL, *rd_in_replies = NULL, *rd_out_replies = NULL;
+
+ if (unlikely(!st)) {
+ st = rrdset_create_localhost(
+ "ipv6",
+ "icmpechos",
+ NULL,
+ "icmp",
+ NULL,
+ "IPv6 ICMP Echo",
+ "messages/s",
+ "freebsd.plugin",
+ "net.inet6.icmp6.stats",
+ 10200,
+ update_every,
+ RRDSET_TYPE_LINE
+ );
+
+ rd_in = rrddim_add(st, "InEchos", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_out = rrddim_add(st, "OutEchos", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_in_replies = rrddim_add(st, "InEchoReplies", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_out_replies = rrddim_add(st, "OutEchoReplies", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ } else
+ rrdset_next(st);
+
+ rrddim_set_by_pointer(st, rd_in, icmp6stat.icp6s_inhist[ICMP6_ECHO_REQUEST]);
+ rrddim_set_by_pointer(st, rd_out, icmp6stat.icp6s_outhist[ICMP6_ECHO_REQUEST]);
+ rrddim_set_by_pointer(st, rd_in_replies, icmp6stat.icp6s_inhist[ICMP6_ECHO_REPLY]);
+ rrddim_set_by_pointer(st, rd_out_replies, icmp6stat.icp6s_outhist[ICMP6_ECHO_REPLY]);
+ rrdset_done(st);
+ }
+
+ // --------------------------------------------------------------------
+
+ if (do_icmp6_router == CONFIG_BOOLEAN_YES || (do_icmp6_router == CONFIG_BOOLEAN_AUTO && (
+ icmp6stat.icp6s_inhist[ND_ROUTER_SOLICIT] ||
+ icmp6stat.icp6s_outhist[ND_ROUTER_SOLICIT] ||
+ icmp6stat.icp6s_inhist[ND_ROUTER_ADVERT] ||
+ icmp6stat.icp6s_outhist[ND_ROUTER_ADVERT]))) {
+ do_icmp6_router = CONFIG_BOOLEAN_YES;
+
+ static RRDSET *st = NULL;
+ static RRDDIM *rd_in_solicits = NULL, *rd_out_solicits = NULL,
+ *rd_in_advertisements = NULL, *rd_out_advertisements = NULL;
+
+ if (unlikely(!st)) {
+ st = rrdset_create_localhost(
+ "ipv6",
+ "icmprouter",
+ NULL,
+ "icmp",
+ NULL,
+ "IPv6 Router Messages",
+ "messages/s",
+ "freebsd.plugin",
+ "net.inet6.icmp6.stats",
+ 10400,
+ update_every,
+ RRDSET_TYPE_LINE
+ );
+
+ rd_in_solicits = rrddim_add(st, "InSolicits", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_out_solicits = rrddim_add(st, "OutSolicits", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_in_advertisements = rrddim_add(st, "InAdvertisements", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_out_advertisements = rrddim_add(st, "OutAdvertisements", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ } else
+ rrdset_next(st);
+
+ rrddim_set_by_pointer(st, rd_in_solicits, icmp6stat.icp6s_inhist[ND_ROUTER_SOLICIT]);
+ rrddim_set_by_pointer(st, rd_out_solicits, icmp6stat.icp6s_outhist[ND_ROUTER_SOLICIT]);
+ rrddim_set_by_pointer(st, rd_in_advertisements, icmp6stat.icp6s_inhist[ND_ROUTER_ADVERT]);
+ rrddim_set_by_pointer(st, rd_out_advertisements, icmp6stat.icp6s_outhist[ND_ROUTER_ADVERT]);
+ rrdset_done(st);
+ }
+
+ // --------------------------------------------------------------------
+
+ if (do_icmp6_neighbor == CONFIG_BOOLEAN_YES || (do_icmp6_neighbor == CONFIG_BOOLEAN_AUTO && (
+ icmp6stat.icp6s_inhist[ND_NEIGHBOR_SOLICIT] ||
+ icmp6stat.icp6s_outhist[ND_NEIGHBOR_SOLICIT] ||
+ icmp6stat.icp6s_inhist[ND_NEIGHBOR_ADVERT] ||
+ icmp6stat.icp6s_outhist[ND_NEIGHBOR_ADVERT]))) {
+ do_icmp6_neighbor = CONFIG_BOOLEAN_YES;
+
+ static RRDSET *st = NULL;
+ static RRDDIM *rd_in_solicits = NULL, *rd_out_solicits = NULL,
+ *rd_in_advertisements = NULL, *rd_out_advertisements = NULL;
+
+ if (unlikely(!st)) {
+ st = rrdset_create_localhost(
+ "ipv6",
+ "icmpneighbor",
+ NULL,
+ "icmp",
+ NULL,
+ "IPv6 Neighbor Messages",
+ "messages/s",
+ "freebsd.plugin",
+ "net.inet6.icmp6.stats",
+ 10500,
+ update_every,
+ RRDSET_TYPE_LINE
+ );
+
+ rd_in_solicits = rrddim_add(st, "InSolicits", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_out_solicits = rrddim_add(st, "OutSolicits", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_in_advertisements = rrddim_add(st, "InAdvertisements", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_out_advertisements = rrddim_add(st, "OutAdvertisements", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ } else
+ rrdset_next(st);
+
+ rrddim_set_by_pointer(st, rd_in_solicits, icmp6stat.icp6s_inhist[ND_NEIGHBOR_SOLICIT]);
+ rrddim_set_by_pointer(st, rd_out_solicits, icmp6stat.icp6s_outhist[ND_NEIGHBOR_SOLICIT]);
+ rrddim_set_by_pointer(st, rd_in_advertisements, icmp6stat.icp6s_inhist[ND_NEIGHBOR_ADVERT]);
+ rrddim_set_by_pointer(st, rd_out_advertisements, icmp6stat.icp6s_outhist[ND_NEIGHBOR_ADVERT]);
+ rrdset_done(st);
+ }
+
+ // --------------------------------------------------------------------
+
+ if (do_icmp6_types == CONFIG_BOOLEAN_YES || (do_icmp6_types == CONFIG_BOOLEAN_AUTO && (
+ icmp6stat.icp6s_inhist[1] ||
+ icmp6stat.icp6s_inhist[128] ||
+ icmp6stat.icp6s_inhist[129] ||
+ icmp6stat.icp6s_inhist[136] ||
+ icmp6stat.icp6s_outhist[1] ||
+ icmp6stat.icp6s_outhist[128] ||
+ icmp6stat.icp6s_outhist[129] ||
+ icmp6stat.icp6s_outhist[133] ||
+ icmp6stat.icp6s_outhist[135] ||
+ icmp6stat.icp6s_outhist[136]))) {
+ do_icmp6_types = CONFIG_BOOLEAN_YES;
+
+ static RRDSET *st = NULL;
+ static RRDDIM *rd_in_1 = NULL, *rd_in_128 = NULL, *rd_in_129 = NULL, *rd_in_136 = NULL,
+ *rd_out_1 = NULL, *rd_out_128 = NULL, *rd_out_129 = NULL, *rd_out_133 = NULL,
+ *rd_out_135 = NULL, *rd_out_143 = NULL;
+
+ if (unlikely(!st)) {
+ st = rrdset_create_localhost(
+ "ipv6",
+ "icmptypes",
+ NULL,
+ "icmp",
+ NULL,
+ "IPv6 ICMP Types",
+ "messages/s",
+ "freebsd.plugin",
+ "net.inet6.icmp6.stats",
+ 10700,
+ update_every,
+ RRDSET_TYPE_LINE
+ );
+
+ rd_in_1 = rrddim_add(st, "InType1", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_in_128 = rrddim_add(st, "InType128", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_in_129 = rrddim_add(st, "InType129", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_in_136 = rrddim_add(st, "InType136", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_out_1 = rrddim_add(st, "OutType1", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_out_128 = rrddim_add(st, "OutType128", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_out_129 = rrddim_add(st, "OutType129", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_out_133 = rrddim_add(st, "OutType133", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_out_135 = rrddim_add(st, "OutType135", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_out_143 = rrddim_add(st, "OutType143", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ } else
+ rrdset_next(st);
+
+ rrddim_set_by_pointer(st, rd_in_1, icmp6stat.icp6s_inhist[1]);
+ rrddim_set_by_pointer(st, rd_in_128, icmp6stat.icp6s_inhist[128]);
+ rrddim_set_by_pointer(st, rd_in_129, icmp6stat.icp6s_inhist[129]);
+ rrddim_set_by_pointer(st, rd_in_136, icmp6stat.icp6s_inhist[136]);
+ rrddim_set_by_pointer(st, rd_out_1, icmp6stat.icp6s_outhist[1]);
+ rrddim_set_by_pointer(st, rd_out_128, icmp6stat.icp6s_outhist[128]);
+ rrddim_set_by_pointer(st, rd_out_129, icmp6stat.icp6s_outhist[129]);
+ rrddim_set_by_pointer(st, rd_out_133, icmp6stat.icp6s_outhist[133]);
+ rrddim_set_by_pointer(st, rd_out_135, icmp6stat.icp6s_outhist[135]);
+ rrddim_set_by_pointer(st, rd_out_143, icmp6stat.icp6s_outhist[143]);
+ rrdset_done(st);
+ }
+ }
+ } else {
+ error("DISABLED: net.inet6.icmp6.stats module");
+ return 1;
+ }
+
+ return 0;
+}
diff --git a/collectors/freebsd.plugin/plugin_freebsd.c b/collectors/freebsd.plugin/plugin_freebsd.c
new file mode 100644
index 000000000..5cde37113
--- /dev/null
+++ b/collectors/freebsd.plugin/plugin_freebsd.c
@@ -0,0 +1,175 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "plugin_freebsd.h"
+
+static struct freebsd_module {
+ const char *name;
+ const char *dim;
+
+ int enabled;
+
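+    // data collection function: returns 0 on success; a non-zero return value disables the module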
+ int (*func)(int update_every, usec_t dt);
+ usec_t duration;
+
+ RRDDIM *rd;
+
+} freebsd_modules[] = {
+
+ // system metrics
+ { .name = "kern.cp_time", .dim = "cp_time", .enabled = 1, .func = do_kern_cp_time },
+ { .name = "vm.loadavg", .dim = "loadavg", .enabled = 1, .func = do_vm_loadavg },
+ { .name = "system.ram", .dim = "system_ram", .enabled = 1, .func = do_system_ram },
+ { .name = "vm.swap_info", .dim = "swap", .enabled = 1, .func = do_vm_swap_info },
+ { .name = "vm.stats.vm.v_swappgs", .dim = "swap_io", .enabled = 1, .func = do_vm_stats_sys_v_swappgs },
+ { .name = "vm.vmtotal", .dim = "vmtotal", .enabled = 1, .func = do_vm_vmtotal },
+ { .name = "vm.stats.vm.v_forks", .dim = "forks", .enabled = 1, .func = do_vm_stats_sys_v_forks },
+ { .name = "vm.stats.sys.v_swtch", .dim = "context_swtch", .enabled = 1, .func = do_vm_stats_sys_v_swtch },
+ { .name = "hw.intrcnt", .dim = "hw_intr", .enabled = 1, .func = do_hw_intcnt },
+ { .name = "vm.stats.sys.v_intr", .dim = "dev_intr", .enabled = 1, .func = do_vm_stats_sys_v_intr },
+ { .name = "vm.stats.sys.v_soft", .dim = "soft_intr", .enabled = 1, .func = do_vm_stats_sys_v_soft },
+ { .name = "net.isr", .dim = "net_isr", .enabled = 1, .func = do_net_isr },
+ { .name = "kern.ipc.sem", .dim = "semaphores", .enabled = 1, .func = do_kern_ipc_sem },
+ { .name = "kern.ipc.shm", .dim = "shared_memory", .enabled = 1, .func = do_kern_ipc_shm },
+ { .name = "kern.ipc.msq", .dim = "message_queues", .enabled = 1, .func = do_kern_ipc_msq },
+ { .name = "uptime", .dim = "uptime", .enabled = 1, .func = do_uptime },
+
+ // memory metrics
+ { .name = "vm.stats.vm.v_pgfaults", .dim = "pgfaults", .enabled = 1, .func = do_vm_stats_sys_v_pgfaults },
+
+ // CPU metrics
+ { .name = "kern.cp_times", .dim = "cp_times", .enabled = 1, .func = do_kern_cp_times },
+ { .name = "dev.cpu.temperature", .dim = "cpu_temperature", .enabled = 1, .func = do_dev_cpu_temperature },
+ { .name = "dev.cpu.0.freq", .dim = "cpu_frequency", .enabled = 1, .func = do_dev_cpu_0_freq },
+
+ // disk metrics
+ { .name = "kern.devstat", .dim = "kern_devstat", .enabled = 1, .func = do_kern_devstat },
+ { .name = "getmntinfo", .dim = "getmntinfo", .enabled = 1, .func = do_getmntinfo },
+
+ // network metrics
+ { .name = "net.inet.tcp.states", .dim = "tcp_states", .enabled = 1, .func = do_net_inet_tcp_states },
+ { .name = "net.inet.tcp.stats", .dim = "tcp_stats", .enabled = 1, .func = do_net_inet_tcp_stats },
+ { .name = "net.inet.udp.stats", .dim = "udp_stats", .enabled = 1, .func = do_net_inet_udp_stats },
+ { .name = "net.inet.icmp.stats", .dim = "icmp_stats", .enabled = 1, .func = do_net_inet_icmp_stats },
+ { .name = "net.inet.ip.stats", .dim = "ip_stats", .enabled = 1, .func = do_net_inet_ip_stats },
+ { .name = "net.inet6.ip6.stats", .dim = "ip6_stats", .enabled = 1, .func = do_net_inet6_ip6_stats },
+ { .name = "net.inet6.icmp6.stats", .dim = "icmp6_stats", .enabled = 1, .func = do_net_inet6_icmp6_stats },
+
+ // network interfaces metrics
+ { .name = "getifaddrs", .dim = "getifaddrs", .enabled = 1, .func = do_getifaddrs },
+
+ // ZFS metrics
+ { .name = "kstat.zfs.misc.arcstats", .dim = "arcstats", .enabled = 1, .func = do_kstat_zfs_misc_arcstats },
+ { .name = "kstat.zfs.misc.zio_trim", .dim = "trim", .enabled = 1, .func = do_kstat_zfs_misc_zio_trim },
+
+ // ipfw metrics
+ { .name = "ipfw", .dim = "ipfw", .enabled = 1, .func = do_ipfw },
+
+ // the terminator of this array
+ { .name = NULL, .dim = NULL, .enabled = 0, .func = NULL }
+};
+
+static void freebsd_main_cleanup(void *ptr) {
+ struct netdata_static_thread *static_thread = (struct netdata_static_thread *)ptr;
+ static_thread->enabled = NETDATA_MAIN_THREAD_EXITING;
+
+ info("cleaning up...");
+
+ static_thread->enabled = NETDATA_MAIN_THREAD_EXITED;
+}
+
+void *freebsd_main(void *ptr) {
+ netdata_thread_cleanup_push(freebsd_main_cleanup, ptr);
+
+ int vdo_cpu_netdata = config_get_boolean("plugin:freebsd", "netdata server resources", 1);
+
+ // initialize FreeBSD plugin
+ if (freebsd_plugin_init())
+ netdata_cleanup_and_exit(1);
+
+ // check the enabled status for each module
+ int i;
+ for(i = 0 ; freebsd_modules[i].name ;i++) {
+ struct freebsd_module *pm = &freebsd_modules[i];
+
+ pm->enabled = config_get_boolean("plugin:freebsd", pm->name, pm->enabled);
+ pm->duration = 0ULL;
+ pm->rd = NULL;
+ }
+
+ usec_t step = localhost->rrd_update_every * USEC_PER_SEC;
+ heartbeat_t hb;
+ heartbeat_init(&hb);
+
+ while(!netdata_exit) {
+ usec_t hb_dt = heartbeat_next(&hb, step);
+ usec_t duration = 0ULL;
+
+ if(unlikely(netdata_exit)) break;
+
+ // BEGIN -- the job to be done
+
+ for(i = 0 ; freebsd_modules[i].name ;i++) {
+ struct freebsd_module *pm = &freebsd_modules[i];
+ if(unlikely(!pm->enabled)) continue;
+
+ debug(D_PROCNETDEV_LOOP, "FREEBSD calling %s.", pm->name);
+
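+            // a module whose function returns non-zero is disabled and will not be called again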
+ pm->enabled = !pm->func(localhost->rrd_update_every, hb_dt);
+ pm->duration = heartbeat_monotonic_dt_to_now_usec(&hb) - duration;
+ duration += pm->duration;
+
+ if(unlikely(netdata_exit)) break;
+ }
+
+ // END -- the job is done
+
+ // --------------------------------------------------------------------
+
+ if(vdo_cpu_netdata) {
+ static RRDSET *st = NULL;
+
+ if(unlikely(!st)) {
+ st = rrdset_find_bytype_localhost("netdata", "plugin_freebsd_modules");
+
+ if(!st) {
+ st = rrdset_create_localhost(
+ "netdata"
+ , "plugin_freebsd_modules"
+ , NULL
+ , "freebsd"
+ , NULL
+ , "NetData FreeBSD Plugin Modules Durations"
+ , "milliseconds/run"
+ , "netdata"
+ , "stats"
+ , 132001
+ , localhost->rrd_update_every
+ , RRDSET_TYPE_STACKED
+ );
+
+ for(i = 0 ; freebsd_modules[i].name ;i++) {
+ struct freebsd_module *pm = &freebsd_modules[i];
+ if(unlikely(!pm->enabled)) continue;
+
+ pm->rd = rrddim_add(st, pm->dim, NULL, 1, 1000, RRD_ALGORITHM_ABSOLUTE);
+ }
+ }
+ }
+ else rrdset_next(st);
+
+ for(i = 0 ; freebsd_modules[i].name ;i++) {
+ struct freebsd_module *pm = &freebsd_modules[i];
+ if(unlikely(!pm->enabled)) continue;
+
+ rrddim_set_by_pointer(st, pm->rd, pm->duration);
+ }
+ rrdset_done(st);
+
+ global_statistics_charts();
+ registry_statistics();
+ }
+ }
+
+ netdata_thread_cleanup_pop(1);
+ return NULL;
+}
diff --git a/collectors/freebsd.plugin/plugin_freebsd.h b/collectors/freebsd.plugin/plugin_freebsd.h
new file mode 100644
index 000000000..ab46080be
--- /dev/null
+++ b/collectors/freebsd.plugin/plugin_freebsd.h
@@ -0,0 +1,74 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_PLUGIN_FREEBSD_H
+#define NETDATA_PLUGIN_FREEBSD_H 1
+
+#include "daemon/common.h"
+
+#if (TARGET_OS == OS_FREEBSD)
+
+#define NETDATA_PLUGIN_HOOK_FREEBSD \
+ { \
+ .name = "PLUGIN[freebsd]", \
+ .config_section = CONFIG_SECTION_PLUGINS, \
+ .config_name = "freebsd", \
+ .enabled = 1, \
+ .thread = NULL, \
+ .init_routine = NULL, \
+ .start_routine = freebsd_main \
+ },
+
+
+#include <sys/sysctl.h>
+
+#define KILO_FACTOR 1024
+#define MEGA_FACTOR 1048576 // 1024 * 1024
+#define GIGA_FACTOR 1073741824 // 1024 * 1024 * 1024
+
+#define MAX_INT_DIGITS 10 // maximum number of digits for int
+
+void *freebsd_main(void *ptr);
+
+extern int freebsd_plugin_init();
+
+extern int do_vm_loadavg(int update_every, usec_t dt);
+extern int do_vm_vmtotal(int update_every, usec_t dt);
+extern int do_kern_cp_time(int update_every, usec_t dt);
+extern int do_kern_cp_times(int update_every, usec_t dt);
+extern int do_dev_cpu_temperature(int update_every, usec_t dt);
+extern int do_dev_cpu_0_freq(int update_every, usec_t dt);
+extern int do_hw_intcnt(int update_every, usec_t dt);
+extern int do_vm_stats_sys_v_intr(int update_every, usec_t dt);
+extern int do_vm_stats_sys_v_soft(int update_every, usec_t dt);
+extern int do_vm_stats_sys_v_swtch(int update_every, usec_t dt);
+extern int do_vm_stats_sys_v_forks(int update_every, usec_t dt);
+extern int do_vm_swap_info(int update_every, usec_t dt);
+extern int do_system_ram(int update_every, usec_t dt);
+extern int do_vm_stats_sys_v_swappgs(int update_every, usec_t dt);
+extern int do_vm_stats_sys_v_pgfaults(int update_every, usec_t dt);
+extern int do_kern_ipc_sem(int update_every, usec_t dt);
+extern int do_kern_ipc_shm(int update_every, usec_t dt);
+extern int do_kern_ipc_msq(int update_every, usec_t dt);
+extern int do_uptime(int update_every, usec_t dt);
+extern int do_net_isr(int update_every, usec_t dt);
+extern int do_net_inet_tcp_states(int update_every, usec_t dt);
+extern int do_net_inet_tcp_stats(int update_every, usec_t dt);
+extern int do_net_inet_udp_stats(int update_every, usec_t dt);
+extern int do_net_inet_icmp_stats(int update_every, usec_t dt);
+extern int do_net_inet_ip_stats(int update_every, usec_t dt);
+extern int do_net_inet6_ip6_stats(int update_every, usec_t dt);
+extern int do_net_inet6_icmp6_stats(int update_every, usec_t dt);
+extern int do_getifaddrs(int update_every, usec_t dt);
+extern int do_getmntinfo(int update_every, usec_t dt);
+extern int do_kern_devstat(int update_every, usec_t dt);
+extern int do_kstat_zfs_misc_arcstats(int update_every, usec_t dt);
+extern int do_kstat_zfs_misc_zio_trim(int update_every, usec_t dt);
+extern int do_ipfw(int update_every, usec_t dt);
+
+#else // (TARGET_OS == OS_FREEBSD)
+
+#define NETDATA_PLUGIN_HOOK_FREEBSD
+
+#endif // (TARGET_OS == OS_FREEBSD)
+
+#endif /* NETDATA_PLUGIN_FREEBSD_H */
diff --git a/collectors/freeipmi.plugin/Makefile.am b/collectors/freeipmi.plugin/Makefile.am
new file mode 100644
index 000000000..19554bed8
--- /dev/null
+++ b/collectors/freeipmi.plugin/Makefile.am
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+
+dist_noinst_DATA = \
+ README.md \
+ $(NULL)
diff --git a/collectors/freeipmi.plugin/Makefile.in b/collectors/freeipmi.plugin/Makefile.in
new file mode 100644
index 000000000..54a0035c6
--- /dev/null
+++ b/collectors/freeipmi.plugin/Makefile.in
@@ -0,0 +1,464 @@
+# Makefile.in generated by automake 1.14.1 from Makefile.am.
+# @configure_input@
+
+# Copyright (C) 1994-2013 Free Software Foundation, Inc.
+
+# This Makefile.in is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
+# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE.
+
+@SET_MAKE@
+
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+VPATH = @srcdir@
+am__is_gnu_make = test -n '$(MAKEFILE_LIST)' && test -n '$(MAKELEVEL)'
+am__make_running_with_option = \
+ case $${target_option-} in \
+ ?) ;; \
+ *) echo "am__make_running_with_option: internal error: invalid" \
+ "target option '$${target_option-}' specified" >&2; \
+ exit 1;; \
+ esac; \
+ has_opt=no; \
+ sane_makeflags=$$MAKEFLAGS; \
+ if $(am__is_gnu_make); then \
+ sane_makeflags=$$MFLAGS; \
+ else \
+ case $$MAKEFLAGS in \
+ *\\[\ \ ]*) \
+ bs=\\; \
+ sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \
+ | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \
+ esac; \
+ fi; \
+ skip_next=no; \
+ strip_trailopt () \
+ { \
+ flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \
+ }; \
+ for flg in $$sane_makeflags; do \
+ test $$skip_next = yes && { skip_next=no; continue; }; \
+ case $$flg in \
+ *=*|--*) continue;; \
+ -*I) strip_trailopt 'I'; skip_next=yes;; \
+ -*I?*) strip_trailopt 'I';; \
+ -*O) strip_trailopt 'O'; skip_next=yes;; \
+ -*O?*) strip_trailopt 'O';; \
+ -*l) strip_trailopt 'l'; skip_next=yes;; \
+ -*l?*) strip_trailopt 'l';; \
+ -[dEDm]) skip_next=yes;; \
+ -[JT]) skip_next=yes;; \
+ esac; \
+ case $$flg in \
+ *$$target_option*) has_opt=yes; break;; \
+ esac; \
+ done; \
+ test $$has_opt = yes
+am__make_dryrun = (target_option=n; $(am__make_running_with_option))
+am__make_keepgoing = (target_option=k; $(am__make_running_with_option))
+pkgdatadir = $(datadir)/@PACKAGE@
+pkgincludedir = $(includedir)/@PACKAGE@
+pkglibdir = $(libdir)/@PACKAGE@
+pkglibexecdir = $(libexecdir)/@PACKAGE@
+am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd
+install_sh_DATA = $(install_sh) -c -m 644
+install_sh_PROGRAM = $(install_sh) -c
+install_sh_SCRIPT = $(install_sh) -c
+INSTALL_HEADER = $(INSTALL_DATA)
+transform = $(program_transform_name)
+NORMAL_INSTALL = :
+PRE_INSTALL = :
+POST_INSTALL = :
+NORMAL_UNINSTALL = :
+PRE_UNINSTALL = :
+POST_UNINSTALL = :
+build_triplet = @build@
+host_triplet = @host@
+subdir = collectors/freeipmi.plugin
+DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/Makefile.am \
+ $(dist_noinst_DATA)
+ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
+am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \
+ $(top_srcdir)/build/m4/ax_c__generic.m4 \
+ $(top_srcdir)/build/m4/ax_c_lto.m4 \
+ $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \
+ $(top_srcdir)/build/m4/ax_c_mallopt.m4 \
+ $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \
+ $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \
+ $(top_srcdir)/build/m4/ax_pthread.m4 \
+ $(top_srcdir)/build/m4/jemalloc.m4 \
+ $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac
+am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
+ $(ACLOCAL_M4)
+mkinstalldirs = $(install_sh) -d
+CONFIG_HEADER = $(top_builddir)/config.h
+CONFIG_CLEAN_FILES =
+CONFIG_CLEAN_VPATH_FILES =
+AM_V_P = $(am__v_P_@AM_V@)
+am__v_P_ = $(am__v_P_@AM_DEFAULT_V@)
+am__v_P_0 = false
+am__v_P_1 = :
+AM_V_GEN = $(am__v_GEN_@AM_V@)
+am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@)
+am__v_GEN_0 = @echo " GEN " $@;
+am__v_GEN_1 =
+AM_V_at = $(am__v_at_@AM_V@)
+am__v_at_ = $(am__v_at_@AM_DEFAULT_V@)
+am__v_at_0 = @
+am__v_at_1 =
+SOURCES =
+DIST_SOURCES =
+am__can_run_installinfo = \
+ case $$AM_UPDATE_INFO_DIR in \
+ n|no|NO) false;; \
+ *) (install-info --version) >/dev/null 2>&1;; \
+ esac
+DATA = $(dist_noinst_DATA)
+am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP)
+DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
+ACLOCAL = @ACLOCAL@
+AMTAR = @AMTAR@
+AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@
+AUTOCONF = @AUTOCONF@
+AUTOHEADER = @AUTOHEADER@
+AUTOMAKE = @AUTOMAKE@
+AWK = @AWK@
+CC = @CC@
+CCDEPMODE = @CCDEPMODE@
+CFLAGS = @CFLAGS@
+CPP = @CPP@
+CPPFLAGS = @CPPFLAGS@
+CYGPATH_W = @CYGPATH_W@
+DEFS = @DEFS@
+DEPDIR = @DEPDIR@
+ECHO_C = @ECHO_C@
+ECHO_N = @ECHO_N@
+ECHO_T = @ECHO_T@
+EGREP = @EGREP@
+EXEEXT = @EXEEXT@
+GREP = @GREP@
+INSTALL = @INSTALL@
+INSTALL_DATA = @INSTALL_DATA@
+INSTALL_PROGRAM = @INSTALL_PROGRAM@
+INSTALL_SCRIPT = @INSTALL_SCRIPT@
+INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@
+IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@
+LDFLAGS = @LDFLAGS@
+LIBCAP_CFLAGS = @LIBCAP_CFLAGS@
+LIBCAP_LIBS = @LIBCAP_LIBS@
+LIBMNL_CFLAGS = @LIBMNL_CFLAGS@
+LIBMNL_LIBS = @LIBMNL_LIBS@
+LIBOBJS = @LIBOBJS@
+LIBS = @LIBS@
+LTLIBOBJS = @LTLIBOBJS@
+MAINT = @MAINT@
+MAKEINFO = @MAKEINFO@
+MATH_CFLAGS = @MATH_CFLAGS@
+MATH_LIBS = @MATH_LIBS@
+MKDIR_P = @MKDIR_P@
+NFACCT_CFLAGS = @NFACCT_CFLAGS@
+NFACCT_LIBS = @NFACCT_LIBS@
+OBJEXT = @OBJEXT@
+OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@
+OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@
+OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@
+OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@
+OPTIONAL_MATH_CLFAGS = @OPTIONAL_MATH_CLFAGS@
+OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@
+OPTIONAL_NFACCT_CLFAGS = @OPTIONAL_NFACCT_CLFAGS@
+OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@
+OPTIONAL_UUID_CLFAGS = @OPTIONAL_UUID_CLFAGS@
+OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@
+OPTIONAL_ZLIB_CLFAGS = @OPTIONAL_ZLIB_CLFAGS@
+OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@
+PACKAGE = @PACKAGE@
+PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@
+PACKAGE_NAME = @PACKAGE_NAME@
+PACKAGE_RPM_RELEASE = @PACKAGE_RPM_RELEASE@
+PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@
+PACKAGE_STRING = @PACKAGE_STRING@
+PACKAGE_TARNAME = @PACKAGE_TARNAME@
+PACKAGE_URL = @PACKAGE_URL@
+PACKAGE_VERSION = @PACKAGE_VERSION@
+PATH_SEPARATOR = @PATH_SEPARATOR@
+PKG_CONFIG = @PKG_CONFIG@
+PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@
+PKG_CONFIG_PATH = @PKG_CONFIG_PATH@
+PTHREAD_CC = @PTHREAD_CC@
+PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
+PTHREAD_LIBS = @PTHREAD_LIBS@
+SET_MAKE = @SET_MAKE@
+SHELL = @SHELL@
+SSE_CANDIDATE = @SSE_CANDIDATE@
+STRIP = @STRIP@
+UUID_CFLAGS = @UUID_CFLAGS@
+UUID_LIBS = @UUID_LIBS@
+VERSION = @VERSION@
+ZLIB_CFLAGS = @ZLIB_CFLAGS@
+ZLIB_LIBS = @ZLIB_LIBS@
+abs_builddir = @abs_builddir@
+abs_srcdir = @abs_srcdir@
+abs_top_builddir = @abs_top_builddir@
+abs_top_srcdir = @abs_top_srcdir@
+ac_ct_CC = @ac_ct_CC@
+am__include = @am__include@
+am__leading_dot = @am__leading_dot@
+am__quote = @am__quote@
+am__tar = @am__tar@
+am__untar = @am__untar@
+ax_pthread_config = @ax_pthread_config@
+bindir = @bindir@
+build = @build@
+build_alias = @build_alias@
+build_cpu = @build_cpu@
+build_os = @build_os@
+build_target = @build_target@
+build_vendor = @build_vendor@
+builddir = @builddir@
+cachedir = @cachedir@
+chartsdir = @chartsdir@
+configdir = @configdir@
+datadir = @datadir@
+datarootdir = @datarootdir@
+docdir = @docdir@
+dvidir = @dvidir@
+exec_prefix = @exec_prefix@
+has_jemalloc = @has_jemalloc@
+has_tcmalloc = @has_tcmalloc@
+host = @host@
+host_alias = @host_alias@
+host_cpu = @host_cpu@
+host_os = @host_os@
+host_vendor = @host_vendor@
+htmldir = @htmldir@
+includedir = @includedir@
+infodir = @infodir@
+install_sh = @install_sh@
+libconfigdir = @libconfigdir@
+libdir = @libdir@
+libexecdir = @libexecdir@
+localedir = @localedir@
+localstatedir = @localstatedir@
+logdir = @logdir@
+mandir = @mandir@
+mkdir_p = @mkdir_p@
+nodedir = @nodedir@
+oldincludedir = @oldincludedir@
+pdfdir = @pdfdir@
+pluginsdir = @pluginsdir@
+prefix = @prefix@
+program_transform_name = @program_transform_name@
+psdir = @psdir@
+pythondir = @pythondir@
+registrydir = @registrydir@
+sbindir = @sbindir@
+sharedstatedir = @sharedstatedir@
+srcdir = @srcdir@
+sysconfdir = @sysconfdir@
+target_alias = @target_alias@
+top_build_prefix = @top_build_prefix@
+top_builddir = @top_builddir@
+top_srcdir = @top_srcdir@
+varlibdir = @varlibdir@
+webdir = @webdir@
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+dist_noinst_DATA = \
+ README.md \
+ $(NULL)
+
+all: all-am
+
+.SUFFIXES:
+$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps)
+ @for dep in $?; do \
+ case '$(am__configure_deps)' in \
+ *$$dep*) \
+ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \
+ && { if test -f $@; then exit 0; else break; fi; }; \
+ exit 1;; \
+ esac; \
+ done; \
+ echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu collectors/freeipmi.plugin/Makefile'; \
+ $(am__cd) $(top_srcdir) && \
+ $(AUTOMAKE) --gnu collectors/freeipmi.plugin/Makefile
+.PRECIOUS: Makefile
+Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
+ @case '$?' in \
+ *config.status*) \
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \
+ *) \
+ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \
+ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \
+ esac;
+
+$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+
+$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(am__aclocal_m4_deps):
+tags TAGS:
+
+ctags CTAGS:
+
+cscope cscopelist:
+
+
+distdir: $(DISTFILES)
+ @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+ list='$(DISTFILES)'; \
+ dist_files=`for file in $$list; do echo $$file; done | \
+ sed -e "s|^$$srcdirstrip/||;t" \
+ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \
+ case $$dist_files in \
+ */*) $(MKDIR_P) `echo "$$dist_files" | \
+ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \
+ sort -u` ;; \
+ esac; \
+ for file in $$dist_files; do \
+ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \
+ if test -d $$d/$$file; then \
+ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \
+ if test -d "$(distdir)/$$file"; then \
+ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+ fi; \
+ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \
+ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \
+ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+ fi; \
+ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \
+ else \
+ test -f "$(distdir)/$$file" \
+ || cp -p $$d/$$file "$(distdir)/$$file" \
+ || exit 1; \
+ fi; \
+ done
+check-am: all-am
+check: check-am
+all-am: Makefile $(DATA)
+installdirs:
+install: install-am
+install-exec: install-exec-am
+install-data: install-data-am
+uninstall: uninstall-am
+
+install-am: all-am
+ @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
+
+installcheck: installcheck-am
+install-strip:
+ if test -z '$(STRIP)'; then \
+ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+ install; \
+ else \
+ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+ "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \
+ fi
+mostlyclean-generic:
+
+clean-generic:
+
+distclean-generic:
+ -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
+ -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES)
+
+maintainer-clean-generic:
+ @echo "This command is intended for maintainers to use"
+ @echo "it deletes files that may require special tools to rebuild."
+ -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES)
+clean: clean-am
+
+clean-am: clean-generic mostlyclean-am
+
+distclean: distclean-am
+ -rm -f Makefile
+distclean-am: clean-am distclean-generic
+
+dvi: dvi-am
+
+dvi-am:
+
+html: html-am
+
+html-am:
+
+info: info-am
+
+info-am:
+
+install-data-am:
+
+install-dvi: install-dvi-am
+
+install-dvi-am:
+
+install-exec-am:
+
+install-html: install-html-am
+
+install-html-am:
+
+install-info: install-info-am
+
+install-info-am:
+
+install-man:
+
+install-pdf: install-pdf-am
+
+install-pdf-am:
+
+install-ps: install-ps-am
+
+install-ps-am:
+
+installcheck-am:
+
+maintainer-clean: maintainer-clean-am
+ -rm -f Makefile
+maintainer-clean-am: distclean-am maintainer-clean-generic
+
+mostlyclean: mostlyclean-am
+
+mostlyclean-am: mostlyclean-generic
+
+pdf: pdf-am
+
+pdf-am:
+
+ps: ps-am
+
+ps-am:
+
+uninstall-am:
+
+.MAKE: install-am install-strip
+
+.PHONY: all all-am check check-am clean clean-generic cscopelist-am \
+ ctags-am distclean distclean-generic distdir dvi dvi-am html \
+ html-am info info-am install install-am install-data \
+ install-data-am install-dvi install-dvi-am install-exec \
+ install-exec-am install-html install-html-am install-info \
+ install-info-am install-man install-pdf install-pdf-am \
+ install-ps install-ps-am install-strip installcheck \
+ installcheck-am installdirs maintainer-clean \
+ maintainer-clean-generic mostlyclean mostlyclean-generic pdf \
+ pdf-am ps ps-am tags-am uninstall uninstall-am
+
+
+# Tell versions [3.59,3.63) of GNU make to not export all variables.
+# Otherwise a system limit (for SysV at least) may be exceeded.
+.NOEXPORT:
diff --git a/collectors/freeipmi.plugin/README.md b/collectors/freeipmi.plugin/README.md
new file mode 100644
index 000000000..f7c5cc148
--- /dev/null
+++ b/collectors/freeipmi.plugin/README.md
@@ -0,0 +1,180 @@
+netdata has a [freeipmi](https://www.gnu.org/software/freeipmi/) plugin.
+
+> FreeIPMI provides in-band and out-of-band IPMI software based on the IPMI v1.5/2.0 specification. The IPMI specification defines a set of interfaces for platform management and is implemented by a number of vendors for system management. The features of IPMI that most users will be interested in are sensor monitoring, system event monitoring, power control, and serial-over-LAN (SOL).
+
+## compile `freeipmi.plugin`
+
+1. install `libipmimonitoring-dev` or `libipmimonitoring-devel` (`freeipmi-devel` on RHEL-based systems) using your system's package manager.
+
+2. re-install netdata from source. The installer will detect that the required libraries are now available and will also build `freeipmi.plugin`.
+
+Keep in mind IPMI requires root access, so the plugin is setuid to root.
+
+If you have just installed the required IPMI tools, run the command `ipmimonitoring` at least once and verify that it returns sensor information. This command initialises the IPMI configuration, so the netdata plugin will be able to work.
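+
+A minimal sketch of the whole procedure follows (the package-manager commands and the netdata source directory are assumptions; adjust them for your distribution and your checkout location):
+
+```sh
+# Debian/Ubuntu
+apt-get install libipmimonitoring-dev
+
+# RHEL/CentOS/Fedora
+# yum install freeipmi-devel
+
+# run once and verify it reports sensors, so the IPMI configuration is initialised
+ipmimonitoring
+
+# rebuild netdata from source; the installer detects the library and builds freeipmi.plugin
+cd /usr/src/netdata && ./netdata-installer.sh
+```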
+
+## netdata use
+
+The plugin creates (up to) 8 charts, based on the information collected from IPMI:
+
+1. number of sensors by state
+2. number of events in SEL
+3. Temperatures in Celsius
+4. Temperatures in Fahrenheit
+5. Voltages
+6. Currents
+7. Power
+8. Fans
+
+
+It also adds 2 alarms:
+
+1. Sensors in non-nominal state (i.e. warning and critical)
+2. SEL is not empty
+
+![image](https://cloud.githubusercontent.com/assets/2662304/23674138/88926a20-037d-11e7-89c0-20e74ee10cd1.png)
+
+The plugin does a speed test when it starts, to find out the duration needed by the IPMI processor to respond. Depending on the speed of your IPMI processor, charts may need several seconds to show up on the dashboard.
+
+## `freeipmi.plugin` configuration
+
+The plugin supports a few options. To see them, run:
+
+```sh
+# /usr/libexec/netdata/plugins.d/freeipmi.plugin -h
+
+ netdata freeipmi.plugin 1.8.0-546-g72ce5d6b_rolling
+ Copyright (C) 2016-2017 Costa Tsaousis <costa@tsaousis.gr>
+ Released under GNU General Public License v3 or later.
+ All rights reserved.
+
+ This program is a data collector plugin for netdata.
+
+ Available command line options:
+
+ SECONDS data collection frequency
+ minimum: 5
+
+ debug enable verbose output
+ default: disabled
+
+ sel
+ no-sel enable/disable SEL collection
+ default: enabled
+
+ hostname HOST
+ username USER
+ password PASS connect to remote IPMI host
+ default: local IPMI processor
+
+ sdr-cache-dir PATH directory for SDR cache files
+ default: /tmp
+
+ sensor-config-file FILE filename to read sensor configuration
+ default: system default
+
+ ignore N1,N2,N3,... sensor IDs to ignore
+ default: none
+
+ -v
+ -V
+ version print version and exit
+
+ Linux kernel module for IPMI is CPU hungry.
+ On Linux run this to lower kipmiN CPU utilization:
+ # echo 10 > /sys/module/ipmi_si/parameters/kipmid_max_busy_us
+
+ or create: /etc/modprobe.d/ipmi.conf with these contents:
+ options ipmi_si kipmid_max_busy_us=10
+
+ For more information:
+ https://github.com/ktsaou/netdata/tree/master/plugins/freeipmi.plugin
+
+```
+
+You can set these options in `/etc/netdata/netdata.conf` at this section:
+
+```
+[plugin:freeipmi]
+ update every = 5
+ command options =
+```
+
+Append the settings you need to `command options =`. The minimum `update every` is 5 (enforced internally by the plugin). IPMI is slow and CPU-hungry, so collecting once every 5 seconds is acceptable.
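+
+For example, to disable SEL collection and query a remote IPMI host (the hostname, username and password below are placeholders for this sketch, not values shipped with netdata):
+
+```
+[plugin:freeipmi]
+    update every = 5
+    command options = no-sel hostname 192.0.2.10 username ADMIN password ADMIN
+```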
+
+## ignoring specific sensors
+
+Specific sensor IDs can be excluded from freeipmi tools by editing `/etc/freeipmi/freeipmi.conf` and setting the IDs to be ignored at `ipmi-sensors-exclude-record-ids`. **However, this file is not used by `libipmimonitoring`** (the library used by netdata's `freeipmi.plugin`).
+
+So, `freeipmi.plugin` supports the option `ignore`, which accepts a comma-separated list of sensor IDs to ignore. To configure it, edit `/etc/netdata/netdata.conf` and set:
+
+```
+[plugin:freeipmi]
+ command options = ignore 1,2,3,4,...
+```
+
+To find the IDs to ignore, run the command `ipmimonitoring`. The first column contains the IDs you need:
+
+```
+ID | Name | Type | State | Reading | Units | Event
+1 | Ambient Temp | Temperature | Nominal | 26.00 | C | 'OK'
+2 | Altitude | Other Units Based Sensor | Nominal | 480.00 | ft | 'OK'
+3 | Avg Power | Current | Nominal | 100.00 | W | 'OK'
+4 | Planar 3.3V | Voltage | Nominal | 3.29 | V | 'OK'
+5 | Planar 5V | Voltage | Nominal | 4.90 | V | 'OK'
+6 | Planar 12V | Voltage | Nominal | 11.99 | V | 'OK'
+7 | Planar VBAT | Voltage | Nominal | 2.95 | V | 'OK'
+8 | Fan 1A Tach | Fan | Nominal | 3132.00 | RPM | 'OK'
+9 | Fan 1B Tach | Fan | Nominal | 2150.00 | RPM | 'OK'
+10 | Fan 2A Tach | Fan | Nominal | 2494.00 | RPM | 'OK'
+11 | Fan 2B Tach | Fan | Nominal | 1825.00 | RPM | 'OK'
+12 | Fan 3A Tach | Fan | Nominal | 3538.00 | RPM | 'OK'
+13 | Fan 3B Tach | Fan | Nominal | 2625.00 | RPM | 'OK'
+14 | Fan 1 | Entity Presence | Nominal | N/A | N/A | 'Entity Present'
+15 | Fan 2 | Entity Presence | Nominal | N/A | N/A | 'Entity Present'
+...
+```
+
+
+## debugging
+
+You can run the plugin by hand:
+
+```sh
+# become user netdata
+sudo su -s /bin/sh netdata
+
+# run the plugin in debug mode
+/usr/libexec/netdata/plugins.d/freeipmi.plugin 5 debug
+```
+
+You will get verbose output on what the plugin does.
+
+## kipmi0 CPU usage
+
+There have been reports that kipmi shows increased CPU usage when IPMI is queried.
+
+[IBM has given a few explanations](http://www-01.ibm.com/support/docview.wss?uid=nas7d580df3d15874988862575fa0050f604).
+
+Check also [this stackexchange post](http://unix.stackexchange.com/questions/74900/kipmi0-eating-up-to-99-8-cpu-on-centos-6-4).
+
+To lower the system's CPU consumption, you can issue this command:
+
+```sh
+echo 10 > /sys/module/ipmi_si/parameters/kipmid_max_busy_us
+```
+
+You can also make this setting permanent by creating the file `/etc/modprobe.d/ipmi.conf` with this content:
+
+```sh
+# prevent kipmi from consuming 100% CPU
+options ipmi_si kipmid_max_busy_us=10
+```
+
+This instructs the kernel IPMI module to pause for a tick between IPMI checks. Querying IPMI will be a lot slower (e.g. several seconds for IPMI to respond), but `kipmi` will not use any noticeable CPU. You can also use a higher number (this is the number of microseconds to poll IPMI for a response before waiting for a tick).
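+
+To check the value currently in effect (assuming the `ipmi_si` module is loaded):
+
+```sh
+cat /sys/module/ipmi_si/parameters/kipmid_max_busy_us
+```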
+
+If you need to disable IPMI for netdata, edit `/etc/netdata/netdata.conf` and set:
+
+```
+[plugins]
+ freeipmi = no
+```
diff --git a/collectors/freeipmi.plugin/freeipmi_plugin.c b/collectors/freeipmi.plugin/freeipmi_plugin.c
new file mode 100644
index 000000000..a1cff3af0
--- /dev/null
+++ b/collectors/freeipmi.plugin/freeipmi_plugin.c
@@ -0,0 +1,1760 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+/*
+ * netdata freeipmi.plugin
+ * Copyright (C) 2017 Costa Tsaousis
+ * GPL v3+
+ *
+ * Based on:
+ * ipmimonitoring-sensors.c,v 1.51 2016/11/02 23:46:24 chu11 Exp
+ * ipmimonitoring-sel.c,v 1.51 2016/11/02 23:46:24 chu11 Exp
+ *
+ * Copyright (C) 2007-2015 Lawrence Livermore National Security, LLC.
+ * Copyright (C) 2006-2007 The Regents of the University of California.
+ * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
+ * Written by Albert Chu <chu11@llnl.gov>
+ * UCRL-CODE-222073
+ */
+
+#include "../../libnetdata/libnetdata.h"
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <stdint.h>
+#include <string.h>
+#include <assert.h>
+#include <errno.h>
+#include <unistd.h>
+#include <sys/time.h>
+
+#ifdef HAVE_FREEIPMI
+
+// ----------------------------------------------------------------------------
+
+// callback required by fatal()
+void netdata_cleanup_and_exit(int ret) {
+ exit(ret);
+}
+
+// callbacks required by popen()
+void signals_block(void) {};
+void signals_unblock(void) {};
+void signals_reset(void) {};
+
+// callback required by eval()
+int health_variable_lookup(const char *variable, uint32_t hash, struct rrdcalc *rc, calculated_number *result) {
+ (void)variable;
+ (void)hash;
+ (void)rc;
+ (void)result;
+ return 0;
+};
+
+// required by get_system_cpus()
+char *netdata_configured_host_prefix = "";
+
+// ----------------------------------------------------------------------------
+
+#include <ipmi_monitoring.h>
+#include <ipmi_monitoring_bitmasks.h>
+
+/* Communication Configuration - Initialize accordingly */
+
+/* Hostname, NULL for In-band communication, non-null for a hostname */
+char *hostname = NULL;
+
+/* In-band Communication Configuration */
+int driver_type = -1; // IPMI_MONITORING_DRIVER_TYPE_KCS; /* or -1 for default */
+int disable_auto_probe = 0; /* probe for in-band device */
+unsigned int driver_address = 0; /* not used if probing */
+unsigned int register_spacing = 0; /* not used if probing */
+char *driver_device = NULL; /* not used if probing */
+
+/* Out-of-band Communication Configuration */
+int protocol_version = -1; //IPMI_MONITORING_PROTOCOL_VERSION_1_5; /* or -1 for default */
+char *username = "foousername";
+char *password = "foopassword";
+unsigned char *ipmi_k_g = NULL;
+unsigned int ipmi_k_g_len = 0;
+int privilege_level = -1; // IPMI_MONITORING_PRIVILEGE_LEVEL_USER; /* or -1 for default */
+int authentication_type = -1; // IPMI_MONITORING_AUTHENTICATION_TYPE_MD5; /* or -1 for default */
+int cipher_suite_id = 0; /* or -1 for default */
+int session_timeout = 0; /* 0 for default */
+int retransmission_timeout = 0; /* 0 for default */
+
+/* Workarounds - specify workaround flags if necessary */
+unsigned int workaround_flags = 0;
+
+/* Initialize w/ record id numbers to only monitor specific record ids */
+unsigned int record_ids[] = {0};
+unsigned int record_ids_length = 0;
+
+/* Initialize w/ sensor types to only monitor specific sensor types
+ * see ipmi_monitoring.h sensor types list.
+ */
+unsigned int sensor_types[] = {0};
+unsigned int sensor_types_length = 0;
+
+/* Set to an appropriate alternate if desired */
+char *sdr_cache_directory = "/tmp";
+char *sensor_config_file = NULL;
+
+/* Set to 1 or 0 to enable these sensor reading flags
+ * - See ipmi_monitoring.h for descriptions of these flags.
+ */
+int reread_sdr_cache = 0;
+int ignore_non_interpretable_sensors = 1;
+int bridge_sensors = 0;
+int interpret_oem_data = 0;
+int shared_sensors = 0;
+int discrete_reading = 0;
+int ignore_scanning_disabled = 0;
+int assume_bmc_owner = 0;
+int entity_sensor_names = 0;
+
+/* Initialization flags
+ *
+ * Most commonly bitwise OR IPMI_MONITORING_FLAGS_DEBUG and/or
+ * IPMI_MONITORING_FLAGS_DEBUG_IPMI_PACKETS for extra debugging
+ * information.
+ */
+unsigned int ipmimonitoring_init_flags = 0;
+
+int errnum;
+
+// ----------------------------------------------------------------------------
+// SEL only variables
+
+/* Initialize w/ date range to only monitoring specific date range */
+char *date_begin = NULL; /* use MM/DD/YYYY format */
+char *date_end = NULL; /* use MM/DD/YYYY format */
+
+int assume_system_event_record = 0;
+
+char *sel_config_file = NULL;
+
+
+// ----------------------------------------------------------------------------
+// functions common to sensors and SEL
+
+static void
+_init_ipmi_config (struct ipmi_monitoring_ipmi_config *ipmi_config)
+{
+ assert (ipmi_config);
+
+ ipmi_config->driver_type = driver_type;
+ ipmi_config->disable_auto_probe = disable_auto_probe;
+ ipmi_config->driver_address = driver_address;
+ ipmi_config->register_spacing = register_spacing;
+ ipmi_config->driver_device = driver_device;
+
+ ipmi_config->protocol_version = protocol_version;
+ ipmi_config->username = username;
+ ipmi_config->password = password;
+ ipmi_config->k_g = ipmi_k_g;
+ ipmi_config->k_g_len = ipmi_k_g_len;
+ ipmi_config->privilege_level = privilege_level;
+ ipmi_config->authentication_type = authentication_type;
+ ipmi_config->cipher_suite_id = cipher_suite_id;
+ ipmi_config->session_timeout_len = session_timeout;
+ ipmi_config->retransmission_timeout_len = retransmission_timeout;
+
+ ipmi_config->workaround_flags = workaround_flags;
+}
+
+#ifdef NETDATA_COMMENTED
+static const char *
+_get_sensor_type_string (int sensor_type)
+{
+ switch (sensor_type)
+ {
+ case IPMI_MONITORING_SENSOR_TYPE_RESERVED:
+ return ("Reserved");
+ case IPMI_MONITORING_SENSOR_TYPE_TEMPERATURE:
+ return ("Temperature");
+ case IPMI_MONITORING_SENSOR_TYPE_VOLTAGE:
+ return ("Voltage");
+ case IPMI_MONITORING_SENSOR_TYPE_CURRENT:
+ return ("Current");
+ case IPMI_MONITORING_SENSOR_TYPE_FAN:
+ return ("Fan");
+ case IPMI_MONITORING_SENSOR_TYPE_PHYSICAL_SECURITY:
+ return ("Physical Security");
+ case IPMI_MONITORING_SENSOR_TYPE_PLATFORM_SECURITY_VIOLATION_ATTEMPT:
+ return ("Platform Security Violation Attempt");
+ case IPMI_MONITORING_SENSOR_TYPE_PROCESSOR:
+ return ("Processor");
+ case IPMI_MONITORING_SENSOR_TYPE_POWER_SUPPLY:
+ return ("Power Supply");
+ case IPMI_MONITORING_SENSOR_TYPE_POWER_UNIT:
+ return ("Power Unit");
+ case IPMI_MONITORING_SENSOR_TYPE_COOLING_DEVICE:
+ return ("Cooling Device");
+ case IPMI_MONITORING_SENSOR_TYPE_OTHER_UNITS_BASED_SENSOR:
+ return ("Other Units Based Sensor");
+ case IPMI_MONITORING_SENSOR_TYPE_MEMORY:
+ return ("Memory");
+ case IPMI_MONITORING_SENSOR_TYPE_DRIVE_SLOT:
+ return ("Drive Slot");
+ case IPMI_MONITORING_SENSOR_TYPE_POST_MEMORY_RESIZE:
+ return ("POST Memory Resize");
+ case IPMI_MONITORING_SENSOR_TYPE_SYSTEM_FIRMWARE_PROGRESS:
+ return ("System Firmware Progress");
+ case IPMI_MONITORING_SENSOR_TYPE_EVENT_LOGGING_DISABLED:
+ return ("Event Logging Disabled");
+ case IPMI_MONITORING_SENSOR_TYPE_WATCHDOG1:
+ return ("Watchdog 1");
+ case IPMI_MONITORING_SENSOR_TYPE_SYSTEM_EVENT:
+ return ("System Event");
+ case IPMI_MONITORING_SENSOR_TYPE_CRITICAL_INTERRUPT:
+ return ("Critical Interrupt");
+ case IPMI_MONITORING_SENSOR_TYPE_BUTTON_SWITCH:
+ return ("Button/Switch");
+ case IPMI_MONITORING_SENSOR_TYPE_MODULE_BOARD:
+ return ("Module/Board");
+ case IPMI_MONITORING_SENSOR_TYPE_MICROCONTROLLER_COPROCESSOR:
+ return ("Microcontroller/Coprocessor");
+ case IPMI_MONITORING_SENSOR_TYPE_ADD_IN_CARD:
+ return ("Add In Card");
+ case IPMI_MONITORING_SENSOR_TYPE_CHASSIS:
+ return ("Chassis");
+ case IPMI_MONITORING_SENSOR_TYPE_CHIP_SET:
+ return ("Chip Set");
+ case IPMI_MONITORING_SENSOR_TYPE_OTHER_FRU:
+ return ("Other Fru");
+ case IPMI_MONITORING_SENSOR_TYPE_CABLE_INTERCONNECT:
+ return ("Cable/Interconnect");
+ case IPMI_MONITORING_SENSOR_TYPE_TERMINATOR:
+ return ("Terminator");
+ case IPMI_MONITORING_SENSOR_TYPE_SYSTEM_BOOT_INITIATED:
+ return ("System Boot Initiated");
+ case IPMI_MONITORING_SENSOR_TYPE_BOOT_ERROR:
+ return ("Boot Error");
+ case IPMI_MONITORING_SENSOR_TYPE_OS_BOOT:
+ return ("OS Boot");
+ case IPMI_MONITORING_SENSOR_TYPE_OS_CRITICAL_STOP:
+ return ("OS Critical Stop");
+ case IPMI_MONITORING_SENSOR_TYPE_SLOT_CONNECTOR:
+ return ("Slot/Connector");
+ case IPMI_MONITORING_SENSOR_TYPE_SYSTEM_ACPI_POWER_STATE:
+ return ("System ACPI Power State");
+ case IPMI_MONITORING_SENSOR_TYPE_WATCHDOG2:
+ return ("Watchdog 2");
+ case IPMI_MONITORING_SENSOR_TYPE_PLATFORM_ALERT:
+ return ("Platform Alert");
+ case IPMI_MONITORING_SENSOR_TYPE_ENTITY_PRESENCE:
+ return ("Entity Presence");
+ case IPMI_MONITORING_SENSOR_TYPE_MONITOR_ASIC_IC:
+ return ("Monitor ASIC/IC");
+ case IPMI_MONITORING_SENSOR_TYPE_LAN:
+ return ("LAN");
+ case IPMI_MONITORING_SENSOR_TYPE_MANAGEMENT_SUBSYSTEM_HEALTH:
+ return ("Management Subsystem Health");
+ case IPMI_MONITORING_SENSOR_TYPE_BATTERY:
+ return ("Battery");
+ case IPMI_MONITORING_SENSOR_TYPE_SESSION_AUDIT:
+ return ("Session Audit");
+ case IPMI_MONITORING_SENSOR_TYPE_VERSION_CHANGE:
+ return ("Version Change");
+ case IPMI_MONITORING_SENSOR_TYPE_FRU_STATE:
+ return ("FRU State");
+ }
+
+ return ("Unrecognized");
+}
+#endif // NETDATA_COMMENTED
+
+
+// ----------------------------------------------------------------------------
+// BEGIN NETDATA CODE
+
+static int debug = 0;
+
+static int netdata_update_every = 5; // this is the minimum update frequency
+static int netdata_priority = 90000;
+static int netdata_do_sel = 1;
+
+static size_t netdata_sensors_updated = 0;
+static size_t netdata_sensors_collected = 0;
+static size_t netdata_sel_events = 0;
+static size_t netdata_sensors_states_nominal = 0;
+static size_t netdata_sensors_states_warning = 0;
+static size_t netdata_sensors_states_critical = 0;
+
+struct sensor {
+ int record_id;
+ int sensor_number;
+ int sensor_type;
+ int sensor_state;
+ int sensor_units;
+ char *sensor_name;
+
+ int sensor_reading_type;
+ union {
+ uint8_t bool_value;
+ uint32_t uint32_value;
+ double double_value;
+ } sensor_reading;
+
+ int sent;
+ int ignore;
+ int exposed;
+ int updated;
+ struct sensor *next;
+} *sensors_root = NULL;
+
+static void netdata_mark_as_not_updated() {
+ struct sensor *sn;
+ for(sn = sensors_root; sn ;sn = sn->next)
+ sn->updated = sn->sent = 0;
+
+ netdata_sensors_updated = 0;
+ netdata_sensors_collected = 0;
+ netdata_sel_events = 0;
+
+ netdata_sensors_states_nominal = 0;
+ netdata_sensors_states_warning = 0;
+ netdata_sensors_states_critical = 0;
+}
+
+static void send_chart_to_netdata_for_units(int units) {
+ struct sensor *sn;
+
+ switch(units) {
+ case IPMI_MONITORING_SENSOR_UNITS_CELSIUS:
+            printf("CHART ipmi.temperatures_c '' 'System Celsius Temperatures read by IPMI' 'Celsius' 'temperatures' 'ipmi.temperatures_c' 'line' %d %d\n"
+ , netdata_priority + 10
+ , netdata_update_every
+ );
+ break;
+
+ case IPMI_MONITORING_SENSOR_UNITS_FAHRENHEIT:
+ printf("CHART ipmi.temperatures_f '' 'System Fahrenheit Temperatures read by IPMI' 'Fahrenheit' 'temperatures' 'ipmi.temperatures_f' 'line' %d %d\n"
+ , netdata_priority + 11
+ , netdata_update_every
+ );
+ break;
+
+ case IPMI_MONITORING_SENSOR_UNITS_VOLTS:
+ printf("CHART ipmi.volts '' 'System Voltages read by IPMI' 'Volts' 'voltages' 'ipmi.voltages' 'line' %d %d\n"
+ , netdata_priority + 12
+ , netdata_update_every
+ );
+ break;
+
+ case IPMI_MONITORING_SENSOR_UNITS_AMPS:
+ printf("CHART ipmi.amps '' 'System Current read by IPMI' 'Amps' 'current' 'ipmi.amps' 'line' %d %d\n"
+ , netdata_priority + 13
+ , netdata_update_every
+ );
+ break;
+
+ case IPMI_MONITORING_SENSOR_UNITS_RPM:
+ printf("CHART ipmi.rpm '' 'System Fans read by IPMI' 'RPM' 'fans' 'ipmi.rpm' 'line' %d %d\n"
+ , netdata_priority + 14
+ , netdata_update_every
+ );
+ break;
+
+ case IPMI_MONITORING_SENSOR_UNITS_WATTS:
+ printf("CHART ipmi.watts '' 'System Power read by IPMI' 'Watts' 'power' 'ipmi.watts' 'line' %d %d\n"
+ , netdata_priority + 5
+ , netdata_update_every
+ );
+ break;
+
+ case IPMI_MONITORING_SENSOR_UNITS_PERCENT:
+ printf("CHART ipmi.percent '' 'System Metrics read by IPMI' '%%' 'other' 'ipmi.percent' 'line' %d %d\n"
+ , netdata_priority + 15
+ , netdata_update_every
+ );
+ break;
+
+ default:
+ for(sn = sensors_root; sn; sn = sn->next)
+ if(sn->sensor_units == units)
+ sn->ignore = 1;
+ return;
+ }
+
+ for(sn = sensors_root; sn; sn = sn->next) {
+ if(sn->sensor_units == units && sn->updated && !sn->ignore) {
+ sn->exposed = 1;
+
+ switch(sn->sensor_reading_type) {
+ case IPMI_MONITORING_SENSOR_READING_TYPE_UNSIGNED_INTEGER8_BOOL:
+ case IPMI_MONITORING_SENSOR_READING_TYPE_UNSIGNED_INTEGER32:
+ printf("DIMENSION i%d_n%d_r%d '%s i%d' absolute 1 1\n"
+ , sn->sensor_number
+ , sn->record_id
+ , sn->sensor_reading_type
+ , sn->sensor_name
+ , sn->sensor_number
+ );
+ break;
+
+ case IPMI_MONITORING_SENSOR_READING_TYPE_DOUBLE:
+ printf("DIMENSION i%d_n%d_r%d '%s i%d' absolute 1 1000\n"
+ , sn->sensor_number
+ , sn->record_id
+ , sn->sensor_reading_type
+ , sn->sensor_name
+ , sn->sensor_number
+ );
+ break;
+
+ default:
+ sn->ignore = 1;
+ break;
+ }
+ }
+ }
+}
+
+static void send_metrics_to_netdata_for_units(int units) {
+ struct sensor *sn;
+
+ switch(units) {
+ case IPMI_MONITORING_SENSOR_UNITS_CELSIUS:
+ printf("BEGIN ipmi.temperatures_c\n");
+ break;
+
+ case IPMI_MONITORING_SENSOR_UNITS_FAHRENHEIT:
+ printf("BEGIN ipmi.temperatures_f\n");
+ break;
+
+ case IPMI_MONITORING_SENSOR_UNITS_VOLTS:
+ printf("BEGIN ipmi.volts\n");
+ break;
+
+ case IPMI_MONITORING_SENSOR_UNITS_AMPS:
+ printf("BEGIN ipmi.amps\n");
+ break;
+
+ case IPMI_MONITORING_SENSOR_UNITS_RPM:
+ printf("BEGIN ipmi.rpm\n");
+ break;
+
+ case IPMI_MONITORING_SENSOR_UNITS_WATTS:
+ printf("BEGIN ipmi.watts\n");
+ break;
+
+ case IPMI_MONITORING_SENSOR_UNITS_PERCENT:
+ printf("BEGIN ipmi.percent\n");
+ break;
+
+ default:
+ for(sn = sensors_root; sn; sn = sn->next)
+ if(sn->sensor_units == units)
+ sn->ignore = 1;
+ return;
+ }
+
+ for(sn = sensors_root; sn; sn = sn->next) {
+ if(sn->sensor_units == units && sn->updated && !sn->sent && !sn->ignore) {
+ netdata_sensors_updated++;
+
+ sn->sent = 1;
+
+ switch(sn->sensor_reading_type) {
+ case IPMI_MONITORING_SENSOR_READING_TYPE_UNSIGNED_INTEGER8_BOOL:
+ printf("SET i%d_n%d_r%d = %u\n"
+ , sn->sensor_number
+ , sn->record_id
+ , sn->sensor_reading_type
+ , sn->sensor_reading.bool_value
+ );
+ break;
+
+ case IPMI_MONITORING_SENSOR_READING_TYPE_UNSIGNED_INTEGER32:
+ printf("SET i%d_n%d_r%d = %u\n"
+ , sn->sensor_number
+ , sn->record_id
+ , sn->sensor_reading_type
+ , sn->sensor_reading.uint32_value
+ );
+ break;
+
+ case IPMI_MONITORING_SENSOR_READING_TYPE_DOUBLE:
+ printf("SET i%d_n%d_r%d = %lld\n"
+ , sn->sensor_number
+ , sn->record_id
+ , sn->sensor_reading_type
+ , (long long int)(sn->sensor_reading.double_value * 1000)
+ );
+ break;
+
+ default:
+ sn->ignore = 1;
+ break;
+ }
+ }
+ }
+
+ printf("END\n");
+}
+
+static void send_metrics_to_netdata() {
+ static int sel_chart_generated = 0, sensors_states_chart_generated = 0;
+ struct sensor *sn;
+
+ if(netdata_do_sel && !sel_chart_generated) {
+ sel_chart_generated = 1;
+ printf("CHART ipmi.events '' 'IPMI Events' 'events' 'events' ipmi.sel area %d %d\n"
+ , netdata_priority + 2
+ , netdata_update_every
+ );
+ printf("DIMENSION events '' absolute 1 1\n");
+ }
+
+ if(!sensors_states_chart_generated) {
+ sensors_states_chart_generated = 1;
+ printf("CHART ipmi.sensors_states '' 'IPMI Sensors State' 'sensors' 'states' ipmi.sensors_states line %d %d\n"
+ , netdata_priority + 1
+ , netdata_update_every
+ );
+ printf("DIMENSION nominal '' absolute 1 1\n");
+ printf("DIMENSION critical '' absolute 1 1\n");
+ printf("DIMENSION warning '' absolute 1 1\n");
+ }
+
+ // generate the CHART/DIMENSION lines, if we have to
+ for(sn = sensors_root; sn; sn = sn->next)
+ if(sn->updated && !sn->exposed && !sn->ignore)
+ send_chart_to_netdata_for_units(sn->sensor_units);
+
+ if(netdata_do_sel) {
+ printf(
+ "BEGIN ipmi.events\n"
+ "SET events = %zu\n"
+ "END\n"
+ , netdata_sel_events
+ );
+ }
+
+ printf(
+ "BEGIN ipmi.sensors_states\n"
+ "SET nominal = %zu\n"
+ "SET warning = %zu\n"
+ "SET critical = %zu\n"
+ "END\n"
+ , netdata_sensors_states_nominal
+ , netdata_sensors_states_warning
+ , netdata_sensors_states_critical
+ );
+
+ // send metrics to netdata
+ for(sn = sensors_root; sn; sn = sn->next)
+ if(sn->updated && sn->exposed && !sn->sent && !sn->ignore)
+ send_metrics_to_netdata_for_units(sn->sensor_units);
+
+}
+
+static int *excluded_record_ids = NULL;
+size_t excluded_record_ids_length = 0;
+
+static void excluded_record_ids_parse(const char *s) {
+ if(!s) return;
+
+ while(*s) {
+ while(*s && !isdigit(*s)) s++;
+
+ if(isdigit(*s)) {
+ char *e;
+ unsigned long n = strtoul(s, &e, 10);
+ s = e;
+
+ if(n != 0) {
+ excluded_record_ids = realloc(excluded_record_ids, (excluded_record_ids_length + 1) * sizeof(int));
+ if(!excluded_record_ids) {
+ fprintf(stderr, "freeipmi.plugin: failed to allocate memory. Exiting.");
+ exit(1);
+ }
+ excluded_record_ids[excluded_record_ids_length++] = (int)n;
+ }
+ }
+ }
+
+ if(debug) {
+ fprintf(stderr, "freeipmi.plugin: excluded record ids:");
+ size_t i;
+ for(i = 0; i < excluded_record_ids_length; i++) {
+ fprintf(stderr, " %d", excluded_record_ids[i]);
+ }
+ fprintf(stderr, "\n");
+ }
+}
+
+static int *excluded_status_record_ids = NULL;
+size_t excluded_status_record_ids_length = 0;
+
+static void excluded_status_record_ids_parse(const char *s) {
+ if(!s) return;
+
+ while(*s) {
+ while(*s && !isdigit(*s)) s++;
+
+ if(isdigit(*s)) {
+ char *e;
+ unsigned long n = strtoul(s, &e, 10);
+ s = e;
+
+ if(n != 0) {
+ excluded_status_record_ids = realloc(excluded_status_record_ids, (excluded_status_record_ids_length + 1) * sizeof(int));
+ if(!excluded_status_record_ids) {
+ fprintf(stderr, "freeipmi.plugin: failed to allocate memory. Exiting.");
+ exit(1);
+ }
+ excluded_status_record_ids[excluded_status_record_ids_length++] = (int)n;
+ }
+ }
+ }
+
+ if(debug) {
+ fprintf(stderr, "freeipmi.plugin: excluded status record ids:");
+ size_t i;
+ for(i = 0; i < excluded_status_record_ids_length; i++) {
+ fprintf(stderr, " %d", excluded_status_record_ids[i]);
+ }
+ fprintf(stderr, "\n");
+ }
+}
+
+
+static int excluded_record_ids_check(int record_id) {
+ size_t i;
+
+ for(i = 0; i < excluded_record_ids_length; i++) {
+ if(excluded_record_ids[i] == record_id)
+ return 1;
+ }
+
+ return 0;
+}
+
+static int excluded_status_record_ids_check(int record_id) {
+ size_t i;
+
+ for(i = 0; i < excluded_status_record_ids_length; i++) {
+ if(excluded_status_record_ids[i] == record_id)
+ return 1;
+ }
+
+ return 0;
+}
+
+static void netdata_get_sensor(
+ int record_id
+ , int sensor_number
+ , int sensor_type
+ , int sensor_state
+ , int sensor_units
+ , int sensor_reading_type
+ , char *sensor_name
+ , void *sensor_reading
+) {
+ // find the sensor record
+ struct sensor *sn;
+ for(sn = sensors_root; sn ;sn = sn->next)
+ if( sn->record_id == record_id &&
+ sn->sensor_number == sensor_number &&
+ sn->sensor_reading_type == sensor_reading_type &&
+ sn->sensor_units == sensor_units &&
+ !strcmp(sn->sensor_name, sensor_name)
+ )
+ break;
+
+ if(!sn) {
+ // not found, create it
+
+ // check if it is excluded
+ if(excluded_record_ids_check(record_id))
+ return;
+
+ sn = calloc(1, sizeof(struct sensor));
+ if(!sn) {
+ fatal("cannot allocate %zu bytes of memory.", sizeof(struct sensor));
+ }
+
+ sn->record_id = record_id;
+ sn->sensor_number = sensor_number;
+ sn->sensor_type = sensor_type;
+ sn->sensor_state = sensor_state;
+ sn->sensor_units = sensor_units;
+ sn->sensor_reading_type = sensor_reading_type;
+ sn->sensor_name = strdup(sensor_name);
+ if(!sn->sensor_name) {
+ fatal("cannot allocate %zu bytes of memory.", strlen(sensor_name));
+ }
+
+ sn->next = sensors_root;
+ sensors_root = sn;
+ }
+
+ switch(sensor_reading_type) {
+ case IPMI_MONITORING_SENSOR_READING_TYPE_UNSIGNED_INTEGER8_BOOL:
+ sn->sensor_reading.bool_value = *((uint8_t *)sensor_reading);
+ sn->updated = 1;
+ netdata_sensors_collected++;
+ break;
+
+ case IPMI_MONITORING_SENSOR_READING_TYPE_UNSIGNED_INTEGER32:
+ sn->sensor_reading.uint32_value = *((uint32_t *)sensor_reading);
+ sn->updated = 1;
+ netdata_sensors_collected++;
+ break;
+
+ case IPMI_MONITORING_SENSOR_READING_TYPE_DOUBLE:
+ sn->sensor_reading.double_value = *((double *)sensor_reading);
+ sn->updated = 1;
+ netdata_sensors_collected++;
+ break;
+
+ default:
+ sn->ignore = 1;
+ break;
+ }
+
+ // check if it is excluded
+ if(excluded_status_record_ids_check(record_id))
+ return;
+
+ switch(sensor_state) {
+ case IPMI_MONITORING_STATE_NOMINAL:
+ netdata_sensors_states_nominal++;
+ break;
+
+ case IPMI_MONITORING_STATE_WARNING:
+ netdata_sensors_states_warning++;
+ break;
+
+ case IPMI_MONITORING_STATE_CRITICAL:
+ netdata_sensors_states_critical++;
+ break;
+
+ default:
+ break;
+ }
+}
+
+static void netdata_get_sel(
+ int record_id
+ , int record_type_class
+ , int sel_state
+) {
+ (void)record_id;
+ (void)record_type_class;
+ (void)sel_state;
+
+ netdata_sel_events++;
+}
+
+
+// END NETDATA CODE
+// ----------------------------------------------------------------------------
+
+
+static int
+_ipmimonitoring_sensors (struct ipmi_monitoring_ipmi_config *ipmi_config)
+{
+ ipmi_monitoring_ctx_t ctx = NULL;
+ unsigned int sensor_reading_flags = 0;
+ int i;
+ int sensor_count;
+ int rv = -1;
+
+ if (!(ctx = ipmi_monitoring_ctx_create ())) {
+ error("ipmi_monitoring_ctx_create()");
+ goto cleanup;
+ }
+
+ if (sdr_cache_directory)
+ {
+ if (ipmi_monitoring_ctx_sdr_cache_directory (ctx,
+ sdr_cache_directory) < 0)
+ {
+ error("ipmi_monitoring_ctx_sdr_cache_directory(): %s\n",
+ ipmi_monitoring_ctx_errormsg (ctx));
+ goto cleanup;
+ }
+ }
+
+ /* Must call otherwise only default interpretations ever used */
+ if (sensor_config_file)
+ {
+ if (ipmi_monitoring_ctx_sensor_config_file (ctx,
+ sensor_config_file) < 0)
+ {
+ error( "ipmi_monitoring_ctx_sensor_config_file(): %s\n",
+ ipmi_monitoring_ctx_errormsg (ctx));
+ goto cleanup;
+ }
+ }
+ else
+ {
+ if (ipmi_monitoring_ctx_sensor_config_file (ctx, NULL) < 0)
+ {
+ error( "ipmi_monitoring_ctx_sensor_config_file(): %s\n",
+ ipmi_monitoring_ctx_errormsg (ctx));
+ goto cleanup;
+ }
+ }
+
+ if (reread_sdr_cache)
+ sensor_reading_flags |= IPMI_MONITORING_SENSOR_READING_FLAGS_REREAD_SDR_CACHE;
+
+ if (ignore_non_interpretable_sensors)
+ sensor_reading_flags |= IPMI_MONITORING_SENSOR_READING_FLAGS_IGNORE_NON_INTERPRETABLE_SENSORS;
+
+ if (bridge_sensors)
+ sensor_reading_flags |= IPMI_MONITORING_SENSOR_READING_FLAGS_BRIDGE_SENSORS;
+
+ if (interpret_oem_data)
+ sensor_reading_flags |= IPMI_MONITORING_SENSOR_READING_FLAGS_INTERPRET_OEM_DATA;
+
+ if (shared_sensors)
+ sensor_reading_flags |= IPMI_MONITORING_SENSOR_READING_FLAGS_SHARED_SENSORS;
+
+ if (discrete_reading)
+ sensor_reading_flags |= IPMI_MONITORING_SENSOR_READING_FLAGS_DISCRETE_READING;
+
+ if (ignore_scanning_disabled)
+ sensor_reading_flags |= IPMI_MONITORING_SENSOR_READING_FLAGS_IGNORE_SCANNING_DISABLED;
+
+ if (assume_bmc_owner)
+ sensor_reading_flags |= IPMI_MONITORING_SENSOR_READING_FLAGS_ASSUME_BMC_OWNER;
+
+#ifdef IPMI_MONITORING_SENSOR_READING_FLAGS_ENTITY_SENSOR_NAMES
+ if (entity_sensor_names)
+ sensor_reading_flags |= IPMI_MONITORING_SENSOR_READING_FLAGS_ENTITY_SENSOR_NAMES;
+#endif // IPMI_MONITORING_SENSOR_READING_FLAGS_ENTITY_SENSOR_NAMES
+
+ if (!record_ids_length && !sensor_types_length)
+ {
+ if ((sensor_count = ipmi_monitoring_sensor_readings_by_record_id (ctx,
+ hostname,
+ ipmi_config,
+ sensor_reading_flags,
+ NULL,
+ 0,
+ NULL,
+ NULL)) < 0)
+ {
+ error( "ipmi_monitoring_sensor_readings_by_record_id(): %s",
+ ipmi_monitoring_ctx_errormsg (ctx));
+ goto cleanup;
+ }
+ }
+ else if (record_ids_length)
+ {
+ if ((sensor_count = ipmi_monitoring_sensor_readings_by_record_id (ctx,
+ hostname,
+ ipmi_config,
+ sensor_reading_flags,
+ record_ids,
+ record_ids_length,
+ NULL,
+ NULL)) < 0)
+ {
+ error( "ipmi_monitoring_sensor_readings_by_record_id(): %s",
+ ipmi_monitoring_ctx_errormsg (ctx));
+ goto cleanup;
+ }
+ }
+ else
+ {
+ if ((sensor_count = ipmi_monitoring_sensor_readings_by_sensor_type (ctx,
+ hostname,
+ ipmi_config,
+ sensor_reading_flags,
+ sensor_types,
+ sensor_types_length,
+ NULL,
+ NULL)) < 0)
+ {
+ error( "ipmi_monitoring_sensor_readings_by_sensor_type(): %s",
+ ipmi_monitoring_ctx_errormsg (ctx));
+ goto cleanup;
+ }
+ }
+
+#ifdef NETDATA_COMMENTED
+ printf ("%s, %s, %s, %s, %s, %s, %s, %s, %s, %s\n",
+ "Record ID",
+ "Sensor Name",
+ "Sensor Number",
+ "Sensor Type",
+ "Sensor State",
+ "Sensor Reading",
+ "Sensor Units",
+ "Sensor Event/Reading Type Code",
+ "Sensor Event Bitmask",
+ "Sensor Event String");
+#endif // NETDATA_COMMENTED
+
+ for (i = 0; i < sensor_count; i++, ipmi_monitoring_sensor_iterator_next (ctx))
+ {
+ int record_id, sensor_number, sensor_type, sensor_state, sensor_units,
+ sensor_reading_type;
+
+#ifdef NETDATA_COMMENTED
+ int sensor_bitmask_type, sensor_bitmask, event_reading_type_code;
+ char **sensor_bitmask_strings = NULL;
+ const char *sensor_type_str;
+ const char *sensor_state_str;
+#endif // NETDATA_COMMENTED
+
+ char *sensor_name = NULL;
+ void *sensor_reading;
+
+ if ((record_id = ipmi_monitoring_sensor_read_record_id (ctx)) < 0)
+ {
+ error( "ipmi_monitoring_sensor_read_record_id(): %s",
+ ipmi_monitoring_ctx_errormsg (ctx));
+ goto cleanup;
+ }
+
+ if ((sensor_number = ipmi_monitoring_sensor_read_sensor_number (ctx)) < 0)
+ {
+ error( "ipmi_monitoring_sensor_read_sensor_number(): %s",
+ ipmi_monitoring_ctx_errormsg (ctx));
+ goto cleanup;
+ }
+
+ if ((sensor_type = ipmi_monitoring_sensor_read_sensor_type (ctx)) < 0)
+ {
+ error( "ipmi_monitoring_sensor_read_sensor_type(): %s",
+ ipmi_monitoring_ctx_errormsg (ctx));
+ goto cleanup;
+ }
+
+ if (!(sensor_name = ipmi_monitoring_sensor_read_sensor_name (ctx)))
+ {
+ error( "ipmi_monitoring_sensor_read_sensor_name(): %s",
+ ipmi_monitoring_ctx_errormsg (ctx));
+ goto cleanup;
+ }
+
+ if ((sensor_state = ipmi_monitoring_sensor_read_sensor_state (ctx)) < 0)
+ {
+ error( "ipmi_monitoring_sensor_read_sensor_state(): %s",
+ ipmi_monitoring_ctx_errormsg (ctx));
+ goto cleanup;
+ }
+
+ if ((sensor_units = ipmi_monitoring_sensor_read_sensor_units (ctx)) < 0)
+ {
+ error( "ipmi_monitoring_sensor_read_sensor_units(): %s",
+ ipmi_monitoring_ctx_errormsg (ctx));
+ goto cleanup;
+ }
+
+#ifdef NETDATA_COMMENTED
+ if ((sensor_bitmask_type = ipmi_monitoring_sensor_read_sensor_bitmask_type (ctx)) < 0)
+ {
+ error( "ipmi_monitoring_sensor_read_sensor_bitmask_type(): %s",
+ ipmi_monitoring_ctx_errormsg (ctx));
+ goto cleanup;
+ }
+ if ((sensor_bitmask = ipmi_monitoring_sensor_read_sensor_bitmask (ctx)) < 0)
+ {
+ error(
+ "ipmi_monitoring_sensor_read_sensor_bitmask(): %s",
+ ipmi_monitoring_ctx_errormsg (ctx));
+ goto cleanup;
+ }
+
+ if (!(sensor_bitmask_strings = ipmi_monitoring_sensor_read_sensor_bitmask_strings (ctx)))
+ {
+ error( "ipmi_monitoring_sensor_read_sensor_bitmask_strings(): %s",
+ ipmi_monitoring_ctx_errormsg (ctx));
+ goto cleanup;
+ }
+#endif // NETDATA_COMMENTED
+
+ if ((sensor_reading_type = ipmi_monitoring_sensor_read_sensor_reading_type (ctx)) < 0)
+ {
+ error( "ipmi_monitoring_sensor_read_sensor_reading_type(): %s",
+ ipmi_monitoring_ctx_errormsg (ctx));
+ goto cleanup;
+ }
+
+ sensor_reading = ipmi_monitoring_sensor_read_sensor_reading (ctx);
+
+#ifdef NETDATA_COMMENTED
+ if ((event_reading_type_code = ipmi_monitoring_sensor_read_event_reading_type_code (ctx)) < 0)
+ {
+ error( "ipmi_monitoring_sensor_read_event_reading_type_code(): %s",
+ ipmi_monitoring_ctx_errormsg (ctx));
+ goto cleanup;
+ }
+#endif // NETDATA_COMMENTED
+
+ netdata_get_sensor(
+ record_id
+ , sensor_number
+ , sensor_type
+ , sensor_state
+ , sensor_units
+ , sensor_reading_type
+ , sensor_name
+ , sensor_reading
+ );
+
+#ifdef NETDATA_COMMENTED
+ if (!strlen (sensor_name))
+ sensor_name = "N/A";
+
+ sensor_type_str = _get_sensor_type_string (sensor_type);
+
+ printf ("%d, %s, %d, %s",
+ record_id,
+ sensor_name,
+ sensor_number,
+ sensor_type_str);
+
+ if (sensor_state == IPMI_MONITORING_STATE_NOMINAL)
+ sensor_state_str = "Nominal";
+ else if (sensor_state == IPMI_MONITORING_STATE_WARNING)
+ sensor_state_str = "Warning";
+ else if (sensor_state == IPMI_MONITORING_STATE_CRITICAL)
+ sensor_state_str = "Critical";
+ else
+ sensor_state_str = "N/A";
+
+ printf (", %s", sensor_state_str);
+
+ if (sensor_reading)
+ {
+ const char *sensor_units_str;
+
+ if (sensor_reading_type == IPMI_MONITORING_SENSOR_READING_TYPE_UNSIGNED_INTEGER8_BOOL)
+ printf (", %s",
+ (*((uint8_t *)sensor_reading) ? "true" : "false"));
+ else if (sensor_reading_type == IPMI_MONITORING_SENSOR_READING_TYPE_UNSIGNED_INTEGER32)
+ printf (", %u",
+ *((uint32_t *)sensor_reading));
+ else if (sensor_reading_type == IPMI_MONITORING_SENSOR_READING_TYPE_DOUBLE)
+ printf (", %.2f",
+ *((double *)sensor_reading));
+ else
+ printf (", N/A");
+
+ if (sensor_units == IPMI_MONITORING_SENSOR_UNITS_CELSIUS)
+ sensor_units_str = "C";
+ else if (sensor_units == IPMI_MONITORING_SENSOR_UNITS_FAHRENHEIT)
+ sensor_units_str = "F";
+ else if (sensor_units == IPMI_MONITORING_SENSOR_UNITS_VOLTS)
+ sensor_units_str = "V";
+ else if (sensor_units == IPMI_MONITORING_SENSOR_UNITS_AMPS)
+ sensor_units_str = "A";
+ else if (sensor_units == IPMI_MONITORING_SENSOR_UNITS_RPM)
+ sensor_units_str = "RPM";
+ else if (sensor_units == IPMI_MONITORING_SENSOR_UNITS_WATTS)
+ sensor_units_str = "W";
+ else if (sensor_units == IPMI_MONITORING_SENSOR_UNITS_PERCENT)
+ sensor_units_str = "%";
+ else
+ sensor_units_str = "N/A";
+
+ printf (", %s", sensor_units_str);
+ }
+ else
+ printf (", N/A, N/A");
+
+ printf (", %Xh", event_reading_type_code);
+
+ /* It is possible you may want to monitor specific event
+ * conditions that may occur. If that is the case, you may want
+ * to check out what specific bitmask type and bitmask events
+ * occurred. See ipmi_monitoring_bitmasks.h for a list of
+ * bitmasks and types.
+ */
+
+ if (sensor_bitmask_type != IPMI_MONITORING_SENSOR_BITMASK_TYPE_UNKNOWN)
+ printf (", %Xh", sensor_bitmask);
+ else
+ printf (", N/A");
+
+ if (sensor_bitmask_type != IPMI_MONITORING_SENSOR_BITMASK_TYPE_UNKNOWN)
+ {
+ unsigned int i = 0;
+
+ printf (",");
+
+ while (sensor_bitmask_strings[i])
+ {
+ printf (" ");
+
+ printf ("'%s'",
+ sensor_bitmask_strings[i]);
+
+ i++;
+ }
+ }
+ else
+ printf (", N/A");
+
+ printf ("\n");
+#endif // NETDATA_COMMENTED
+ }
+
+ rv = 0;
+ cleanup:
+ if (ctx)
+ ipmi_monitoring_ctx_destroy (ctx);
+ return (rv);
+}
+
+
+static int
+_ipmimonitoring_sel (struct ipmi_monitoring_ipmi_config *ipmi_config)
+{
+ ipmi_monitoring_ctx_t ctx = NULL;
+ unsigned int sel_flags = 0;
+ int i;
+ int sel_count;
+ int rv = -1;
+
+ if (!(ctx = ipmi_monitoring_ctx_create ()))
+ {
+ error("ipmi_monitoring_ctx_create()");
+ goto cleanup;
+ }
+
+ if (sdr_cache_directory)
+ {
+ if (ipmi_monitoring_ctx_sdr_cache_directory (ctx,
+ sdr_cache_directory) < 0)
+ {
+ error( "ipmi_monitoring_ctx_sdr_cache_directory(): %s",
+ ipmi_monitoring_ctx_errormsg (ctx));
+ goto cleanup;
+ }
+ }
+
+ /* Must call otherwise only default interpretations ever used */
+ if (sel_config_file)
+ {
+ if (ipmi_monitoring_ctx_sel_config_file (ctx,
+ sel_config_file) < 0)
+ {
+ error( "ipmi_monitoring_ctx_sel_config_file(): %s",
+ ipmi_monitoring_ctx_errormsg (ctx));
+ goto cleanup;
+ }
+ }
+ else
+ {
+ if (ipmi_monitoring_ctx_sel_config_file (ctx, NULL) < 0)
+ {
+ error( "ipmi_monitoring_ctx_sel_config_file(): %s",
+ ipmi_monitoring_ctx_errormsg (ctx));
+ goto cleanup;
+ }
+ }
+
+ if (reread_sdr_cache)
+ sel_flags |= IPMI_MONITORING_SEL_FLAGS_REREAD_SDR_CACHE;
+
+ if (interpret_oem_data)
+ sel_flags |= IPMI_MONITORING_SEL_FLAGS_INTERPRET_OEM_DATA;
+
+ if (assume_system_event_record)
+ sel_flags |= IPMI_MONITORING_SEL_FLAGS_ASSUME_SYSTEM_EVENT_RECORD;
+
+#ifdef IPMI_MONITORING_SEL_FLAGS_ENTITY_SENSOR_NAMES
+ if (entity_sensor_names)
+ sel_flags |= IPMI_MONITORING_SEL_FLAGS_ENTITY_SENSOR_NAMES;
+#endif // IPMI_MONITORING_SEL_FLAGS_ENTITY_SENSOR_NAMES
+
+ if (record_ids_length)
+ {
+ if ((sel_count = ipmi_monitoring_sel_by_record_id (ctx,
+ hostname,
+ ipmi_config,
+ sel_flags,
+ record_ids,
+ record_ids_length,
+ NULL,
+ NULL)) < 0)
+ {
+ error( "ipmi_monitoring_sel_by_record_id(): %s",
+ ipmi_monitoring_ctx_errormsg (ctx));
+ goto cleanup;
+ }
+ }
+ else if (sensor_types_length)
+ {
+ if ((sel_count = ipmi_monitoring_sel_by_sensor_type (ctx,
+ hostname,
+ ipmi_config,
+ sel_flags,
+ sensor_types,
+ sensor_types_length,
+ NULL,
+ NULL)) < 0)
+ {
+ error( "ipmi_monitoring_sel_by_sensor_type(): %s",
+ ipmi_monitoring_ctx_errormsg (ctx));
+ goto cleanup;
+ }
+ }
+ else if (date_begin
+ || date_end)
+ {
+ if ((sel_count = ipmi_monitoring_sel_by_date_range (ctx,
+ hostname,
+ ipmi_config,
+ sel_flags,
+ date_begin,
+ date_end,
+ NULL,
+ NULL)) < 0)
+ {
+            error( "ipmi_monitoring_sel_by_date_range(): %s",
+ ipmi_monitoring_ctx_errormsg (ctx));
+ goto cleanup;
+ }
+ }
+ else
+ {
+ if ((sel_count = ipmi_monitoring_sel_by_record_id (ctx,
+ hostname,
+ ipmi_config,
+ sel_flags,
+ NULL,
+ 0,
+ NULL,
+ NULL)) < 0)
+ {
+ error( "ipmi_monitoring_sel_by_record_id(): %s",
+ ipmi_monitoring_ctx_errormsg (ctx));
+ goto cleanup;
+ }
+ }
+
+#ifdef NETDATA_COMMENTED
+ printf ("%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s\n",
+ "Record ID",
+ "Record Type",
+ "SEL State",
+ "Timestamp",
+ "Sensor Name",
+ "Sensor Type",
+ "Event Direction",
+ "Event Type Code",
+ "Event Data",
+ "Event Offset",
+ "Event Offset String");
+#endif // NETDATA_COMMENTED
+
+ for (i = 0; i < sel_count; i++, ipmi_monitoring_sel_iterator_next (ctx))
+ {
+ int record_id, record_type, sel_state, record_type_class;
+#ifdef NETDATA_COMMENTED
+ int sensor_type, sensor_number, event_direction,
+ event_offset_type, event_offset, event_type_code, manufacturer_id;
+ unsigned int timestamp, event_data1, event_data2, event_data3;
+ char *event_offset_string = NULL;
+ const char *sensor_type_str;
+ const char *event_direction_str;
+ const char *sel_state_str;
+ char *sensor_name = NULL;
+ unsigned char oem_data[64];
+ int oem_data_len;
+ unsigned int j;
+#endif // NETDATA_COMMENTED
+
+ if ((record_id = ipmi_monitoring_sel_read_record_id (ctx)) < 0)
+ {
+ error( "ipmi_monitoring_sel_read_record_id(): %s",
+ ipmi_monitoring_ctx_errormsg (ctx));
+ goto cleanup;
+ }
+
+ if ((record_type = ipmi_monitoring_sel_read_record_type (ctx)) < 0)
+ {
+ error( "ipmi_monitoring_sel_read_record_type(): %s",
+ ipmi_monitoring_ctx_errormsg (ctx));
+ goto cleanup;
+ }
+
+ if ((record_type_class = ipmi_monitoring_sel_read_record_type_class (ctx)) < 0)
+ {
+ error( "ipmi_monitoring_sel_read_record_type_class(): %s",
+ ipmi_monitoring_ctx_errormsg (ctx));
+ goto cleanup;
+ }
+
+ if ((sel_state = ipmi_monitoring_sel_read_sel_state (ctx)) < 0)
+ {
+ error( "ipmi_monitoring_sel_read_sel_state(): %s",
+ ipmi_monitoring_ctx_errormsg (ctx));
+ goto cleanup;
+ }
+
+ netdata_get_sel(
+ record_id
+ , record_type_class
+ , sel_state
+ );
+
+#ifdef NETDATA_COMMENTED
+ if (sel_state == IPMI_MONITORING_STATE_NOMINAL)
+ sel_state_str = "Nominal";
+ else if (sel_state == IPMI_MONITORING_STATE_WARNING)
+ sel_state_str = "Warning";
+ else if (sel_state == IPMI_MONITORING_STATE_CRITICAL)
+ sel_state_str = "Critical";
+ else
+ sel_state_str = "N/A";
+
+ printf ("%d, %d, %s",
+ record_id,
+ record_type,
+ sel_state_str);
+
+ if (record_type_class == IPMI_MONITORING_SEL_RECORD_TYPE_CLASS_SYSTEM_EVENT_RECORD
+ || record_type_class == IPMI_MONITORING_SEL_RECORD_TYPE_CLASS_TIMESTAMPED_OEM_RECORD)
+ {
+
+ if (ipmi_monitoring_sel_read_timestamp (ctx, &timestamp) < 0)
+ {
+ error( "ipmi_monitoring_sel_read_timestamp(): %s",
+ ipmi_monitoring_ctx_errormsg (ctx));
+ goto cleanup;
+ }
+
+ /* XXX: This should be converted to a nice date output using
+ * your favorite timestamp -> string conversion functions.
+ */
+ printf (", %u", timestamp);
+ }
+ else
+ printf (", N/A");
+
+ if (record_type_class == IPMI_MONITORING_SEL_RECORD_TYPE_CLASS_SYSTEM_EVENT_RECORD)
+ {
+ /* If you are integrating ipmimonitoring SEL into a monitoring application,
+ * you may wish to count the number of times a specific error occurred
+ * and report that to the monitoring application.
+ *
+ * In this particular case, you'll probably want to check out
+ * what sensor type each SEL event is reporting, the
+ * event offset type, and the specific event offset that occurred.
+ *
+ * See ipmi_monitoring_offsets.h for a list of event offsets
+ * and types.
+ */
+
+ if (!(sensor_name = ipmi_monitoring_sel_read_sensor_name (ctx)))
+ {
+ error( "ipmi_monitoring_sel_read_sensor_name(): %s",
+ ipmi_monitoring_ctx_errormsg (ctx));
+ goto cleanup;
+ }
+
+ if ((sensor_type = ipmi_monitoring_sel_read_sensor_type (ctx)) < 0)
+ {
+ error( "ipmi_monitoring_sel_read_sensor_type(): %s",
+ ipmi_monitoring_ctx_errormsg (ctx));
+ goto cleanup;
+ }
+
+ if ((sensor_number = ipmi_monitoring_sel_read_sensor_number (ctx)) < 0)
+ {
+ error( "ipmi_monitoring_sel_read_sensor_number(): %s",
+ ipmi_monitoring_ctx_errormsg (ctx));
+ goto cleanup;
+ }
+
+ if ((event_direction = ipmi_monitoring_sel_read_event_direction (ctx)) < 0)
+ {
+ error( "ipmi_monitoring_sel_read_event_direction(): %s",
+ ipmi_monitoring_ctx_errormsg (ctx));
+ goto cleanup;
+ }
+
+ if ((event_type_code = ipmi_monitoring_sel_read_event_type_code (ctx)) < 0)
+ {
+ error( "ipmi_monitoring_sel_read_event_type_code(): %s",
+ ipmi_monitoring_ctx_errormsg (ctx));
+ goto cleanup;
+ }
+
+ if (ipmi_monitoring_sel_read_event_data (ctx,
+ &event_data1,
+ &event_data2,
+ &event_data3) < 0)
+ {
+ error( "ipmi_monitoring_sel_read_event_data(): %s",
+ ipmi_monitoring_ctx_errormsg (ctx));
+ goto cleanup;
+ }
+
+ if ((event_offset_type = ipmi_monitoring_sel_read_event_offset_type (ctx)) < 0)
+ {
+ error( "ipmi_monitoring_sel_read_event_offset_type(): %s",
+ ipmi_monitoring_ctx_errormsg (ctx));
+ goto cleanup;
+ }
+
+ if ((event_offset = ipmi_monitoring_sel_read_event_offset (ctx)) < 0)
+ {
+ error( "ipmi_monitoring_sel_read_event_offset(): %s",
+ ipmi_monitoring_ctx_errormsg (ctx));
+ goto cleanup;
+ }
+
+ if (!(event_offset_string = ipmi_monitoring_sel_read_event_offset_string (ctx)))
+ {
+ error( "ipmi_monitoring_sel_read_event_offset_string(): %s",
+ ipmi_monitoring_ctx_errormsg (ctx));
+ goto cleanup;
+ }
+
+ if (!strlen (sensor_name))
+ sensor_name = "N/A";
+
+ sensor_type_str = _get_sensor_type_string (sensor_type);
+
+ if (event_direction == IPMI_MONITORING_SEL_EVENT_DIRECTION_ASSERTION)
+ event_direction_str = "Assertion";
+ else
+ event_direction_str = "Deassertion";
+
+ printf (", %s, %s, %d, %s, %Xh, %Xh-%Xh-%Xh",
+ sensor_name,
+ sensor_type_str,
+ sensor_number,
+ event_direction_str,
+ event_type_code,
+ event_data1,
+ event_data2,
+ event_data3);
+
+ if (event_offset_type != IPMI_MONITORING_EVENT_OFFSET_TYPE_UNKNOWN)
+ printf (", %Xh", event_offset);
+ else
+ printf (", N/A");
+
+ if (event_offset_type != IPMI_MONITORING_EVENT_OFFSET_TYPE_UNKNOWN)
+ printf (", %s", event_offset_string);
+ else
+ printf (", N/A");
+ }
+ else if (record_type_class == IPMI_MONITORING_SEL_RECORD_TYPE_CLASS_TIMESTAMPED_OEM_RECORD
+ || record_type_class == IPMI_MONITORING_SEL_RECORD_TYPE_CLASS_NON_TIMESTAMPED_OEM_RECORD)
+ {
+ if (record_type_class == IPMI_MONITORING_SEL_RECORD_TYPE_CLASS_TIMESTAMPED_OEM_RECORD)
+ {
+ if ((manufacturer_id = ipmi_monitoring_sel_read_manufacturer_id (ctx)) < 0)
+ {
+ error( "ipmi_monitoring_sel_read_manufacturer_id(): %s",
+ ipmi_monitoring_ctx_errormsg (ctx));
+ goto cleanup;
+ }
+
+ printf (", Manufacturer ID = %Xh", manufacturer_id);
+ }
+
+          if ((oem_data_len = ipmi_monitoring_sel_read_oem_data (ctx, oem_data, sizeof (oem_data))) < 0)
+ {
+ error( "ipmi_monitoring_sel_read_oem_data(): %s",
+ ipmi_monitoring_ctx_errormsg (ctx));
+ goto cleanup;
+ }
+
+ printf (", OEM Data = ");
+
+ for (j = 0; j < oem_data_len; j++)
+ printf ("%02Xh ", oem_data[j]);
+ }
+ else
+ printf (", N/A, N/A, N/A, N/A, N/A, N/A, N/A");
+
+ printf ("\n");
+#endif // NETDATA_COMMENTED
+ }
+
+ rv = 0;
+ cleanup:
+ if (ctx)
+ ipmi_monitoring_ctx_destroy (ctx);
+ return (rv);
+}
+
+// ----------------------------------------------------------------------------
+// MAIN PROGRAM FOR NETDATA PLUGIN
+
+int ipmi_collect_data(struct ipmi_monitoring_ipmi_config *ipmi_config) {
+ errno = 0;
+
+ if (_ipmimonitoring_sensors(ipmi_config) < 0) return -1;
+
+ if(netdata_do_sel) {
+ if(_ipmimonitoring_sel(ipmi_config) < 0) return -2;
+ }
+
+ return 0;
+}
+
+int ipmi_detect_speed_secs(struct ipmi_monitoring_ipmi_config *ipmi_config) {
+ int i, checks = 10;
+ unsigned long long total = 0;
+
+ for(i = 0 ; i < checks ; i++) {
+ if(debug) fprintf(stderr, "freeipmi.plugin: checking data collection speed iteration %d of %d\n", i+1, checks);
+
+ // measure the time a data collection needs
+ unsigned long long start = now_realtime_usec();
+ if(ipmi_collect_data(ipmi_config) < 0)
+ fatal("freeipmi.plugin: data collection failed.");
+
+ unsigned long long end = now_realtime_usec();
+
+ if(debug) fprintf(stderr, "freeipmi.plugin: data collection speed was %llu usec\n", end - start);
+
+ // add it to our total
+ total += end - start;
+
+ // wait the same time
+ // to avoid flooding the IPMI processor with requests
+ sleep_usec(end - start);
+ }
+
+ // so, we assume it needed 2x the time
+ // we find the average in microseconds
+    // and we round up to the next whole second
+
+ return (int)(( total * 2 / checks / 1000000 ) + 1);
+}
+
+int main (int argc, char **argv) {
+
+ // ------------------------------------------------------------------------
+ // initialization of netdata plugin
+
+ program_name = "freeipmi.plugin";
+
+ // disable syslog
+ error_log_syslog = 0;
+
+ // set errors flood protection to 100 logs per hour
+ error_log_errors_per_period = 100;
+ error_log_throttle_period = 3600;
+
+
+ // ------------------------------------------------------------------------
+ // parse command line parameters
+
+ int i, freq = 0;
+ for(i = 1; i < argc ; i++) {
+ if(isdigit(*argv[i]) && !freq) {
+ int n = str2i(argv[i]);
+ if(n > 0 && n < 86400) {
+ freq = n;
+ continue;
+ }
+ }
+ else if(strcmp("version", argv[i]) == 0 || strcmp("-version", argv[i]) == 0 || strcmp("--version", argv[i]) == 0 || strcmp("-v", argv[i]) == 0 || strcmp("-V", argv[i]) == 0) {
+ printf("freeipmi.plugin %s\n", VERSION);
+ exit(0);
+ }
+ else if(strcmp("debug", argv[i]) == 0) {
+ debug = 1;
+ continue;
+ }
+ else if(strcmp("sel", argv[i]) == 0) {
+ netdata_do_sel = 1;
+ continue;
+ }
+ else if(strcmp("no-sel", argv[i]) == 0) {
+ netdata_do_sel = 0;
+ continue;
+ }
+ else if(strcmp("-h", argv[i]) == 0 || strcmp("--help", argv[i]) == 0) {
+ fprintf(stderr,
+ "\n"
+ " netdata freeipmi.plugin %s\n"
+ " Copyright (C) 2016-2017 Costa Tsaousis <costa@tsaousis.gr>\n"
+ " Released under GNU General Public License v3 or later.\n"
+ " All rights reserved.\n"
+ "\n"
+ " This program is a data collector plugin for netdata.\n"
+ "\n"
+ " Available command line options:\n"
+ "\n"
+ " SECONDS data collection frequency\n"
+ " minimum: %d\n"
+ "\n"
+ " debug enable verbose output\n"
+ " default: disabled\n"
+ "\n"
+ " sel\n"
+ " no-sel enable/disable SEL collection\n"
+ " default: %s\n"
+ "\n"
+ " hostname HOST\n"
+ " username USER\n"
+ " password PASS connect to remote IPMI host\n"
+ " default: local IPMI processor\n"
+ "\n"
+ " sdr-cache-dir PATH directory for SDR cache files\n"
+ " default: %s\n"
+ "\n"
+ " sensor-config-file FILE filename to read sensor configuration\n"
+ " default: %s\n"
+ "\n"
+ " ignore N1,N2,N3,... sensor IDs to ignore\n"
+ " default: none\n"
+ "\n"
+ " ignore-status N1,N2,N3,... sensor IDs to ignore status (nominal/warning/critical)\n"
+ " default: none\n"
+ "\n"
+ " -v\n"
+ " -V\n"
+ " version print version and exit\n"
+ "\n"
+ " Linux kernel module for IPMI is CPU hungry.\n"
+ " On Linux run this to lower kipmiN CPU utilization:\n"
+ " # echo 10 > /sys/module/ipmi_si/parameters/kipmid_max_busy_us\n"
+ "\n"
+ " or create: /etc/modprobe.d/ipmi.conf with these contents:\n"
+ " options ipmi_si kipmid_max_busy_us=10\n"
+ "\n"
+ " For more information:\n"
+ " https://github.com/ktsaou/netdata/tree/master/plugins/freeipmi.plugin\n"
+ "\n"
+ , VERSION
+ , netdata_update_every
+ , netdata_do_sel?"enabled":"disabled"
+ , sdr_cache_directory?sdr_cache_directory:"system default"
+ , sensor_config_file?sensor_config_file:"system default"
+ );
+ exit(1);
+ }
+ else if(i < argc && strcmp("hostname", argv[i]) == 0) {
+ hostname = strdupz(argv[++i]);
+ char *s = argv[i];
+            // mask it so it is hidden from the process tree
+ while(*s) *s++ = 'x';
+ if(debug) fprintf(stderr, "freeipmi.plugin: hostname set to '%s'\n", hostname);
+ continue;
+ }
+ else if(i < argc && strcmp("username", argv[i]) == 0) {
+ username = strdupz(argv[++i]);
+ char *s = argv[i];
+            // mask it so it is hidden from the process tree
+ while(*s) *s++ = 'x';
+ if(debug) fprintf(stderr, "freeipmi.plugin: username set to '%s'\n", username);
+ continue;
+ }
+ else if(i < argc && strcmp("password", argv[i]) == 0) {
+ password = strdupz(argv[++i]);
+ char *s = argv[i];
+            // mask it so it is hidden from the process tree
+ while(*s) *s++ = 'x';
+ if(debug) fprintf(stderr, "freeipmi.plugin: password set to '%s'\n", password);
+ continue;
+ }
+ else if(i < argc && strcmp("sdr-cache-dir", argv[i]) == 0) {
+ sdr_cache_directory = argv[++i];
+ if(debug) fprintf(stderr, "freeipmi.plugin: SDR cache directory set to '%s'\n", sdr_cache_directory);
+ continue;
+ }
+ else if(i < argc && strcmp("sensor-config-file", argv[i]) == 0) {
+ sensor_config_file = argv[++i];
+ if(debug) fprintf(stderr, "freeipmi.plugin: sensor config file set to '%s'\n", sensor_config_file);
+ continue;
+ }
+ else if(i < argc && strcmp("ignore", argv[i]) == 0) {
+ excluded_record_ids_parse(argv[++i]);
+ continue;
+ }
+ else if(i < argc && strcmp("ignore-status", argv[i]) == 0) {
+ excluded_status_record_ids_parse(argv[++i]);
+ continue;
+ }
+
+ error("freeipmi.plugin: ignoring parameter '%s'", argv[i]);
+ }
+
+ errno = 0;
+
+ if(freq > netdata_update_every)
+ netdata_update_every = freq;
+
+ else if(freq)
+ error("update frequency %d seconds is too small for IPMI. Using %d.", freq, netdata_update_every);
+
+
+ // ------------------------------------------------------------------------
+ // initialize IPMI
+
+ struct ipmi_monitoring_ipmi_config ipmi_config;
+
+ if(debug) fprintf(stderr, "freeipmi.plugin: calling _init_ipmi_config()\n");
+
+ _init_ipmi_config(&ipmi_config);
+
+ if(debug) fprintf(stderr, "freeipmi.plugin: calling ipmi_monitoring_init()\n");
+
+ if(ipmi_monitoring_init(ipmimonitoring_init_flags, &errnum) < 0)
+ fatal("ipmi_monitoring_init: %s", ipmi_monitoring_ctx_strerror(errnum));
+
+ if(debug) fprintf(stderr, "freeipmi.plugin: detecting IPMI minimum update frequency...\n");
+ freq = ipmi_detect_speed_secs(&ipmi_config);
+ if(debug) fprintf(stderr, "freeipmi.plugin: IPMI minimum update frequency was calculated to %d seconds.\n", freq);
+
+ if(freq > netdata_update_every) {
+ info("enforcing minimum data collection frequency, calculated to %d seconds.", freq);
+ netdata_update_every = freq;
+ }
+
+
+ // ------------------------------------------------------------------------
+ // the main loop
+
+ if(debug) fprintf(stderr, "freeipmi.plugin: starting data collection\n");
+
+ time_t started_t = now_monotonic_sec();
+
+ size_t iteration = 0;
+ usec_t step = netdata_update_every * USEC_PER_SEC;
+
+ heartbeat_t hb;
+ heartbeat_init(&hb);
+ for(iteration = 0; 1 ; iteration++) {
+ usec_t dt = heartbeat_next(&hb, step);
+
+ if(debug && iteration)
+ fprintf(stderr, "freeipmi.plugin: iteration %zu, dt %llu usec, sensors collected %zu, sensors sent to netdata %zu \n"
+ , iteration
+ , dt
+ , netdata_sensors_collected
+ , netdata_sensors_updated
+ );
+
+ netdata_mark_as_not_updated();
+
+ if(debug) fprintf(stderr, "freeipmi.plugin: calling ipmi_collect_data()\n");
+ if(ipmi_collect_data(&ipmi_config) < 0)
+ fatal("data collection failed.");
+
+ if(debug) fprintf(stderr, "freeipmi.plugin: calling send_metrics_to_netdata()\n");
+ send_metrics_to_netdata();
+ fflush(stdout);
+
+ // restart check (14400 seconds)
+ if(now_monotonic_sec() - started_t > 14400) exit(0);
+ }
+}
+
+#else // !HAVE_FREEIPMI
+
+int main(int argc, char **argv) {
+ fatal("freeipmi.plugin is not compiled.");
+}
+
+#endif // !HAVE_FREEIPMI
diff --git a/collectors/idlejitter.plugin/Makefile.am b/collectors/idlejitter.plugin/Makefile.am
new file mode 100644
index 000000000..19554bed8
--- /dev/null
+++ b/collectors/idlejitter.plugin/Makefile.am
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+
+dist_noinst_DATA = \
+ README.md \
+ $(NULL)
diff --git a/collectors/idlejitter.plugin/Makefile.in b/collectors/idlejitter.plugin/Makefile.in
new file mode 100644
index 000000000..973a3bef7
--- /dev/null
+++ b/collectors/idlejitter.plugin/Makefile.in
@@ -0,0 +1,464 @@
+# Makefile.in generated by automake 1.14.1 from Makefile.am.
+# @configure_input@
+
+# Copyright (C) 1994-2013 Free Software Foundation, Inc.
+
+# This Makefile.in is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
+# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE.
+
+@SET_MAKE@
+
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+VPATH = @srcdir@
+am__is_gnu_make = test -n '$(MAKEFILE_LIST)' && test -n '$(MAKELEVEL)'
+am__make_running_with_option = \
+ case $${target_option-} in \
+ ?) ;; \
+ *) echo "am__make_running_with_option: internal error: invalid" \
+ "target option '$${target_option-}' specified" >&2; \
+ exit 1;; \
+ esac; \
+ has_opt=no; \
+ sane_makeflags=$$MAKEFLAGS; \
+ if $(am__is_gnu_make); then \
+ sane_makeflags=$$MFLAGS; \
+ else \
+ case $$MAKEFLAGS in \
+ *\\[\ \ ]*) \
+ bs=\\; \
+ sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \
+ | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \
+ esac; \
+ fi; \
+ skip_next=no; \
+ strip_trailopt () \
+ { \
+ flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \
+ }; \
+ for flg in $$sane_makeflags; do \
+ test $$skip_next = yes && { skip_next=no; continue; }; \
+ case $$flg in \
+ *=*|--*) continue;; \
+ -*I) strip_trailopt 'I'; skip_next=yes;; \
+ -*I?*) strip_trailopt 'I';; \
+ -*O) strip_trailopt 'O'; skip_next=yes;; \
+ -*O?*) strip_trailopt 'O';; \
+ -*l) strip_trailopt 'l'; skip_next=yes;; \
+ -*l?*) strip_trailopt 'l';; \
+ -[dEDm]) skip_next=yes;; \
+ -[JT]) skip_next=yes;; \
+ esac; \
+ case $$flg in \
+ *$$target_option*) has_opt=yes; break;; \
+ esac; \
+ done; \
+ test $$has_opt = yes
+am__make_dryrun = (target_option=n; $(am__make_running_with_option))
+am__make_keepgoing = (target_option=k; $(am__make_running_with_option))
+pkgdatadir = $(datadir)/@PACKAGE@
+pkgincludedir = $(includedir)/@PACKAGE@
+pkglibdir = $(libdir)/@PACKAGE@
+pkglibexecdir = $(libexecdir)/@PACKAGE@
+am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd
+install_sh_DATA = $(install_sh) -c -m 644
+install_sh_PROGRAM = $(install_sh) -c
+install_sh_SCRIPT = $(install_sh) -c
+INSTALL_HEADER = $(INSTALL_DATA)
+transform = $(program_transform_name)
+NORMAL_INSTALL = :
+PRE_INSTALL = :
+POST_INSTALL = :
+NORMAL_UNINSTALL = :
+PRE_UNINSTALL = :
+POST_UNINSTALL = :
+build_triplet = @build@
+host_triplet = @host@
+subdir = collectors/idlejitter.plugin
+DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/Makefile.am \
+ $(dist_noinst_DATA)
+ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
+am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \
+ $(top_srcdir)/build/m4/ax_c__generic.m4 \
+ $(top_srcdir)/build/m4/ax_c_lto.m4 \
+ $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \
+ $(top_srcdir)/build/m4/ax_c_mallopt.m4 \
+ $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \
+ $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \
+ $(top_srcdir)/build/m4/ax_pthread.m4 \
+ $(top_srcdir)/build/m4/jemalloc.m4 \
+ $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac
+am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
+ $(ACLOCAL_M4)
+mkinstalldirs = $(install_sh) -d
+CONFIG_HEADER = $(top_builddir)/config.h
+CONFIG_CLEAN_FILES =
+CONFIG_CLEAN_VPATH_FILES =
+AM_V_P = $(am__v_P_@AM_V@)
+am__v_P_ = $(am__v_P_@AM_DEFAULT_V@)
+am__v_P_0 = false
+am__v_P_1 = :
+AM_V_GEN = $(am__v_GEN_@AM_V@)
+am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@)
+am__v_GEN_0 = @echo " GEN " $@;
+am__v_GEN_1 =
+AM_V_at = $(am__v_at_@AM_V@)
+am__v_at_ = $(am__v_at_@AM_DEFAULT_V@)
+am__v_at_0 = @
+am__v_at_1 =
+SOURCES =
+DIST_SOURCES =
+am__can_run_installinfo = \
+ case $$AM_UPDATE_INFO_DIR in \
+ n|no|NO) false;; \
+ *) (install-info --version) >/dev/null 2>&1;; \
+ esac
+DATA = $(dist_noinst_DATA)
+am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP)
+DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
+ACLOCAL = @ACLOCAL@
+AMTAR = @AMTAR@
+AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@
+AUTOCONF = @AUTOCONF@
+AUTOHEADER = @AUTOHEADER@
+AUTOMAKE = @AUTOMAKE@
+AWK = @AWK@
+CC = @CC@
+CCDEPMODE = @CCDEPMODE@
+CFLAGS = @CFLAGS@
+CPP = @CPP@
+CPPFLAGS = @CPPFLAGS@
+CYGPATH_W = @CYGPATH_W@
+DEFS = @DEFS@
+DEPDIR = @DEPDIR@
+ECHO_C = @ECHO_C@
+ECHO_N = @ECHO_N@
+ECHO_T = @ECHO_T@
+EGREP = @EGREP@
+EXEEXT = @EXEEXT@
+GREP = @GREP@
+INSTALL = @INSTALL@
+INSTALL_DATA = @INSTALL_DATA@
+INSTALL_PROGRAM = @INSTALL_PROGRAM@
+INSTALL_SCRIPT = @INSTALL_SCRIPT@
+INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@
+IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@
+LDFLAGS = @LDFLAGS@
+LIBCAP_CFLAGS = @LIBCAP_CFLAGS@
+LIBCAP_LIBS = @LIBCAP_LIBS@
+LIBMNL_CFLAGS = @LIBMNL_CFLAGS@
+LIBMNL_LIBS = @LIBMNL_LIBS@
+LIBOBJS = @LIBOBJS@
+LIBS = @LIBS@
+LTLIBOBJS = @LTLIBOBJS@
+MAINT = @MAINT@
+MAKEINFO = @MAKEINFO@
+MATH_CFLAGS = @MATH_CFLAGS@
+MATH_LIBS = @MATH_LIBS@
+MKDIR_P = @MKDIR_P@
+NFACCT_CFLAGS = @NFACCT_CFLAGS@
+NFACCT_LIBS = @NFACCT_LIBS@
+OBJEXT = @OBJEXT@
+OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@
+OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@
+OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@
+OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@
+OPTIONAL_MATH_CLFAGS = @OPTIONAL_MATH_CLFAGS@
+OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@
+OPTIONAL_NFACCT_CLFAGS = @OPTIONAL_NFACCT_CLFAGS@
+OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@
+OPTIONAL_UUID_CLFAGS = @OPTIONAL_UUID_CLFAGS@
+OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@
+OPTIONAL_ZLIB_CLFAGS = @OPTIONAL_ZLIB_CLFAGS@
+OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@
+PACKAGE = @PACKAGE@
+PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@
+PACKAGE_NAME = @PACKAGE_NAME@
+PACKAGE_RPM_RELEASE = @PACKAGE_RPM_RELEASE@
+PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@
+PACKAGE_STRING = @PACKAGE_STRING@
+PACKAGE_TARNAME = @PACKAGE_TARNAME@
+PACKAGE_URL = @PACKAGE_URL@
+PACKAGE_VERSION = @PACKAGE_VERSION@
+PATH_SEPARATOR = @PATH_SEPARATOR@
+PKG_CONFIG = @PKG_CONFIG@
+PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@
+PKG_CONFIG_PATH = @PKG_CONFIG_PATH@
+PTHREAD_CC = @PTHREAD_CC@
+PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
+PTHREAD_LIBS = @PTHREAD_LIBS@
+SET_MAKE = @SET_MAKE@
+SHELL = @SHELL@
+SSE_CANDIDATE = @SSE_CANDIDATE@
+STRIP = @STRIP@
+UUID_CFLAGS = @UUID_CFLAGS@
+UUID_LIBS = @UUID_LIBS@
+VERSION = @VERSION@
+ZLIB_CFLAGS = @ZLIB_CFLAGS@
+ZLIB_LIBS = @ZLIB_LIBS@
+abs_builddir = @abs_builddir@
+abs_srcdir = @abs_srcdir@
+abs_top_builddir = @abs_top_builddir@
+abs_top_srcdir = @abs_top_srcdir@
+ac_ct_CC = @ac_ct_CC@
+am__include = @am__include@
+am__leading_dot = @am__leading_dot@
+am__quote = @am__quote@
+am__tar = @am__tar@
+am__untar = @am__untar@
+ax_pthread_config = @ax_pthread_config@
+bindir = @bindir@
+build = @build@
+build_alias = @build_alias@
+build_cpu = @build_cpu@
+build_os = @build_os@
+build_target = @build_target@
+build_vendor = @build_vendor@
+builddir = @builddir@
+cachedir = @cachedir@
+chartsdir = @chartsdir@
+configdir = @configdir@
+datadir = @datadir@
+datarootdir = @datarootdir@
+docdir = @docdir@
+dvidir = @dvidir@
+exec_prefix = @exec_prefix@
+has_jemalloc = @has_jemalloc@
+has_tcmalloc = @has_tcmalloc@
+host = @host@
+host_alias = @host_alias@
+host_cpu = @host_cpu@
+host_os = @host_os@
+host_vendor = @host_vendor@
+htmldir = @htmldir@
+includedir = @includedir@
+infodir = @infodir@
+install_sh = @install_sh@
+libconfigdir = @libconfigdir@
+libdir = @libdir@
+libexecdir = @libexecdir@
+localedir = @localedir@
+localstatedir = @localstatedir@
+logdir = @logdir@
+mandir = @mandir@
+mkdir_p = @mkdir_p@
+nodedir = @nodedir@
+oldincludedir = @oldincludedir@
+pdfdir = @pdfdir@
+pluginsdir = @pluginsdir@
+prefix = @prefix@
+program_transform_name = @program_transform_name@
+psdir = @psdir@
+pythondir = @pythondir@
+registrydir = @registrydir@
+sbindir = @sbindir@
+sharedstatedir = @sharedstatedir@
+srcdir = @srcdir@
+sysconfdir = @sysconfdir@
+target_alias = @target_alias@
+top_build_prefix = @top_build_prefix@
+top_builddir = @top_builddir@
+top_srcdir = @top_srcdir@
+varlibdir = @varlibdir@
+webdir = @webdir@
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+dist_noinst_DATA = \
+ README.md \
+ $(NULL)
+
+all: all-am
+
+.SUFFIXES:
+$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps)
+ @for dep in $?; do \
+ case '$(am__configure_deps)' in \
+ *$$dep*) \
+ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \
+ && { if test -f $@; then exit 0; else break; fi; }; \
+ exit 1;; \
+ esac; \
+ done; \
+ echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu collectors/idlejitter.plugin/Makefile'; \
+ $(am__cd) $(top_srcdir) && \
+ $(AUTOMAKE) --gnu collectors/idlejitter.plugin/Makefile
+.PRECIOUS: Makefile
+Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
+ @case '$?' in \
+ *config.status*) \
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \
+ *) \
+ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \
+ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \
+ esac;
+
+$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+
+$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(am__aclocal_m4_deps):
+tags TAGS:
+
+ctags CTAGS:
+
+cscope cscopelist:
+
+
+distdir: $(DISTFILES)
+ @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+ list='$(DISTFILES)'; \
+ dist_files=`for file in $$list; do echo $$file; done | \
+ sed -e "s|^$$srcdirstrip/||;t" \
+ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \
+ case $$dist_files in \
+ */*) $(MKDIR_P) `echo "$$dist_files" | \
+ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \
+ sort -u` ;; \
+ esac; \
+ for file in $$dist_files; do \
+ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \
+ if test -d $$d/$$file; then \
+ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \
+ if test -d "$(distdir)/$$file"; then \
+ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+ fi; \
+ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \
+ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \
+ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+ fi; \
+ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \
+ else \
+ test -f "$(distdir)/$$file" \
+ || cp -p $$d/$$file "$(distdir)/$$file" \
+ || exit 1; \
+ fi; \
+ done
+check-am: all-am
+check: check-am
+all-am: Makefile $(DATA)
+installdirs:
+install: install-am
+install-exec: install-exec-am
+install-data: install-data-am
+uninstall: uninstall-am
+
+install-am: all-am
+ @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
+
+installcheck: installcheck-am
+install-strip:
+ if test -z '$(STRIP)'; then \
+ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+ install; \
+ else \
+ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+ "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \
+ fi
+mostlyclean-generic:
+
+clean-generic:
+
+distclean-generic:
+ -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
+ -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES)
+
+maintainer-clean-generic:
+ @echo "This command is intended for maintainers to use"
+ @echo "it deletes files that may require special tools to rebuild."
+ -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES)
+clean: clean-am
+
+clean-am: clean-generic mostlyclean-am
+
+distclean: distclean-am
+ -rm -f Makefile
+distclean-am: clean-am distclean-generic
+
+dvi: dvi-am
+
+dvi-am:
+
+html: html-am
+
+html-am:
+
+info: info-am
+
+info-am:
+
+install-data-am:
+
+install-dvi: install-dvi-am
+
+install-dvi-am:
+
+install-exec-am:
+
+install-html: install-html-am
+
+install-html-am:
+
+install-info: install-info-am
+
+install-info-am:
+
+install-man:
+
+install-pdf: install-pdf-am
+
+install-pdf-am:
+
+install-ps: install-ps-am
+
+install-ps-am:
+
+installcheck-am:
+
+maintainer-clean: maintainer-clean-am
+ -rm -f Makefile
+maintainer-clean-am: distclean-am maintainer-clean-generic
+
+mostlyclean: mostlyclean-am
+
+mostlyclean-am: mostlyclean-generic
+
+pdf: pdf-am
+
+pdf-am:
+
+ps: ps-am
+
+ps-am:
+
+uninstall-am:
+
+.MAKE: install-am install-strip
+
+.PHONY: all all-am check check-am clean clean-generic cscopelist-am \
+ ctags-am distclean distclean-generic distdir dvi dvi-am html \
+ html-am info info-am install install-am install-data \
+ install-data-am install-dvi install-dvi-am install-exec \
+ install-exec-am install-html install-html-am install-info \
+ install-info-am install-man install-pdf install-pdf-am \
+ install-ps install-ps-am install-strip installcheck \
+ installcheck-am installdirs maintainer-clean \
+ maintainer-clean-generic mostlyclean mostlyclean-generic pdf \
+ pdf-am ps ps-am tags-am uninstall uninstall-am
+
+
+# Tell versions [3.59,3.63) of GNU make to not export all variables.
+# Otherwise a system limit (for SysV at least) may be exceeded.
+.NOEXPORT:
diff --git a/collectors/idlejitter.plugin/README.md b/collectors/idlejitter.plugin/README.md
new file mode 100644
index 000000000..3c2080536
--- /dev/null
+++ b/collectors/idlejitter.plugin/README.md
@@ -0,0 +1,13 @@
+## idlejitter.plugin
+
+It works like this:
+
+A thread is spawned that requests to sleep for 20000 microseconds (20ms).
+When the system wakes it up, it measures how many microseconds have actually passed.
+The difference between the requested and the actual duration of the sleep is the idle jitter.
+This is done at most 50 times per second, to ensure we have a good average.
+
+This number is useful:
+
+ 1. in real-time environments, where CPU jitter can affect the quality of the service (like VoIP media gateways).
+ 2. in cloud infrastructure, where the host can pause the VM or container for a small duration to perform operations at the host level.
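+
+Below is a minimal standalone sketch of this measurement, using only POSIX `clock_gettime()` and `usleep()`. The helper name, the fixed 20ms period and the 50-sample loop are illustrative assumptions, not the plugin's internal API (the real collector uses netdata's own timing helpers and charting calls, shown in `plugin_idlejitter.c`):
+
+```c
+#include <stdio.h>
+#include <time.h>
+#include <unistd.h>
+
+#define REQUESTED_SLEEP_USEC 20000ULL   /* 20ms, the plugin's default loop time */
+
+/* monotonic clock in microseconds (hypothetical helper, not netdata's) */
+static unsigned long long now_monotonic_usec(void) {
+    struct timespec ts;
+    clock_gettime(CLOCK_MONOTONIC, &ts);
+    return (unsigned long long)ts.tv_sec * 1000000ULL + (unsigned long long)ts.tv_nsec / 1000ULL;
+}
+
+int main(void) {
+    for (int i = 0; i < 50; i++) {                    /* roughly one second of samples */
+        unsigned long long before = now_monotonic_usec();
+        usleep(REQUESTED_SLEEP_USEC);                 /* ask the OS to sleep 20ms */
+        unsigned long long elapsed = now_monotonic_usec() - before;
+
+        /* the idle jitter: how much longer the sleep lasted than requested */
+        unsigned long long jitter = (elapsed > REQUESTED_SLEEP_USEC) ? elapsed - REQUESTED_SLEEP_USEC : 0;
+        printf("sample %2d: requested %llu us, slept %llu us, jitter %llu us\n",
+               i, (unsigned long long)REQUESTED_SLEEP_USEC, elapsed, jitter);
+    }
+    return 0;
+}
+```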
diff --git a/collectors/idlejitter.plugin/plugin_idlejitter.c b/collectors/idlejitter.plugin/plugin_idlejitter.c
new file mode 100644
index 000000000..3fe3b0306
--- /dev/null
+++ b/collectors/idlejitter.plugin/plugin_idlejitter.c
@@ -0,0 +1,92 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "plugin_idlejitter.h"
+
+#define CPU_IDLEJITTER_SLEEP_TIME_MS 20
+
+static void cpuidlejitter_main_cleanup(void *ptr) {
+ struct netdata_static_thread *static_thread = (struct netdata_static_thread *)ptr;
+ static_thread->enabled = NETDATA_MAIN_THREAD_EXITING;
+
+ info("cleaning up...");
+
+ static_thread->enabled = NETDATA_MAIN_THREAD_EXITED;
+}
+
+void *cpuidlejitter_main(void *ptr) {
+ netdata_thread_cleanup_push(cpuidlejitter_main_cleanup, ptr);
+
+ usec_t sleep_ut = config_get_number("plugin:idlejitter", "loop time in ms", CPU_IDLEJITTER_SLEEP_TIME_MS) * USEC_PER_MS;
+ if(sleep_ut <= 0) {
+ config_set_number("plugin:idlejitter", "loop time in ms", CPU_IDLEJITTER_SLEEP_TIME_MS);
+ sleep_ut = CPU_IDLEJITTER_SLEEP_TIME_MS * USEC_PER_MS;
+ }
+
+ RRDSET *st = rrdset_create_localhost(
+ "system"
+ , "idlejitter"
+ , NULL
+ , "idlejitter"
+ , NULL
+ , "CPU Idle Jitter"
+ , "microseconds lost/s"
+ , "idlejitter.plugin"
+ , NULL
+ , NETDATA_CHART_PRIO_SYSTEM_IDLEJITTER
+ , localhost->rrd_update_every
+ , RRDSET_TYPE_AREA
+ );
+ RRDDIM *rd_min = rrddim_add(st, "min", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ RRDDIM *rd_max = rrddim_add(st, "max", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ RRDDIM *rd_avg = rrddim_add(st, "average", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+
+ usec_t update_every_ut = localhost->rrd_update_every * USEC_PER_SEC;
+ struct timeval before, after;
+ unsigned long long counter;
+
+ for(counter = 0; 1 ;counter++) {
+ int iterations = 0;
+ usec_t error_total = 0,
+ error_min = 0,
+ error_max = 0,
+ elapsed = 0;
+
+ if(netdata_exit) break;
+
+ while(elapsed < update_every_ut) {
+ now_monotonic_timeval(&before);
+ sleep_usec(sleep_ut);
+ now_monotonic_timeval(&after);
+
+ usec_t dt = dt_usec(&after, &before);
+ elapsed += dt;
+
+ usec_t error = dt - sleep_ut;
+ error_total += error;
+
+ if(unlikely(!iterations))
+ error_min = error;
+ else if(error < error_min)
+ error_min = error;
+
+ if(error > error_max)
+ error_max = error;
+
+ iterations++;
+ }
+
+ if(netdata_exit) break;
+
+ if(iterations) {
+ if (likely(counter)) rrdset_next(st);
+ rrddim_set_by_pointer(st, rd_min, error_min);
+ rrddim_set_by_pointer(st, rd_max, error_max);
+ rrddim_set_by_pointer(st, rd_avg, error_total / iterations);
+ rrdset_done(st);
+ }
+ }
+
+ netdata_thread_cleanup_pop(1);
+ return NULL;
+}
+
diff --git a/collectors/idlejitter.plugin/plugin_idlejitter.h b/collectors/idlejitter.plugin/plugin_idlejitter.h
new file mode 100644
index 000000000..62fabea16
--- /dev/null
+++ b/collectors/idlejitter.plugin/plugin_idlejitter.h
@@ -0,0 +1,21 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_PLUGIN_IDLEJITTER_H
+#define NETDATA_PLUGIN_IDLEJITTER_H 1
+
+#include "../../daemon/common.h"
+
+#define NETDATA_PLUGIN_HOOK_IDLEJITTER \
+ { \
+ .name = "PLUGIN[idlejitter]", \
+ .config_section = CONFIG_SECTION_PLUGINS, \
+ .config_name = "idlejitter", \
+ .enabled = 1, \
+ .thread = NULL, \
+ .init_routine = NULL, \
+ .start_routine = cpuidlejitter_main \
+ },
+
+extern void *cpuidlejitter_main(void *ptr);
+
+#endif /* NETDATA_PLUGIN_IDLEJITTER_H */
diff --git a/collectors/macos.plugin/Makefile.am b/collectors/macos.plugin/Makefile.am
new file mode 100644
index 000000000..babdcf0df
--- /dev/null
+++ b/collectors/macos.plugin/Makefile.am
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
diff --git a/collectors/macos.plugin/Makefile.in b/collectors/macos.plugin/Makefile.in
new file mode 100644
index 000000000..6247dda70
--- /dev/null
+++ b/collectors/macos.plugin/Makefile.in
@@ -0,0 +1,457 @@
+# Makefile.in generated by automake 1.14.1 from Makefile.am.
+# @configure_input@
+
+# Copyright (C) 1994-2013 Free Software Foundation, Inc.
+
+# This Makefile.in is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
+# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE.
+
+@SET_MAKE@
+
+# SPDX-License-Identifier: GPL-3.0-or-later
+VPATH = @srcdir@
+am__is_gnu_make = test -n '$(MAKEFILE_LIST)' && test -n '$(MAKELEVEL)'
+am__make_running_with_option = \
+ case $${target_option-} in \
+ ?) ;; \
+ *) echo "am__make_running_with_option: internal error: invalid" \
+ "target option '$${target_option-}' specified" >&2; \
+ exit 1;; \
+ esac; \
+ has_opt=no; \
+ sane_makeflags=$$MAKEFLAGS; \
+ if $(am__is_gnu_make); then \
+ sane_makeflags=$$MFLAGS; \
+ else \
+ case $$MAKEFLAGS in \
+ *\\[\ \ ]*) \
+ bs=\\; \
+ sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \
+ | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \
+ esac; \
+ fi; \
+ skip_next=no; \
+ strip_trailopt () \
+ { \
+ flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \
+ }; \
+ for flg in $$sane_makeflags; do \
+ test $$skip_next = yes && { skip_next=no; continue; }; \
+ case $$flg in \
+ *=*|--*) continue;; \
+ -*I) strip_trailopt 'I'; skip_next=yes;; \
+ -*I?*) strip_trailopt 'I';; \
+ -*O) strip_trailopt 'O'; skip_next=yes;; \
+ -*O?*) strip_trailopt 'O';; \
+ -*l) strip_trailopt 'l'; skip_next=yes;; \
+ -*l?*) strip_trailopt 'l';; \
+ -[dEDm]) skip_next=yes;; \
+ -[JT]) skip_next=yes;; \
+ esac; \
+ case $$flg in \
+ *$$target_option*) has_opt=yes; break;; \
+ esac; \
+ done; \
+ test $$has_opt = yes
+am__make_dryrun = (target_option=n; $(am__make_running_with_option))
+am__make_keepgoing = (target_option=k; $(am__make_running_with_option))
+pkgdatadir = $(datadir)/@PACKAGE@
+pkgincludedir = $(includedir)/@PACKAGE@
+pkglibdir = $(libdir)/@PACKAGE@
+pkglibexecdir = $(libexecdir)/@PACKAGE@
+am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd
+install_sh_DATA = $(install_sh) -c -m 644
+install_sh_PROGRAM = $(install_sh) -c
+install_sh_SCRIPT = $(install_sh) -c
+INSTALL_HEADER = $(INSTALL_DATA)
+transform = $(program_transform_name)
+NORMAL_INSTALL = :
+PRE_INSTALL = :
+POST_INSTALL = :
+NORMAL_UNINSTALL = :
+PRE_UNINSTALL = :
+POST_UNINSTALL = :
+build_triplet = @build@
+host_triplet = @host@
+subdir = collectors/macos.plugin
+DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/Makefile.am
+ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
+am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \
+ $(top_srcdir)/build/m4/ax_c__generic.m4 \
+ $(top_srcdir)/build/m4/ax_c_lto.m4 \
+ $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \
+ $(top_srcdir)/build/m4/ax_c_mallopt.m4 \
+ $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \
+ $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \
+ $(top_srcdir)/build/m4/ax_pthread.m4 \
+ $(top_srcdir)/build/m4/jemalloc.m4 \
+ $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac
+am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
+ $(ACLOCAL_M4)
+mkinstalldirs = $(install_sh) -d
+CONFIG_HEADER = $(top_builddir)/config.h
+CONFIG_CLEAN_FILES =
+CONFIG_CLEAN_VPATH_FILES =
+AM_V_P = $(am__v_P_@AM_V@)
+am__v_P_ = $(am__v_P_@AM_DEFAULT_V@)
+am__v_P_0 = false
+am__v_P_1 = :
+AM_V_GEN = $(am__v_GEN_@AM_V@)
+am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@)
+am__v_GEN_0 = @echo " GEN " $@;
+am__v_GEN_1 =
+AM_V_at = $(am__v_at_@AM_V@)
+am__v_at_ = $(am__v_at_@AM_DEFAULT_V@)
+am__v_at_0 = @
+am__v_at_1 =
+SOURCES =
+DIST_SOURCES =
+am__can_run_installinfo = \
+ case $$AM_UPDATE_INFO_DIR in \
+ n|no|NO) false;; \
+ *) (install-info --version) >/dev/null 2>&1;; \
+ esac
+am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP)
+DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
+ACLOCAL = @ACLOCAL@
+AMTAR = @AMTAR@
+AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@
+AUTOCONF = @AUTOCONF@
+AUTOHEADER = @AUTOHEADER@
+AUTOMAKE = @AUTOMAKE@
+AWK = @AWK@
+CC = @CC@
+CCDEPMODE = @CCDEPMODE@
+CFLAGS = @CFLAGS@
+CPP = @CPP@
+CPPFLAGS = @CPPFLAGS@
+CYGPATH_W = @CYGPATH_W@
+DEFS = @DEFS@
+DEPDIR = @DEPDIR@
+ECHO_C = @ECHO_C@
+ECHO_N = @ECHO_N@
+ECHO_T = @ECHO_T@
+EGREP = @EGREP@
+EXEEXT = @EXEEXT@
+GREP = @GREP@
+INSTALL = @INSTALL@
+INSTALL_DATA = @INSTALL_DATA@
+INSTALL_PROGRAM = @INSTALL_PROGRAM@
+INSTALL_SCRIPT = @INSTALL_SCRIPT@
+INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@
+IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@
+LDFLAGS = @LDFLAGS@
+LIBCAP_CFLAGS = @LIBCAP_CFLAGS@
+LIBCAP_LIBS = @LIBCAP_LIBS@
+LIBMNL_CFLAGS = @LIBMNL_CFLAGS@
+LIBMNL_LIBS = @LIBMNL_LIBS@
+LIBOBJS = @LIBOBJS@
+LIBS = @LIBS@
+LTLIBOBJS = @LTLIBOBJS@
+MAINT = @MAINT@
+MAKEINFO = @MAKEINFO@
+MATH_CFLAGS = @MATH_CFLAGS@
+MATH_LIBS = @MATH_LIBS@
+MKDIR_P = @MKDIR_P@
+NFACCT_CFLAGS = @NFACCT_CFLAGS@
+NFACCT_LIBS = @NFACCT_LIBS@
+OBJEXT = @OBJEXT@
+OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@
+OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@
+OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@
+OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@
+OPTIONAL_MATH_CLFAGS = @OPTIONAL_MATH_CLFAGS@
+OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@
+OPTIONAL_NFACCT_CLFAGS = @OPTIONAL_NFACCT_CLFAGS@
+OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@
+OPTIONAL_UUID_CLFAGS = @OPTIONAL_UUID_CLFAGS@
+OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@
+OPTIONAL_ZLIB_CLFAGS = @OPTIONAL_ZLIB_CLFAGS@
+OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@
+PACKAGE = @PACKAGE@
+PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@
+PACKAGE_NAME = @PACKAGE_NAME@
+PACKAGE_RPM_RELEASE = @PACKAGE_RPM_RELEASE@
+PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@
+PACKAGE_STRING = @PACKAGE_STRING@
+PACKAGE_TARNAME = @PACKAGE_TARNAME@
+PACKAGE_URL = @PACKAGE_URL@
+PACKAGE_VERSION = @PACKAGE_VERSION@
+PATH_SEPARATOR = @PATH_SEPARATOR@
+PKG_CONFIG = @PKG_CONFIG@
+PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@
+PKG_CONFIG_PATH = @PKG_CONFIG_PATH@
+PTHREAD_CC = @PTHREAD_CC@
+PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
+PTHREAD_LIBS = @PTHREAD_LIBS@
+SET_MAKE = @SET_MAKE@
+SHELL = @SHELL@
+SSE_CANDIDATE = @SSE_CANDIDATE@
+STRIP = @STRIP@
+UUID_CFLAGS = @UUID_CFLAGS@
+UUID_LIBS = @UUID_LIBS@
+VERSION = @VERSION@
+ZLIB_CFLAGS = @ZLIB_CFLAGS@
+ZLIB_LIBS = @ZLIB_LIBS@
+abs_builddir = @abs_builddir@
+abs_srcdir = @abs_srcdir@
+abs_top_builddir = @abs_top_builddir@
+abs_top_srcdir = @abs_top_srcdir@
+ac_ct_CC = @ac_ct_CC@
+am__include = @am__include@
+am__leading_dot = @am__leading_dot@
+am__quote = @am__quote@
+am__tar = @am__tar@
+am__untar = @am__untar@
+ax_pthread_config = @ax_pthread_config@
+bindir = @bindir@
+build = @build@
+build_alias = @build_alias@
+build_cpu = @build_cpu@
+build_os = @build_os@
+build_target = @build_target@
+build_vendor = @build_vendor@
+builddir = @builddir@
+cachedir = @cachedir@
+chartsdir = @chartsdir@
+configdir = @configdir@
+datadir = @datadir@
+datarootdir = @datarootdir@
+docdir = @docdir@
+dvidir = @dvidir@
+exec_prefix = @exec_prefix@
+has_jemalloc = @has_jemalloc@
+has_tcmalloc = @has_tcmalloc@
+host = @host@
+host_alias = @host_alias@
+host_cpu = @host_cpu@
+host_os = @host_os@
+host_vendor = @host_vendor@
+htmldir = @htmldir@
+includedir = @includedir@
+infodir = @infodir@
+install_sh = @install_sh@
+libconfigdir = @libconfigdir@
+libdir = @libdir@
+libexecdir = @libexecdir@
+localedir = @localedir@
+localstatedir = @localstatedir@
+logdir = @logdir@
+mandir = @mandir@
+mkdir_p = @mkdir_p@
+nodedir = @nodedir@
+oldincludedir = @oldincludedir@
+pdfdir = @pdfdir@
+pluginsdir = @pluginsdir@
+prefix = @prefix@
+program_transform_name = @program_transform_name@
+psdir = @psdir@
+pythondir = @pythondir@
+registrydir = @registrydir@
+sbindir = @sbindir@
+sharedstatedir = @sharedstatedir@
+srcdir = @srcdir@
+sysconfdir = @sysconfdir@
+target_alias = @target_alias@
+top_build_prefix = @top_build_prefix@
+top_builddir = @top_builddir@
+top_srcdir = @top_srcdir@
+varlibdir = @varlibdir@
+webdir = @webdir@
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+all: all-am
+
+.SUFFIXES:
+$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps)
+ @for dep in $?; do \
+ case '$(am__configure_deps)' in \
+ *$$dep*) \
+ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \
+ && { if test -f $@; then exit 0; else break; fi; }; \
+ exit 1;; \
+ esac; \
+ done; \
+ echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu collectors/macos.plugin/Makefile'; \
+ $(am__cd) $(top_srcdir) && \
+ $(AUTOMAKE) --gnu collectors/macos.plugin/Makefile
+.PRECIOUS: Makefile
+Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
+ @case '$?' in \
+ *config.status*) \
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \
+ *) \
+ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \
+ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \
+ esac;
+
+$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+
+$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(am__aclocal_m4_deps):
+tags TAGS:
+
+ctags CTAGS:
+
+cscope cscopelist:
+
+
+distdir: $(DISTFILES)
+ @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+ list='$(DISTFILES)'; \
+ dist_files=`for file in $$list; do echo $$file; done | \
+ sed -e "s|^$$srcdirstrip/||;t" \
+ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \
+ case $$dist_files in \
+ */*) $(MKDIR_P) `echo "$$dist_files" | \
+ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \
+ sort -u` ;; \
+ esac; \
+ for file in $$dist_files; do \
+ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \
+ if test -d $$d/$$file; then \
+ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \
+ if test -d "$(distdir)/$$file"; then \
+ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+ fi; \
+ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \
+ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \
+ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+ fi; \
+ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \
+ else \
+ test -f "$(distdir)/$$file" \
+ || cp -p $$d/$$file "$(distdir)/$$file" \
+ || exit 1; \
+ fi; \
+ done
+check-am: all-am
+check: check-am
+all-am: Makefile
+installdirs:
+install: install-am
+install-exec: install-exec-am
+install-data: install-data-am
+uninstall: uninstall-am
+
+install-am: all-am
+ @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
+
+installcheck: installcheck-am
+install-strip:
+ if test -z '$(STRIP)'; then \
+ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+ install; \
+ else \
+ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+ "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \
+ fi
+mostlyclean-generic:
+
+clean-generic:
+
+distclean-generic:
+ -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
+ -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES)
+
+maintainer-clean-generic:
+ @echo "This command is intended for maintainers to use"
+ @echo "it deletes files that may require special tools to rebuild."
+ -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES)
+clean: clean-am
+
+clean-am: clean-generic mostlyclean-am
+
+distclean: distclean-am
+ -rm -f Makefile
+distclean-am: clean-am distclean-generic
+
+dvi: dvi-am
+
+dvi-am:
+
+html: html-am
+
+html-am:
+
+info: info-am
+
+info-am:
+
+install-data-am:
+
+install-dvi: install-dvi-am
+
+install-dvi-am:
+
+install-exec-am:
+
+install-html: install-html-am
+
+install-html-am:
+
+install-info: install-info-am
+
+install-info-am:
+
+install-man:
+
+install-pdf: install-pdf-am
+
+install-pdf-am:
+
+install-ps: install-ps-am
+
+install-ps-am:
+
+installcheck-am:
+
+maintainer-clean: maintainer-clean-am
+ -rm -f Makefile
+maintainer-clean-am: distclean-am maintainer-clean-generic
+
+mostlyclean: mostlyclean-am
+
+mostlyclean-am: mostlyclean-generic
+
+pdf: pdf-am
+
+pdf-am:
+
+ps: ps-am
+
+ps-am:
+
+uninstall-am:
+
+.MAKE: install-am install-strip
+
+.PHONY: all all-am check check-am clean clean-generic cscopelist-am \
+ ctags-am distclean distclean-generic distdir dvi dvi-am html \
+ html-am info info-am install install-am install-data \
+ install-data-am install-dvi install-dvi-am install-exec \
+ install-exec-am install-html install-html-am install-info \
+ install-info-am install-man install-pdf install-pdf-am \
+ install-ps install-ps-am install-strip installcheck \
+ installcheck-am installdirs maintainer-clean \
+ maintainer-clean-generic mostlyclean mostlyclean-generic pdf \
+ pdf-am ps ps-am tags-am uninstall uninstall-am
+
+
+# Tell versions [3.59,3.63) of GNU make to not export all variables.
+# Otherwise a system limit (for SysV at least) may be exceeded.
+.NOEXPORT:
diff --git a/collectors/macos.plugin/macos_fw.c b/collectors/macos.plugin/macos_fw.c
new file mode 100644
index 000000000..5d0ba929e
--- /dev/null
+++ b/collectors/macos.plugin/macos_fw.c
@@ -0,0 +1,687 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "plugin_macos.h"
+
+#include <CoreFoundation/CoreFoundation.h>
+#include <IOKit/IOKitLib.h>
+#include <IOKit/storage/IOBlockStorageDriver.h>
+#include <IOKit/IOBSD.h>
+// NEEDED BY do_space, do_inodes
+#include <sys/mount.h>
+// NEEDED BY: struct ifaddrs, getifaddrs()
+#include <net/if.h>
+#include <ifaddrs.h>
+
+// NEEDED BY: do_bandwidth
+#define IFA_DATA(s) (((struct if_data *)ifa->ifa_data)->ifi_ ## s)
+
+#define MAXDRIVENAME 31
+
+#define KILO_FACTOR 1024
+#define MEGA_FACTOR 1048576 // 1024 * 1024
+#define GIGA_FACTOR 1073741824 // 1024 * 1024 * 1024
+
+int do_macos_iokit(int update_every, usec_t dt) {
+ (void)dt;
+
+ static int do_io = -1, do_space = -1, do_inodes = -1, do_bandwidth = -1;
+
+ if (unlikely(do_io == -1)) {
+ do_io = config_get_boolean("plugin:macos:iokit", "disk i/o", 1);
+ do_space = config_get_boolean("plugin:macos:sysctl", "space usage for all disks", 1);
+ do_inodes = config_get_boolean("plugin:macos:sysctl", "inodes usage for all disks", 1);
+ do_bandwidth = config_get_boolean("plugin:macos:sysctl", "bandwidth", 1);
+ }
+
+ RRDSET *st;
+
+ mach_port_t master_port;
+ io_registry_entry_t drive, drive_media;
+ io_iterator_t drive_list;
+ CFDictionaryRef properties, statistics;
+ CFStringRef name;
+ CFNumberRef number;
+ kern_return_t status;
+ collected_number total_disk_reads = 0;
+ collected_number total_disk_writes = 0;
+ struct diskstat {
+ char name[MAXDRIVENAME];
+ collected_number bytes_read;
+ collected_number bytes_write;
+ collected_number reads;
+ collected_number writes;
+ collected_number time_read;
+ collected_number time_write;
+ collected_number latency_read;
+ collected_number latency_write;
+ } diskstat;
+ struct cur_diskstat {
+ collected_number duration_read_ns;
+ collected_number duration_write_ns;
+ collected_number busy_time_ns;
+ } cur_diskstat;
+ struct prev_diskstat {
+ collected_number bytes_read;
+ collected_number bytes_write;
+ collected_number operations_read;
+ collected_number operations_write;
+ collected_number duration_read_ns;
+ collected_number duration_write_ns;
+ collected_number busy_time_ns;
+ } prev_diskstat;
+
+ // NEEDED BY: do_space, do_inodes
+ struct statfs *mntbuf;
+ int mntsize, i;
+ char mntonname[MNAMELEN + 1];
+ char title[4096 + 1];
+
+ // NEEDED BY: do_bandwidth
+ struct ifaddrs *ifa, *ifap;
+
+ /* Get ports and services for drive statistics. */
+ if (unlikely(IOMasterPort(bootstrap_port, &master_port))) {
+ error("MACOS: IOMasterPort() failed");
+ do_io = 0;
+ error("DISABLED: system.io");
+ /* Get the list of all drive objects. */
+ } else if (unlikely(IOServiceGetMatchingServices(master_port, IOServiceMatching("IOBlockStorageDriver"), &drive_list))) {
+ error("MACOS: IOServiceGetMatchingServices() failed");
+ do_io = 0;
+ error("DISABLED: system.io");
+ } else {
+ while ((drive = IOIteratorNext(drive_list)) != 0) {
+ properties = 0;
+ statistics = 0;
+ number = 0;
+ bzero(&diskstat, sizeof(diskstat));
+
+ /* Get drive media object. */
+ status = IORegistryEntryGetChildEntry(drive, kIOServicePlane, &drive_media);
+ if (unlikely(status != KERN_SUCCESS)) {
+ IOObjectRelease(drive);
+ continue;
+ }
+
+ /* Get drive media properties. */
+ if (likely(!IORegistryEntryCreateCFProperties(drive_media, (CFMutableDictionaryRef *)&properties, kCFAllocatorDefault, 0))) {
+ /* Get disk name. */
+ if (likely(name = (CFStringRef)CFDictionaryGetValue(properties, CFSTR(kIOBSDNameKey)))) {
+ CFStringGetCString(name, diskstat.name, MAXDRIVENAME, kCFStringEncodingUTF8);
+ }
+ }
+
+ /* Release. */
+ CFRelease(properties);
+ IOObjectRelease(drive_media);
+
+ if(unlikely(!diskstat.name || !*diskstat.name)) {
+ IOObjectRelease(drive);
+ continue;
+ }
+
+ /* Obtain the properties for this drive object. */
+ if (unlikely(IORegistryEntryCreateCFProperties(drive, (CFMutableDictionaryRef *)&properties, kCFAllocatorDefault, 0))) {
+ IOObjectRelease(drive);
+ error("MACOS: IORegistryEntryCreateCFProperties() failed");
+ do_io = 0;
+ error("DISABLED: system.io");
+ break;
+ } else if (likely(properties)) {
+ /* Obtain the statistics from the drive properties. */
+ if (likely(statistics = (CFDictionaryRef)CFDictionaryGetValue(properties, CFSTR(kIOBlockStorageDriverStatisticsKey)))) {
+
+ // --------------------------------------------------------------------
+
+ /* Get bytes read. */
+ if (likely(number = (CFNumberRef)CFDictionaryGetValue(statistics, CFSTR(kIOBlockStorageDriverStatisticsBytesReadKey)))) {
+ CFNumberGetValue(number, kCFNumberSInt64Type, &diskstat.bytes_read);
+ total_disk_reads += diskstat.bytes_read;
+ }
+
+ /* Get bytes written. */
+ if (likely(number = (CFNumberRef)CFDictionaryGetValue(statistics, CFSTR(kIOBlockStorageDriverStatisticsBytesWrittenKey)))) {
+ CFNumberGetValue(number, kCFNumberSInt64Type, &diskstat.bytes_write);
+ total_disk_writes += diskstat.bytes_write;
+ }
+
+ st = rrdset_find_bytype_localhost("disk", diskstat.name);
+ if (unlikely(!st)) {
+ st = rrdset_create_localhost(
+ "disk"
+ , diskstat.name
+ , NULL
+ , diskstat.name
+ , "disk.io"
+ , "Disk I/O Bandwidth"
+ , "kilobytes/s"
+ , "macos"
+ , "iokit"
+ , 2000
+ , update_every
+ , RRDSET_TYPE_AREA
+ );
+
+ rrddim_add(st, "reads", NULL, 1, 1024, RRD_ALGORITHM_INCREMENTAL);
+ rrddim_add(st, "writes", NULL, -1, 1024, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else rrdset_next(st);
+
+ prev_diskstat.bytes_read = rrddim_set(st, "reads", diskstat.bytes_read);
+ prev_diskstat.bytes_write = rrddim_set(st, "writes", diskstat.bytes_write);
+ rrdset_done(st);
+
+ // --------------------------------------------------------------------
+
+ /* Get number of reads. */
+ if (likely(number = (CFNumberRef)CFDictionaryGetValue(statistics, CFSTR(kIOBlockStorageDriverStatisticsReadsKey)))) {
+ CFNumberGetValue(number, kCFNumberSInt64Type, &diskstat.reads);
+ }
+
+ /* Get number of writes. */
+ if (likely(number = (CFNumberRef)CFDictionaryGetValue(statistics, CFSTR(kIOBlockStorageDriverStatisticsWritesKey)))) {
+ CFNumberGetValue(number, kCFNumberSInt64Type, &diskstat.writes);
+ }
+
+ st = rrdset_find_bytype_localhost("disk_ops", diskstat.name);
+ if (unlikely(!st)) {
+ st = rrdset_create_localhost(
+ "disk_ops"
+ , diskstat.name
+ , NULL
+ , diskstat.name
+ , "disk.ops"
+ , "Disk Completed I/O Operations"
+ , "operations/s"
+ , "macos"
+ , "iokit"
+ , 2001
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+ rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
+
+ rrddim_add(st, "reads", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rrddim_add(st, "writes", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else rrdset_next(st);
+
+ prev_diskstat.operations_read = rrddim_set(st, "reads", diskstat.reads);
+ prev_diskstat.operations_write = rrddim_set(st, "writes", diskstat.writes);
+ rrdset_done(st);
+
+ // --------------------------------------------------------------------
+
+ /* Get reads time. */
+ if (likely(number = (CFNumberRef)CFDictionaryGetValue(statistics, CFSTR(kIOBlockStorageDriverStatisticsTotalReadTimeKey)))) {
+ CFNumberGetValue(number, kCFNumberSInt64Type, &diskstat.time_read);
+ }
+
+ /* Get writes time. */
+ if (likely(number = (CFNumberRef)CFDictionaryGetValue(statistics, CFSTR(kIOBlockStorageDriverStatisticsTotalWriteTimeKey)))) {
+ CFNumberGetValue(number, kCFNumberSInt64Type, &diskstat.time_write);
+ }
+
+ st = rrdset_find_bytype_localhost("disk_util", diskstat.name);
+ if (unlikely(!st)) {
+ st = rrdset_create_localhost(
+ "disk_util"
+ , diskstat.name
+ , NULL
+ , diskstat.name
+ , "disk.util"
+ , "Disk Utilization Time"
+ , "% of time working"
+ , "macos"
+ , "iokit"
+ , 2004
+ , update_every
+ , RRDSET_TYPE_AREA
+ );
+ rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
+
+ rrddim_add(st, "utilization", NULL, 1, 10000000, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else rrdset_next(st);
+
+ cur_diskstat.busy_time_ns = (diskstat.time_read + diskstat.time_write);
+ prev_diskstat.busy_time_ns = rrddim_set(st, "utilization", cur_diskstat.busy_time_ns);
+ rrdset_done(st);
+
+ // --------------------------------------------------------------------
+
+ /* Get reads latency. */
+ if (likely(number = (CFNumberRef)CFDictionaryGetValue(statistics, CFSTR(kIOBlockStorageDriverStatisticsLatentReadTimeKey)))) {
+ CFNumberGetValue(number, kCFNumberSInt64Type, &diskstat.latency_read);
+ }
+
+ /* Get writes latency. */
+ if (likely(number = (CFNumberRef)CFDictionaryGetValue(statistics, CFSTR(kIOBlockStorageDriverStatisticsLatentWriteTimeKey)))) {
+ CFNumberGetValue(number, kCFNumberSInt64Type, &diskstat.latency_write);
+ }
+
+ st = rrdset_find_bytype_localhost("disk_iotime", diskstat.name);
+ if (unlikely(!st)) {
+ st = rrdset_create_localhost(
+ "disk_iotime"
+ , diskstat.name
+ , NULL
+ , diskstat.name
+ , "disk.iotime"
+ , "Disk Total I/O Time"
+ , "milliseconds/s"
+ , "macos"
+ , "iokit"
+ , 2022
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+ rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
+
+ rrddim_add(st, "reads", NULL, 1, 1000000, RRD_ALGORITHM_INCREMENTAL);
+ rrddim_add(st, "writes", NULL, -1, 1000000, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else rrdset_next(st);
+
+ cur_diskstat.duration_read_ns = diskstat.time_read + diskstat.latency_read;
+ cur_diskstat.duration_write_ns = diskstat.time_write + diskstat.latency_write;
+ prev_diskstat.duration_read_ns = rrddim_set(st, "reads", cur_diskstat.duration_read_ns);
+ prev_diskstat.duration_write_ns = rrddim_set(st, "writes", cur_diskstat.duration_write_ns);
+ rrdset_done(st);
+
+ // --------------------------------------------------------------------
+ // calculate differential charts
+ // only if this is not the first time we run
+
+ if (likely(dt)) {
+
+ // --------------------------------------------------------------------
+
+ st = rrdset_find_bytype_localhost("disk_await", diskstat.name);
+ if (unlikely(!st)) {
+ st = rrdset_create_localhost(
+ "disk_await"
+ , diskstat.name
+ , NULL
+ , diskstat.name
+ , "disk.await"
+ , "Average Completed I/O Operation Time"
+ , "ms per operation"
+ , "macos"
+ , "iokit"
+ , 2005
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+ rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
+
+ rrddim_add(st, "reads", NULL, 1, 1000000, RRD_ALGORITHM_ABSOLUTE);
+ rrddim_add(st, "writes", NULL, -1, 1000000, RRD_ALGORITHM_ABSOLUTE);
+ }
+ else rrdset_next(st);
+
+ rrddim_set(st, "reads", (diskstat.reads - prev_diskstat.operations_read) ?
+ (cur_diskstat.duration_read_ns - prev_diskstat.duration_read_ns) / (diskstat.reads - prev_diskstat.operations_read) : 0);
+ rrddim_set(st, "writes", (diskstat.writes - prev_diskstat.operations_write) ?
+ (cur_diskstat.duration_write_ns - prev_diskstat.duration_write_ns) / (diskstat.writes - prev_diskstat.operations_write) : 0);
+ rrdset_done(st);
+
+ // --------------------------------------------------------------------
+
+ st = rrdset_find_bytype_localhost("disk_avgsz", diskstat.name);
+ if (unlikely(!st)) {
+ st = rrdset_create_localhost(
+ "disk_avgsz"
+ , diskstat.name
+ , NULL
+ , diskstat.name
+ , "disk.avgsz"
+ , "Average Completed I/O Operation Bandwidth"
+ , "kilobytes per operation"
+ , "macos"
+ , "iokit"
+ , 2006
+ , update_every
+ , RRDSET_TYPE_AREA
+ );
+ rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
+
+ rrddim_add(st, "reads", NULL, 1, 1024, RRD_ALGORITHM_ABSOLUTE);
+ rrddim_add(st, "writes", NULL, -1, 1024, RRD_ALGORITHM_ABSOLUTE);
+ }
+ else rrdset_next(st);
+
+ rrddim_set(st, "reads", (diskstat.reads - prev_diskstat.operations_read) ?
+ (diskstat.bytes_read - prev_diskstat.bytes_read) / (diskstat.reads - prev_diskstat.operations_read) : 0);
+ rrddim_set(st, "writes", (diskstat.writes - prev_diskstat.operations_write) ?
+ (diskstat.bytes_write - prev_diskstat.bytes_write) / (diskstat.writes - prev_diskstat.operations_write) : 0);
+ rrdset_done(st);
+
+ // --------------------------------------------------------------------
+
+ st = rrdset_find_bytype_localhost("disk_svctm", diskstat.name);
+ if (unlikely(!st)) {
+ st = rrdset_create_localhost(
+ "disk_svctm"
+ , diskstat.name
+ , NULL
+ , diskstat.name
+ , "disk.svctm"
+ , "Average Service Time"
+ , "ms per operation"
+ , "macos"
+ , "iokit"
+ , 2007
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+ rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
+
+ rrddim_add(st, "svctm", NULL, 1, 1000000, RRD_ALGORITHM_ABSOLUTE);
+ }
+ else rrdset_next(st);
+
+ rrddim_set(st, "svctm", ((diskstat.reads - prev_diskstat.operations_read) + (diskstat.writes - prev_diskstat.operations_write)) ?
+ (cur_diskstat.busy_time_ns - prev_diskstat.busy_time_ns) / ((diskstat.reads - prev_diskstat.operations_read) + (diskstat.writes - prev_diskstat.operations_write)) : 0);
+ rrdset_done(st);
+ }
+ }
+
+ /* Release. */
+ CFRelease(properties);
+ }
+
+ /* Release. */
+ IOObjectRelease(drive);
+ }
+ IOIteratorReset(drive_list);
+
+ /* Release. */
+ IOObjectRelease(drive_list);
+ }
+
+ if (likely(do_io)) {
+ st = rrdset_find_bytype_localhost("system", "io");
+ if (unlikely(!st)) {
+ st = rrdset_create_localhost(
+ "system"
+ , "io"
+ , NULL
+ , "disk"
+ , NULL
+ , "Disk I/O"
+ , "kilobytes/s"
+ , "macos"
+ , "iokit"
+ , 150
+ , update_every
+ , RRDSET_TYPE_AREA
+ );
+ rrddim_add(st, "in", NULL, 1, 1024, RRD_ALGORITHM_INCREMENTAL);
+ rrddim_add(st, "out", NULL, -1, 1024, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else rrdset_next(st);
+
+ rrddim_set(st, "in", total_disk_reads);
+ rrddim_set(st, "out", total_disk_writes);
+ rrdset_done(st);
+ }
+
+ // Can be merged with FreeBSD plugin
+ // --------------------------------------------------------------------------
+
+ if (likely(do_space || do_inodes)) {
+ // there is no mount info in sysctl MIBs
+ if (unlikely(!(mntsize = getmntinfo(&mntbuf, MNT_NOWAIT)))) {
+ error("MACOS: getmntinfo() failed");
+ do_space = 0;
+ error("DISABLED: disk_space.X");
+ do_inodes = 0;
+ error("DISABLED: disk_inodes.X");
+ } else {
+ for (i = 0; i < mntsize; i++) {
+ if (mntbuf[i].f_flags == MNT_RDONLY ||
+ mntbuf[i].f_blocks == 0 ||
+ // taken from gnulib/mountlist.c and shortened to FreeBSD related fstypes
+ strcmp(mntbuf[i].f_fstypename, "autofs") == 0 ||
+ strcmp(mntbuf[i].f_fstypename, "procfs") == 0 ||
+ strcmp(mntbuf[i].f_fstypename, "subfs") == 0 ||
+ strcmp(mntbuf[i].f_fstypename, "devfs") == 0 ||
+ strcmp(mntbuf[i].f_fstypename, "none") == 0)
+ continue;
+
+ // --------------------------------------------------------------------------
+
+ if (likely(do_space)) {
+ st = rrdset_find_bytype_localhost("disk_space", mntbuf[i].f_mntonname);
+ if (unlikely(!st)) {
+ snprintfz(title, 4096, "Disk Space Usage for %s [%s]", mntbuf[i].f_mntonname, mntbuf[i].f_mntfromname);
+ st = rrdset_create_localhost(
+ "disk_space"
+ , mntbuf[i].f_mntonname
+ , NULL
+ , mntbuf[i].f_mntonname
+ , "disk.space"
+ , title
+ , "GB"
+ , "macos"
+ , "iokit"
+ , 2023
+ , update_every
+ , RRDSET_TYPE_STACKED
+ );
+
+ rrddim_add(st, "avail", NULL, mntbuf[i].f_bsize, GIGA_FACTOR, RRD_ALGORITHM_ABSOLUTE);
+ rrddim_add(st, "used", NULL, mntbuf[i].f_bsize, GIGA_FACTOR, RRD_ALGORITHM_ABSOLUTE);
+ rrddim_add(st, "reserved_for_root", "reserved for root", mntbuf[i].f_bsize, GIGA_FACTOR, RRD_ALGORITHM_ABSOLUTE);
+ } else
+ rrdset_next(st);
+
+ rrddim_set(st, "avail", (collected_number) mntbuf[i].f_bavail);
+ rrddim_set(st, "used", (collected_number) (mntbuf[i].f_blocks - mntbuf[i].f_bfree));
+ rrddim_set(st, "reserved_for_root", (collected_number) (mntbuf[i].f_bfree - mntbuf[i].f_bavail));
+ rrdset_done(st);
+ }
+
+ // --------------------------------------------------------------------------
+
+ if (likely(do_inodes)) {
+ st = rrdset_find_bytype_localhost("disk_inodes", mntbuf[i].f_mntonname);
+ if (unlikely(!st)) {
+ snprintfz(title, 4096, "Disk Files (inodes) Usage for %s [%s]", mntbuf[i].f_mntonname, mntbuf[i].f_mntfromname);
+ st = rrdset_create_localhost(
+ "disk_inodes"
+ , mntbuf[i].f_mntonname
+ , NULL
+ , mntbuf[i].f_mntonname
+ , "disk.inodes"
+ , title
+ , "Inodes"
+ , "macos"
+ , "iokit"
+ , 2024
+ , update_every
+ , RRDSET_TYPE_STACKED
+ );
+
+ rrddim_add(st, "avail", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ rrddim_add(st, "used", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ rrddim_add(st, "reserved_for_root", "reserved for root", 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ } else
+ rrdset_next(st);
+
+ rrddim_set(st, "avail", (collected_number) mntbuf[i].f_ffree);
+ rrddim_set(st, "used", (collected_number) (mntbuf[i].f_files - mntbuf[i].f_ffree));
+ rrdset_done(st);
+ }
+ }
+ }
+ }
+
+ // Can be merged with FreeBSD plugin
+ // --------------------------------------------------------------------
+
+ if (likely(do_bandwidth)) {
+ if (unlikely(getifaddrs(&ifap))) {
+ error("MACOS: getifaddrs()");
+ do_bandwidth = 0;
+ error("DISABLED: system.ipv4");
+ } else {
+ for (ifa = ifap; ifa; ifa = ifa->ifa_next) {
+ if (ifa->ifa_addr->sa_family != AF_LINK)
+ continue;
+
+ // --------------------------------------------------------------------
+
+ st = rrdset_find_bytype_localhost("net", ifa->ifa_name);
+ if (unlikely(!st)) {
+ st = rrdset_create_localhost(
+ "net"
+ , ifa->ifa_name
+ , NULL
+ , ifa->ifa_name
+ , "net.net"
+ , "Bandwidth"
+ , "kilobits/s"
+ , "macos"
+ , "iokit"
+ , 7000
+ , update_every
+ , RRDSET_TYPE_AREA
+ );
+
+ rrddim_add(st, "received", NULL, 8, BITS_IN_A_KILOBIT, RRD_ALGORITHM_INCREMENTAL);
+ rrddim_add(st, "sent", NULL, -8, BITS_IN_A_KILOBIT, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else rrdset_next(st);
+
+ rrddim_set(st, "received", IFA_DATA(ibytes));
+ rrddim_set(st, "sent", IFA_DATA(obytes));
+ rrdset_done(st);
+
+ // --------------------------------------------------------------------
+
+ st = rrdset_find_bytype_localhost("net_packets", ifa->ifa_name);
+ if (unlikely(!st)) {
+ st = rrdset_create_localhost(
+ "net_packets"
+ , ifa->ifa_name
+ , NULL
+ , ifa->ifa_name
+ , "net.packets"
+ , "Packets"
+ , "packets/s"
+ , "macos"
+ , "iokit"
+ , 7001
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+ rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
+
+ rrddim_add(st, "received", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rrddim_add(st, "sent", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rrddim_add(st, "multicast_received", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rrddim_add(st, "multicast_sent", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else rrdset_next(st);
+
+ rrddim_set(st, "received", IFA_DATA(ipackets));
+ rrddim_set(st, "sent", IFA_DATA(opackets));
+ rrddim_set(st, "multicast_received", IFA_DATA(imcasts));
+ rrddim_set(st, "multicast_sent", IFA_DATA(omcasts));
+ rrdset_done(st);
+
+ // --------------------------------------------------------------------
+
+ st = rrdset_find_bytype_localhost("net_errors", ifa->ifa_name);
+ if (unlikely(!st)) {
+ st = rrdset_create_localhost(
+ "net_errors"
+ , ifa->ifa_name
+ , NULL
+ , ifa->ifa_name
+ , "net.errors"
+ , "Interface Errors"
+ , "errors/s"
+ , "macos"
+ , "iokit"
+ , 7002
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+ rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
+
+ rrddim_add(st, "inbound", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rrddim_add(st, "outbound", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else rrdset_next(st);
+
+ rrddim_set(st, "inbound", IFA_DATA(ierrors));
+ rrddim_set(st, "outbound", IFA_DATA(oerrors));
+ rrdset_done(st);
+
+ // --------------------------------------------------------------------
+
+ st = rrdset_find_bytype_localhost("net_drops", ifa->ifa_name);
+ if (unlikely(!st)) {
+ st = rrdset_create_localhost(
+ "net_drops"
+ , ifa->ifa_name
+ , NULL
+ , ifa->ifa_name
+ , "net.drops"
+ , "Interface Drops"
+ , "drops/s"
+ , "macos"
+ , "iokit"
+ , 7003
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+ rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
+
+ rrddim_add(st, "inbound", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else rrdset_next(st);
+
+ rrddim_set(st, "inbound", IFA_DATA(iqdrops));
+ rrdset_done(st);
+
+ // --------------------------------------------------------------------
+
+ st = rrdset_find_bytype_localhost("net_events", ifa->ifa_name);
+ if (unlikely(!st)) {
+ st = rrdset_create_localhost(
+ "net_events"
+ , ifa->ifa_name
+ , NULL
+ , ifa->ifa_name
+ , "net.events"
+ , "Network Interface Events"
+ , "events/s"
+ , "macos"
+ , "iokit"
+ , 7006
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+ rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
+
+ rrddim_add(st, "frames", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rrddim_add(st, "collisions", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rrddim_add(st, "carrier", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else rrdset_next(st);
+
+ rrddim_set(st, "collisions", IFA_DATA(collisions));
+ rrdset_done(st);
+ }
+
+ freeifaddrs(ifap);
+ }
+ }
+
+
+ return 0;
+}
diff --git a/collectors/macos.plugin/macos_mach_smi.c b/collectors/macos.plugin/macos_mach_smi.c
new file mode 100644
index 000000000..1c43d624c
--- /dev/null
+++ b/collectors/macos.plugin/macos_mach_smi.c
@@ -0,0 +1,241 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "plugin_macos.h"
+
+#include <mach/mach.h>
+
+int do_macos_mach_smi(int update_every, usec_t dt) {
+ (void)dt;
+
+ static int do_cpu = -1, do_ram = - 1, do_swapio = -1, do_pgfaults = -1;
+
+ if (unlikely(do_cpu == -1)) {
+ do_cpu = config_get_boolean("plugin:macos:mach_smi", "cpu utilization", 1);
+ do_ram = config_get_boolean("plugin:macos:mach_smi", "system ram", 1);
+ do_swapio = config_get_boolean("plugin:macos:mach_smi", "swap i/o", 1);
+ do_pgfaults = config_get_boolean("plugin:macos:mach_smi", "memory page faults", 1);
+ }
+
+ RRDSET *st;
+
+ kern_return_t kr;
+ mach_msg_type_number_t count;
+ host_t host;
+ vm_size_t system_pagesize;
+
+
+ // NEEDED BY: do_cpu
+ natural_t cp_time[CPU_STATE_MAX];
+
+ // NEEDED BY: do_ram, do_swapio, do_pgfaults
+#if (defined __MAC_OS_X_VERSION_MIN_REQUIRED && __MAC_OS_X_VERSION_MIN_REQUIRED >= 1060)
+ vm_statistics64_data_t vm_statistics;
+#else
+ vm_statistics_data_t vm_statistics;
+#endif
+
+ host = mach_host_self();
+ kr = host_page_size(host, &system_pagesize);
+ if (unlikely(kr != KERN_SUCCESS))
+ return -1;
+
+ // --------------------------------------------------------------------
+
+ if (likely(do_cpu)) {
+ if (unlikely(HOST_CPU_LOAD_INFO_COUNT != 4)) {
+ error("MACOS: There are %d CPU states (4 was expected)", HOST_CPU_LOAD_INFO_COUNT);
+ do_cpu = 0;
+ error("DISABLED: system.cpu");
+ } else {
+ count = HOST_CPU_LOAD_INFO_COUNT;
+ kr = host_statistics(host, HOST_CPU_LOAD_INFO, (host_info_t)cp_time, &count);
+ if (unlikely(kr != KERN_SUCCESS)) {
+ error("MACOS: host_statistics() failed: %s", mach_error_string(kr));
+ do_cpu = 0;
+ error("DISABLED: system.cpu");
+ } else {
+
+ st = rrdset_find_bytype_localhost("system", "cpu");
+ if (unlikely(!st)) {
+ st = rrdset_create_localhost(
+ "system"
+ , "cpu"
+ , NULL
+ , "cpu"
+ , "system.cpu"
+ , "Total CPU utilization"
+ , "percentage"
+ , "macos"
+ , "mach_smi"
+ , 100
+ , update_every
+ , RRDSET_TYPE_STACKED
+ );
+
+ rrddim_add(st, "user", NULL, 1, 1, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL);
+ rrddim_add(st, "nice", NULL, 1, 1, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL);
+ rrddim_add(st, "system", NULL, 1, 1, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL);
+ rrddim_add(st, "idle", NULL, 1, 1, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL);
+ rrddim_hide(st, "idle");
+ }
+ else rrdset_next(st);
+
+ rrddim_set(st, "user", cp_time[CPU_STATE_USER]);
+ rrddim_set(st, "nice", cp_time[CPU_STATE_NICE]);
+ rrddim_set(st, "system", cp_time[CPU_STATE_SYSTEM]);
+ rrddim_set(st, "idle", cp_time[CPU_STATE_IDLE]);
+ rrdset_done(st);
+ }
+ }
+ }
+
+ // --------------------------------------------------------------------
+
+ if (likely(do_ram || do_swapio || do_pgfaults)) {
+#if (defined __MAC_OS_X_VERSION_MIN_REQUIRED && __MAC_OS_X_VERSION_MIN_REQUIRED >= 1060)
+ count = sizeof(vm_statistics64_data_t);
+ kr = host_statistics64(host, HOST_VM_INFO64, (host_info64_t)&vm_statistics, &count);
+#else
+ count = sizeof(vm_statistics_data_t);
+ kr = host_statistics(host, HOST_VM_INFO, (host_info_t)&vm_statistics, &count);
+#endif
+ if (unlikely(kr != KERN_SUCCESS)) {
+ error("MACOS: host_statistics64() failed: %s", mach_error_string(kr));
+ do_ram = 0;
+ error("DISABLED: system.ram");
+ do_swapio = 0;
+ error("DISABLED: system.swapio");
+ do_pgfaults = 0;
+ error("DISABLED: mem.pgfaults");
+ } else {
+ if (likely(do_ram)) {
+ st = rrdset_find_localhost("system.ram");
+ if (unlikely(!st)) {
+ st = rrdset_create_localhost(
+ "system"
+ , "ram"
+ , NULL
+ , "ram"
+ , NULL
+ , "System RAM"
+ , "MB"
+ , "macos"
+ , "mach_smi"
+ , 200
+ , update_every
+ , RRDSET_TYPE_STACKED
+ );
+
+ rrddim_add(st, "active", NULL, system_pagesize, 1048576, RRD_ALGORITHM_ABSOLUTE);
+ rrddim_add(st, "wired", NULL, system_pagesize, 1048576, RRD_ALGORITHM_ABSOLUTE);
+#if (defined __MAC_OS_X_VERSION_MIN_REQUIRED && __MAC_OS_X_VERSION_MIN_REQUIRED >= 1090)
+ rrddim_add(st, "throttled", NULL, system_pagesize, 1048576, RRD_ALGORITHM_ABSOLUTE);
+ rrddim_add(st, "compressor", NULL, system_pagesize, 1048576, RRD_ALGORITHM_ABSOLUTE);
+#endif
+ rrddim_add(st, "inactive", NULL, system_pagesize, 1048576, RRD_ALGORITHM_ABSOLUTE);
+ rrddim_add(st, "purgeable", NULL, system_pagesize, 1048576, RRD_ALGORITHM_ABSOLUTE);
+ rrddim_add(st, "speculative", NULL, system_pagesize, 1048576, RRD_ALGORITHM_ABSOLUTE);
+ rrddim_add(st, "free", NULL, system_pagesize, 1048576, RRD_ALGORITHM_ABSOLUTE);
+ }
+ else rrdset_next(st);
+
+ rrddim_set(st, "active", vm_statistics.active_count);
+ rrddim_set(st, "wired", vm_statistics.wire_count);
+#if (defined __MAC_OS_X_VERSION_MIN_REQUIRED && __MAC_OS_X_VERSION_MIN_REQUIRED >= 1090)
+ rrddim_set(st, "throttled", vm_statistics.throttled_count);
+ rrddim_set(st, "compressor", vm_statistics.compressor_page_count);
+#endif
+ rrddim_set(st, "inactive", vm_statistics.inactive_count);
+ rrddim_set(st, "purgeable", vm_statistics.purgeable_count);
+ rrddim_set(st, "speculative", vm_statistics.speculative_count);
+ rrddim_set(st, "free", (vm_statistics.free_count - vm_statistics.speculative_count));
+ rrdset_done(st);
+ }
+
+#if (defined __MAC_OS_X_VERSION_MIN_REQUIRED && __MAC_OS_X_VERSION_MIN_REQUIRED >= 1090)
+ // --------------------------------------------------------------------
+
+ if (likely(do_swapio)) {
+ st = rrdset_find_localhost("system.swapio");
+ if (unlikely(!st)) {
+ st = rrdset_create_localhost(
+ "system"
+ , "swapio"
+ , NULL
+ , "swap"
+ , NULL
+ , "Swap I/O"
+ , "kilobytes/s"
+ , "macos"
+ , "mach_smi"
+ , 250
+ , update_every
+ , RRDSET_TYPE_AREA
+ );
+
+ rrddim_add(st, "in", NULL, system_pagesize, 1024, RRD_ALGORITHM_INCREMENTAL);
+ rrddim_add(st, "out", NULL, -system_pagesize, 1024, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else rrdset_next(st);
+
+ rrddim_set(st, "in", vm_statistics.swapins);
+ rrddim_set(st, "out", vm_statistics.swapouts);
+ rrdset_done(st);
+ }
+#endif
+
+ // --------------------------------------------------------------------
+
+ if (likely(do_pgfaults)) {
+ st = rrdset_find_localhost("mem.pgfaults");
+ if (unlikely(!st)) {
+ st = rrdset_create_localhost(
+ "mem"
+ , "pgfaults"
+ , NULL
+ , "system"
+ , NULL
+ , "Memory Page Faults"
+ , "page faults/s"
+ , "macos"
+ , "mach_smi"
+ , NETDATA_CHART_PRIO_MEM_SYSTEM_PGFAULTS
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+ rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
+
+ rrddim_add(st, "memory", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rrddim_add(st, "cow", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rrddim_add(st, "pagein", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rrddim_add(st, "pageout", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+#if (defined __MAC_OS_X_VERSION_MIN_REQUIRED && __MAC_OS_X_VERSION_MIN_REQUIRED >= 1090)
+ rrddim_add(st, "compress", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rrddim_add(st, "decompress", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+#endif
+ rrddim_add(st, "zero_fill", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rrddim_add(st, "reactivate", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rrddim_add(st, "purge", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else rrdset_next(st);
+
+ rrddim_set(st, "memory", vm_statistics.faults);
+ rrddim_set(st, "cow", vm_statistics.cow_faults);
+ rrddim_set(st, "pagein", vm_statistics.pageins);
+ rrddim_set(st, "pageout", vm_statistics.pageouts);
+#if (defined __MAC_OS_X_VERSION_MIN_REQUIRED && __MAC_OS_X_VERSION_MIN_REQUIRED >= 1090)
+ rrddim_set(st, "compress", vm_statistics.compressions);
+ rrddim_set(st, "decompress", vm_statistics.decompressions);
+#endif
+ rrddim_set(st, "zero_fill", vm_statistics.zero_fill_count);
+ rrddim_set(st, "reactivate", vm_statistics.reactivations);
+ rrddim_set(st, "purge", vm_statistics.purges);
+ rrdset_done(st);
+ }
+ }
+ }
+
+ // --------------------------------------------------------------------
+
+ return 0;
+}
diff --git a/collectors/macos.plugin/macos_sysctl.c b/collectors/macos.plugin/macos_sysctl.c
new file mode 100644
index 000000000..6b443c04a
--- /dev/null
+++ b/collectors/macos.plugin/macos_sysctl.c
@@ -0,0 +1,1492 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "plugin_macos.h"
+
+#include <Availability.h>
+// NEEDED BY: do_bandwidth
+#include <net/route.h>
+// NEEDED BY do_tcp...
+#include <sys/socketvar.h>
+#include <netinet/tcp_var.h>
+#include <netinet/tcp_fsm.h>
+// NEEDED BY do_udp..., do_ip...
+#include <netinet/ip_var.h>
+// NEEDED BY do_udp...
+#include <netinet/udp.h>
+#include <netinet/udp_var.h>
+// NEEDED BY do_icmp...
+#include <netinet/ip.h>
+#include <netinet/ip_icmp.h>
+#include <netinet/icmp_var.h>
+// NEEDED BY do_icmp6...
+#include <netinet/icmp6.h>
+// NEEDED BY do_uptime
+#include <time.h>
+
+// MacOS calculates load averages once every 5 seconds
+#define MIN_LOADAVG_UPDATE_EVERY 5
+
+int do_macos_sysctl(int update_every, usec_t dt) {
+ static int do_loadavg = -1, do_swap = -1, do_bandwidth = -1,
+ do_tcp_packets = -1, do_tcp_errors = -1, do_tcp_handshake = -1, do_ecn = -1,
+ do_tcpext_syscookies = -1, do_tcpext_ofo = -1, do_tcpext_connaborts = -1,
+ do_udp_packets = -1, do_udp_errors = -1, do_icmp_packets = -1, do_icmpmsg = -1,
+ do_ip_packets = -1, do_ip_fragsout = -1, do_ip_fragsin = -1, do_ip_errors = -1,
+ do_ip6_packets = -1, do_ip6_fragsout = -1, do_ip6_fragsin = -1, do_ip6_errors = -1,
+ do_icmp6 = -1, do_icmp6_redir = -1, do_icmp6_errors = -1, do_icmp6_echos = -1,
+ do_icmp6_router = -1, do_icmp6_neighbor = -1, do_icmp6_types = -1, do_uptime = -1;
+
+
+ if (unlikely(do_loadavg == -1)) {
+ do_loadavg = config_get_boolean("plugin:macos:sysctl", "enable load average", 1);
+ do_swap = config_get_boolean("plugin:macos:sysctl", "system swap", 1);
+ do_bandwidth = config_get_boolean("plugin:macos:sysctl", "bandwidth", 1);
+ do_tcp_packets = config_get_boolean("plugin:macos:sysctl", "ipv4 TCP packets", 1);
+ do_tcp_errors = config_get_boolean("plugin:macos:sysctl", "ipv4 TCP errors", 1);
+ do_tcp_handshake = config_get_boolean("plugin:macos:sysctl", "ipv4 TCP handshake issues", 1);
+ do_ecn = config_get_boolean_ondemand("plugin:macos:sysctl", "ECN packets", CONFIG_BOOLEAN_AUTO);
+ do_tcpext_syscookies = config_get_boolean_ondemand("plugin:macos:sysctl", "TCP SYN cookies", CONFIG_BOOLEAN_AUTO);
+ do_tcpext_ofo = config_get_boolean_ondemand("plugin:macos:sysctl", "TCP out-of-order queue", CONFIG_BOOLEAN_AUTO);
+ do_tcpext_connaborts = config_get_boolean_ondemand("plugin:macos:sysctl", "TCP connection aborts", CONFIG_BOOLEAN_AUTO);
+ do_udp_packets = config_get_boolean("plugin:macos:sysctl", "ipv4 UDP packets", 1);
+ do_udp_errors = config_get_boolean("plugin:macos:sysctl", "ipv4 UDP errors", 1);
+ do_icmp_packets = config_get_boolean("plugin:macos:sysctl", "ipv4 ICMP packets", 1);
+ do_icmpmsg = config_get_boolean("plugin:macos:sysctl", "ipv4 ICMP messages", 1);
+ do_ip_packets = config_get_boolean("plugin:macos:sysctl", "ipv4 packets", 1);
+ do_ip_fragsout = config_get_boolean("plugin:macos:sysctl", "ipv4 fragments sent", 1);
+ do_ip_fragsin = config_get_boolean("plugin:macos:sysctl", "ipv4 fragments assembly", 1);
+ do_ip_errors = config_get_boolean("plugin:macos:sysctl", "ipv4 errors", 1);
+ do_ip6_packets = config_get_boolean_ondemand("plugin:macos:sysctl", "ipv6 packets", CONFIG_BOOLEAN_AUTO);
+ do_ip6_fragsout = config_get_boolean_ondemand("plugin:macos:sysctl", "ipv6 fragments sent", CONFIG_BOOLEAN_AUTO);
+ do_ip6_fragsin = config_get_boolean_ondemand("plugin:macos:sysctl", "ipv6 fragments assembly", CONFIG_BOOLEAN_AUTO);
+ do_ip6_errors = config_get_boolean_ondemand("plugin:macos:sysctl", "ipv6 errors", CONFIG_BOOLEAN_AUTO);
+ do_icmp6 = config_get_boolean_ondemand("plugin:macos:sysctl", "icmp", CONFIG_BOOLEAN_AUTO);
+ do_icmp6_redir = config_get_boolean_ondemand("plugin:macos:sysctl", "icmp redirects", CONFIG_BOOLEAN_AUTO);
+ do_icmp6_errors = config_get_boolean_ondemand("plugin:macos:sysctl", "icmp errors", CONFIG_BOOLEAN_AUTO);
+ do_icmp6_echos = config_get_boolean_ondemand("plugin:macos:sysctl", "icmp echos", CONFIG_BOOLEAN_AUTO);
+ do_icmp6_router = config_get_boolean_ondemand("plugin:macos:sysctl", "icmp router", CONFIG_BOOLEAN_AUTO);
+ do_icmp6_neighbor = config_get_boolean_ondemand("plugin:macos:sysctl", "icmp neighbor", CONFIG_BOOLEAN_AUTO);
+ do_icmp6_types = config_get_boolean_ondemand("plugin:macos:sysctl", "icmp types", CONFIG_BOOLEAN_AUTO);
+ do_uptime = config_get_boolean("plugin:macos:sysctl", "system uptime", 1);
+ }
+
+ RRDSET *st;
+
+ int system_pagesize = getpagesize(); // wouldn't it be better to get value directly from hw.pagesize?
+ int i, n;
+ int common_error = 0;
+ size_t size;
+
+ // NEEDED BY: do_loadavg
+ static usec_t next_loadavg_dt = 0;
+ struct loadavg sysload;
+
+ // NEEDED BY: do_swap
+ struct xsw_usage swap_usage;
+
+ // NEEDED BY: do_bandwidth
+ int mib[6];
+ static char *ifstatdata = NULL;
+ char *lim, *next;
+ struct if_msghdr *ifm;
+ struct iftot {
+ u_long ift_ibytes;
+ u_long ift_obytes;
+ } iftot = {0, 0};
+
+ // NEEDED BY: do_tcp...
+ struct tcpstat tcpstat;
+ uint64_t tcps_states[TCP_NSTATES];
+
+ // NEEDED BY: do_udp...
+ struct udpstat udpstat;
+
+ // NEEDED BY: do_icmp...
+ struct icmpstat icmpstat;
+ struct icmp_total {
+ u_long msgs_in;
+ u_long msgs_out;
+ } icmp_total = {0, 0};
+
+ // NEEDED BY: do_ip...
+ struct ipstat ipstat;
+
+ // NEEDED BY: do_ip6...
+ /*
+ * Dirty workaround for /usr/include/netinet6/ip6_var.h absence.
+ * Struct ip6stat was copied from bsd/netinet6/ip6_var.h from xnu sources.
+ * Do the same for previously missing scope6_var.h on OS X < 10.11.
+ */
+#define IP6S_SRCRULE_COUNT 16
+
+#if (defined __MAC_OS_X_VERSION_MIN_REQUIRED && __MAC_OS_X_VERSION_MIN_REQUIRED < 101100)
+#ifndef _NETINET6_SCOPE6_VAR_H_
+#define _NETINET6_SCOPE6_VAR_H_
+#include <sys/appleapiopts.h>
+
+#define SCOPE6_ID_MAX 16
+#endif
+#else
+#include <netinet6/scope6_var.h>
+#endif
+
+ struct ip6stat {
+ u_quad_t ip6s_total; /* total packets received */
+ u_quad_t ip6s_tooshort; /* packet too short */
+ u_quad_t ip6s_toosmall; /* not enough data */
+ u_quad_t ip6s_fragments; /* fragments received */
+ u_quad_t ip6s_fragdropped; /* frags dropped(dups, out of space) */
+ u_quad_t ip6s_fragtimeout; /* fragments timed out */
+ u_quad_t ip6s_fragoverflow; /* fragments that exceeded limit */
+ u_quad_t ip6s_forward; /* packets forwarded */
+ u_quad_t ip6s_cantforward; /* packets rcvd for unreachable dest */
+ u_quad_t ip6s_redirectsent; /* packets forwarded on same net */
+ u_quad_t ip6s_delivered; /* datagrams delivered to upper level */
+ u_quad_t ip6s_localout; /* total ip packets generated here */
+ u_quad_t ip6s_odropped; /* lost packets due to nobufs, etc. */
+ u_quad_t ip6s_reassembled; /* total packets reassembled ok */
+ u_quad_t ip6s_atmfrag_rcvd; /* atomic fragments received */
+ u_quad_t ip6s_fragmented; /* datagrams successfully fragmented */
+ u_quad_t ip6s_ofragments; /* output fragments created */
+ u_quad_t ip6s_cantfrag; /* don't fragment flag was set, etc. */
+ u_quad_t ip6s_badoptions; /* error in option processing */
+ u_quad_t ip6s_noroute; /* packets discarded due to no route */
+ u_quad_t ip6s_badvers; /* ip6 version != 6 */
+ u_quad_t ip6s_rawout; /* total raw ip packets generated */
+ u_quad_t ip6s_badscope; /* scope error */
+ u_quad_t ip6s_notmember; /* don't join this multicast group */
+ u_quad_t ip6s_nxthist[256]; /* next header history */
+ u_quad_t ip6s_m1; /* one mbuf */
+ u_quad_t ip6s_m2m[32]; /* two or more mbuf */
+ u_quad_t ip6s_mext1; /* one ext mbuf */
+ u_quad_t ip6s_mext2m; /* two or more ext mbuf */
+ u_quad_t ip6s_exthdrtoolong; /* ext hdr are not continuous */
+ u_quad_t ip6s_nogif; /* no match gif found */
+ u_quad_t ip6s_toomanyhdr; /* discarded due to too many headers */
+
+ /*
+ * statistics for improvement of the source address selection
+ * algorithm:
+ */
+ /* number of times that address selection fails */
+ u_quad_t ip6s_sources_none;
+ /* number of times that an address on the outgoing I/F is chosen */
+ u_quad_t ip6s_sources_sameif[SCOPE6_ID_MAX];
+ /* number of times that an address on a non-outgoing I/F is chosen */
+ u_quad_t ip6s_sources_otherif[SCOPE6_ID_MAX];
+ /*
+ * number of times that an address that has the same scope
+ * from the destination is chosen.
+ */
+ u_quad_t ip6s_sources_samescope[SCOPE6_ID_MAX];
+ /*
+ * number of times that an address that has a different scope
+ * from the destination is chosen.
+ */
+ u_quad_t ip6s_sources_otherscope[SCOPE6_ID_MAX];
+ /* number of times that a deprecated address is chosen */
+ u_quad_t ip6s_sources_deprecated[SCOPE6_ID_MAX];
+
+ u_quad_t ip6s_forward_cachehit;
+ u_quad_t ip6s_forward_cachemiss;
+
+ /* number of times that each rule of source selection is applied. */
+ u_quad_t ip6s_sources_rule[IP6S_SRCRULE_COUNT];
+
+ /* number of times we ignored address on expensive secondary interfaces */
+ u_quad_t ip6s_sources_skip_expensive_secondary_if;
+
+ /* pkt dropped, no mbufs for control data */
+ u_quad_t ip6s_pktdropcntrl;
+
+ /* total packets trimmed/adjusted */
+ u_quad_t ip6s_adj;
+ /* hwcksum info discarded during adjustment */
+ u_quad_t ip6s_adj_hwcsum_clr;
+
+ /* duplicate address detection collisions */
+ u_quad_t ip6s_dad_collide;
+
+ /* DAD NS looped back */
+ u_quad_t ip6s_dad_loopcount;
+ } ip6stat;
+
+ // NEEDED BY: do_icmp6...
+ struct icmp6stat icmp6stat;
+ struct icmp6_total {
+ u_long msgs_in;
+ u_long msgs_out;
+ } icmp6_total = {0, 0};
+
+ // NEEDED BY: do_uptime
+ struct timespec boot_time, cur_time;
+
+ // --------------------------------------------------------------------
+
+ if (next_loadavg_dt <= dt) {
+ if (likely(do_loadavg)) {
+ if (unlikely(GETSYSCTL_BY_NAME("vm.loadavg", sysload))) {
+ do_loadavg = 0;
+ error("DISABLED: system.load");
+ } else {
+
+ st = rrdset_find_bytype_localhost("system", "load");
+ if (unlikely(!st)) {
+ st = rrdset_create_localhost(
+ "system"
+ , "load"
+ , NULL
+ , "load"
+ , NULL
+ , "System Load Average"
+ , "load"
+ , "macos"
+ , "sysctl"
+ , 100
+ , (update_every < MIN_LOADAVG_UPDATE_EVERY) ? MIN_LOADAVG_UPDATE_EVERY : update_every
+ , RRDSET_TYPE_LINE
+ );
+ rrddim_add(st, "load1", NULL, 1, 1000, RRD_ALGORITHM_ABSOLUTE);
+ rrddim_add(st, "load5", NULL, 1, 1000, RRD_ALGORITHM_ABSOLUTE);
+ rrddim_add(st, "load15", NULL, 1, 1000, RRD_ALGORITHM_ABSOLUTE);
+ }
+ else rrdset_next(st);
+
+ rrddim_set(st, "load1", (collected_number) ((double)sysload.ldavg[0] / sysload.fscale * 1000));
+ rrddim_set(st, "load5", (collected_number) ((double)sysload.ldavg[1] / sysload.fscale * 1000));
+ rrddim_set(st, "load15", (collected_number) ((double)sysload.ldavg[2] / sysload.fscale * 1000));
+ rrdset_done(st);
+ }
+ }
+
+ next_loadavg_dt = st->update_every * USEC_PER_SEC;
+ }
+ else next_loadavg_dt -= dt;
+
+ // --------------------------------------------------------------------
+
+ if (likely(do_swap)) {
+ if (unlikely(GETSYSCTL_BY_NAME("vm.swapusage", swap_usage))) {
+ do_swap = 0;
+ error("DISABLED: system.swap");
+ } else {
+ st = rrdset_find_localhost("system.swap");
+ if (unlikely(!st)) {
+ st = rrdset_create_localhost(
+ "system"
+ , "swap"
+ , NULL
+ , "swap"
+ , NULL
+ , "System Swap"
+ , "MB"
+ , "macos"
+ , "sysctl"
+ , 201
+ , update_every
+ , RRDSET_TYPE_STACKED
+ );
+ rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
+
+ rrddim_add(st, "free", NULL, 1, 1048576, RRD_ALGORITHM_ABSOLUTE);
+ rrddim_add(st, "used", NULL, 1, 1048576, RRD_ALGORITHM_ABSOLUTE);
+ }
+ else rrdset_next(st);
+
+ rrddim_set(st, "free", swap_usage.xsu_avail);
+ rrddim_set(st, "used", swap_usage.xsu_used);
+ rrdset_done(st);
+ }
+ }
+
+ // --------------------------------------------------------------------
+
+ if (likely(do_bandwidth)) {
+ mib[0] = CTL_NET;
+ mib[1] = PF_ROUTE;
+ mib[2] = 0;
+ mib[3] = AF_INET;
+ mib[4] = NET_RT_IFLIST2;
+ mib[5] = 0;
+ if (unlikely(sysctl(mib, 6, NULL, &size, NULL, 0))) {
+ error("MACOS: sysctl(%s...) failed: %s", "net interfaces", strerror(errno));
+ do_bandwidth = 0;
+ error("DISABLED: system.ipv4");
+ } else {
+ ifstatdata = reallocz(ifstatdata, size);
+ if (unlikely(sysctl(mib, 6, ifstatdata, &size, NULL, 0) < 0)) {
+ error("MACOS: sysctl(%s...) failed: %s", "net interfaces", strerror(errno));
+ do_bandwidth = 0;
+ error("DISABLED: system.ipv4");
+ } else {
+ lim = ifstatdata + size;
+ iftot.ift_ibytes = iftot.ift_obytes = 0;
+ for (next = ifstatdata; next < lim; ) {
+ ifm = (struct if_msghdr *)next;
+ next += ifm->ifm_msglen;
+
+ if (ifm->ifm_type == RTM_IFINFO2) {
+ struct if_msghdr2 *if2m = (struct if_msghdr2 *)ifm;
+
+ iftot.ift_ibytes += if2m->ifm_data.ifi_ibytes;
+ iftot.ift_obytes += if2m->ifm_data.ifi_obytes;
+ }
+ }
+ st = rrdset_find_localhost("system.ipv4");
+ if (unlikely(!st)) {
+ st = rrdset_create_localhost(
+ "system"
+ , "ipv4"
+ , NULL
+ , "network"
+ , NULL
+ , "IPv4 Bandwidth"
+ , "kilobits/s"
+ , "macos"
+ , "sysctl"
+ , 500
+ , update_every
+ , RRDSET_TYPE_AREA
+ );
+
+ rrddim_add(st, "InOctets", "received", 8, BITS_IN_A_KILOBIT, RRD_ALGORITHM_INCREMENTAL);
+ rrddim_add(st, "OutOctets", "sent", -8, BITS_IN_A_KILOBIT, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else rrdset_next(st);
+
+ rrddim_set(st, "InOctets", iftot.ift_ibytes);
+ rrddim_set(st, "OutOctets", iftot.ift_obytes);
+ rrdset_done(st);
+ }
+ }
+ }
+
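The bandwidth block above is the one place this file walks a raw sysctl MIB instead of reading a named value: it asks PF_ROUTE for the NET_RT_IFLIST2 interface table (sizing the buffer with a first call), then sums the byte counters of every RTM_IFINFO2 record. The same walk as a standalone sketch, without the netdata plumbing:

#include <stdio.h>
#include <stdlib.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <sys/sysctl.h>
#include <net/if.h>
#include <net/route.h>

// Standalone sketch of the NET_RT_IFLIST2 walk used by the bandwidth block above.
int main(void) {
    int mib[6] = { CTL_NET, PF_ROUTE, 0, AF_INET, NET_RT_IFLIST2, 0 };
    size_t size;

    // first call: ask the kernel how big the interface table currently is
    if (sysctl(mib, 6, NULL, &size, NULL, 0)) {
        perror("sysctl (size)");
        return 1;
    }

    char *buf = malloc(size);
    if (!buf) return 1;

    // second call: fetch the table itself
    if (sysctl(mib, 6, buf, &size, NULL, 0) < 0) {
        perror("sysctl (data)");
        free(buf);
        return 1;
    }

    unsigned long long ibytes = 0, obytes = 0;
    char *lim = buf + size;
    for (char *next = buf; next < lim; ) {
        struct if_msghdr *ifm = (struct if_msghdr *)next;
        next += ifm->ifm_msglen;

        if (ifm->ifm_type == RTM_IFINFO2) {             // per-interface statistics record
            struct if_msghdr2 *if2m = (struct if_msghdr2 *)ifm;
            ibytes += if2m->ifm_data.ifi_ibytes;        // bytes received on this interface
            obytes += if2m->ifm_data.ifi_obytes;        // bytes sent on this interface
        }
    }

    printf("received: %llu bytes, sent: %llu bytes\n", ibytes, obytes);
    free(buf);
    return 0;
}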
+ // --------------------------------------------------------------------
+
+ // see http://net-snmp.sourceforge.net/docs/mibs/tcp.html
+ if (likely(do_tcp_packets || do_tcp_errors || do_tcp_handshake || do_tcpext_connaborts || do_tcpext_ofo || do_tcpext_syscookies || do_ecn)) {
+ if (unlikely(GETSYSCTL_BY_NAME("net.inet.tcp.stats", tcpstat))){
+ do_tcp_packets = 0;
+ error("DISABLED: ipv4.tcppackets");
+ do_tcp_errors = 0;
+ error("DISABLED: ipv4.tcperrors");
+ do_tcp_handshake = 0;
+ error("DISABLED: ipv4.tcphandshake");
+ do_tcpext_connaborts = 0;
+ error("DISABLED: ipv4.tcpconnaborts");
+ do_tcpext_ofo = 0;
+ error("DISABLED: ipv4.tcpofo");
+ do_tcpext_syscookies = 0;
+ error("DISABLED: ipv4.tcpsyncookies");
+ do_ecn = 0;
+ error("DISABLED: ipv4.ecnpkts");
+ } else {
+ if (likely(do_tcp_packets)) {
+ st = rrdset_find_localhost("ipv4.tcppackets");
+ if (unlikely(!st)) {
+ st = rrdset_create_localhost(
+ "ipv4"
+ , "tcppackets"
+ , NULL
+ , "tcp"
+ , NULL
+ , "IPv4 TCP Packets"
+ , "packets/s"
+ , "macos"
+ , "sysctl"
+ , 2600
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ rrddim_add(st, "InSegs", "received", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rrddim_add(st, "OutSegs", "sent", -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ } else
+ rrdset_next(st);
+
+ rrddim_set(st, "InSegs", tcpstat.tcps_rcvtotal);
+ rrddim_set(st, "OutSegs", tcpstat.tcps_sndtotal);
+ rrdset_done(st);
+ }
+
+ // --------------------------------------------------------------------
+
+ if (likely(do_tcp_errors)) {
+ st = rrdset_find_localhost("ipv4.tcperrors");
+ if (unlikely(!st)) {
+ st = rrdset_create_localhost(
+ "ipv4"
+ , "tcperrors"
+ , NULL
+ , "tcp"
+ , NULL
+ , "IPv4 TCP Errors"
+ , "packets/s"
+ , "macos"
+ , "sysctl"
+ , 2700
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+ rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
+
+ rrddim_add(st, "InErrs", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rrddim_add(st, "InCsumErrors", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rrddim_add(st, "RetransSegs", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ } else
+ rrdset_next(st);
+
+ rrddim_set(st, "InErrs", tcpstat.tcps_rcvbadoff + tcpstat.tcps_rcvshort);
+ rrddim_set(st, "InCsumErrors", tcpstat.tcps_rcvbadsum);
+ rrddim_set(st, "RetransSegs", tcpstat.tcps_sndrexmitpack);
+ rrdset_done(st);
+ }
+
+ // --------------------------------------------------------------------
+
+ if (likely(do_tcp_handshake)) {
+ st = rrdset_find_localhost("ipv4.tcphandshake");
+ if (unlikely(!st)) {
+ st = rrdset_create_localhost(
+ "ipv4"
+ , "tcphandshake"
+ , NULL
+ , "tcp"
+ , NULL
+ , "IPv4 TCP Handshake Issues"
+ , "events/s"
+ , "macos"
+ , "sysctl"
+ , 2900
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+ rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
+
+ rrddim_add(st, "EstabResets", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rrddim_add(st, "ActiveOpens", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rrddim_add(st, "PassiveOpens", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rrddim_add(st, "AttemptFails", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ } else
+ rrdset_next(st);
+
+ rrddim_set(st, "EstabResets", tcpstat.tcps_drops);
+ rrddim_set(st, "ActiveOpens", tcpstat.tcps_connattempt);
+ rrddim_set(st, "PassiveOpens", tcpstat.tcps_accepts);
+ rrddim_set(st, "AttemptFails", tcpstat.tcps_conndrops);
+ rrdset_done(st);
+ }
+
+ // --------------------------------------------------------------------
+
+ if (do_tcpext_connaborts == CONFIG_BOOLEAN_YES || (do_tcpext_connaborts == CONFIG_BOOLEAN_AUTO && (tcpstat.tcps_rcvpackafterwin || tcpstat.tcps_rcvafterclose || tcpstat.tcps_rcvmemdrop || tcpstat.tcps_persistdrop))) {
+ do_tcpext_connaborts = CONFIG_BOOLEAN_YES;
+ st = rrdset_find_localhost("ipv4.tcpconnaborts");
+ if (unlikely(!st)) {
+ st = rrdset_create_localhost(
+ "ipv4"
+ , "tcpconnaborts"
+ , NULL
+ , "tcp"
+ , NULL
+ , "TCP Connection Aborts"
+ , "connections/s"
+ , "macos"
+ , "sysctl"
+ , 3010
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ rrddim_add(st, "TCPAbortOnData", "baddata", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rrddim_add(st, "TCPAbortOnClose", "userclosed", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rrddim_add(st, "TCPAbortOnMemory", "nomemory", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rrddim_add(st, "TCPAbortOnTimeout", "timeout", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else rrdset_next(st);
+
+ rrddim_set(st, "TCPAbortOnData", tcpstat.tcps_rcvpackafterwin);
+ rrddim_set(st, "TCPAbortOnClose", tcpstat.tcps_rcvafterclose);
+ rrddim_set(st, "TCPAbortOnMemory", tcpstat.tcps_rcvmemdrop);
+ rrddim_set(st, "TCPAbortOnTimeout", tcpstat.tcps_persistdrop);
+ rrdset_done(st);
+ }
+
+ // --------------------------------------------------------------------
+
+ if (do_tcpext_ofo == CONFIG_BOOLEAN_YES || (do_tcpext_ofo == CONFIG_BOOLEAN_AUTO && tcpstat.tcps_rcvoopack)) {
+ do_tcpext_ofo = CONFIG_BOOLEAN_YES;
+ st = rrdset_find_localhost("ipv4.tcpofo");
+ if (unlikely(!st)) {
+ st = rrdset_create_localhost(
+ "ipv4"
+ , "tcpofo"
+ , NULL
+ , "tcp"
+ , NULL
+ , "TCP Out-Of-Order Queue"
+ , "packets/s"
+ , "macos"
+ , "sysctl"
+ , 3050
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ rrddim_add(st, "TCPOFOQueue", "inqueue", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else rrdset_next(st);
+
+ rrddim_set(st, "TCPOFOQueue", tcpstat.tcps_rcvoopack);
+ rrdset_done(st);
+ }
+
+ // --------------------------------------------------------------------
+
+ if (do_tcpext_syscookies == CONFIG_BOOLEAN_YES || (do_tcpext_syscookies == CONFIG_BOOLEAN_AUTO && (tcpstat.tcps_sc_sendcookie || tcpstat.tcps_sc_recvcookie || tcpstat.tcps_sc_zonefail))) {
+ do_tcpext_syscookies = CONFIG_BOOLEAN_YES;
+
+ st = rrdset_find_localhost("ipv4.tcpsyncookies");
+ if (unlikely(!st)) {
+ st = rrdset_create_localhost(
+ "ipv4"
+ , "tcpsyncookies"
+ , NULL
+ , "tcp"
+ , NULL
+ , "TCP SYN Cookies"
+ , "packets/s"
+ , "macos"
+ , "sysctl"
+ , 3100
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ rrddim_add(st, "SyncookiesRecv", "received", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rrddim_add(st, "SyncookiesSent", "sent", -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rrddim_add(st, "SyncookiesFailed", "failed", -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else rrdset_next(st);
+
+ rrddim_set(st, "SyncookiesRecv", tcpstat.tcps_sc_recvcookie);
+ rrddim_set(st, "SyncookiesSent", tcpstat.tcps_sc_sendcookie);
+ rrddim_set(st, "SyncookiesFailed", tcpstat.tcps_sc_zonefail);
+ rrdset_done(st);
+ }
+
+ // --------------------------------------------------------------------
+
+
+#if (defined __MAC_OS_X_VERSION_MIN_REQUIRED && __MAC_OS_X_VERSION_MIN_REQUIRED >= 101100)
+ if (do_ecn == CONFIG_BOOLEAN_YES || (do_ecn == CONFIG_BOOLEAN_AUTO && (tcpstat.tcps_ecn_recv_ce || tcpstat.tcps_ecn_not_supported))) {
+ do_ecn = CONFIG_BOOLEAN_YES;
+ st = rrdset_find_localhost("ipv4.ecnpkts");
+ if (unlikely(!st)) {
+ st = rrdset_create_localhost(
+ "ipv4"
+ , "ecnpkts"
+ , NULL
+ , "ecn"
+ , NULL
+ , "IPv4 ECN Statistics"
+ , "packets/s"
+ , "macos"
+ , "sysctl"
+ , 8700
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+ rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
+
+ rrddim_add(st, "InCEPkts", "CEP", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rrddim_add(st, "InNoECTPkts", "NoECTP", -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else rrdset_next(st);
+
+ rrddim_set(st, "InCEPkts", tcpstat.tcps_ecn_recv_ce);
+ rrddim_set(st, "InNoECTPkts", tcpstat.tcps_ecn_not_supported);
+ rrdset_done(st);
+ }
+#endif
+
+ }
+ }
+
+ // --------------------------------------------------------------------
+
+ // see http://net-snmp.sourceforge.net/docs/mibs/udp.html
+ if (likely(do_udp_packets || do_udp_errors)) {
+ if (unlikely(GETSYSCTL_BY_NAME("net.inet.udp.stats", udpstat))) {
+ do_udp_packets = 0;
+ error("DISABLED: ipv4.udppackets");
+ do_udp_errors = 0;
+ error("DISABLED: ipv4.udperrors");
+ } else {
+ if (likely(do_udp_packets)) {
+ st = rrdset_find_localhost("ipv4.udppackets");
+ if (unlikely(!st)) {
+ st = rrdset_create_localhost(
+ "ipv4"
+ , "udppackets"
+ , NULL
+ , "udp"
+ , NULL
+ , "IPv4 UDP Packets"
+ , "packets/s"
+ , "macos"
+ , "sysctl"
+ , 2601
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ rrddim_add(st, "InDatagrams", "received", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rrddim_add(st, "OutDatagrams", "sent", -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ } else
+ rrdset_next(st);
+
+ rrddim_set(st, "InDatagrams", udpstat.udps_ipackets);
+ rrddim_set(st, "OutDatagrams", udpstat.udps_opackets);
+ rrdset_done(st);
+ }
+
+ // --------------------------------------------------------------------
+
+ if (likely(do_udp_errors)) {
+ st = rrdset_find_localhost("ipv4.udperrors");
+ if (unlikely(!st)) {
+ st = rrdset_create_localhost(
+ "ipv4"
+ , "udperrors"
+ , NULL
+ , "udp"
+ , NULL
+ , "IPv4 UDP Errors"
+ , "events/s"
+ , "macos"
+ , "sysctl"
+ , 2701
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+ rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
+
+ rrddim_add(st, "RcvbufErrors", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rrddim_add(st, "InErrors", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rrddim_add(st, "NoPorts", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rrddim_add(st, "InCsumErrors", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+#if (defined __MAC_OS_X_VERSION_MIN_REQUIRED && __MAC_OS_X_VERSION_MIN_REQUIRED >= 1090)
+ rrddim_add(st, "IgnoredMulti", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+#endif
+ } else
+ rrdset_next(st);
+
+ rrddim_set(st, "InErrors", udpstat.udps_hdrops + udpstat.udps_badlen);
+ rrddim_set(st, "NoPorts", udpstat.udps_noport);
+ rrddim_set(st, "RcvbufErrors", udpstat.udps_fullsock);
+#if (defined __MAC_OS_X_VERSION_MIN_REQUIRED && __MAC_OS_X_VERSION_MIN_REQUIRED >= 1090)
+ rrddim_set(st, "InCsumErrors", udpstat.udps_badsum + udpstat.udps_nosum);
+ rrddim_set(st, "IgnoredMulti", udpstat.udps_filtermcast);
+#else
+ rrddim_set(st, "InCsumErrors", udpstat.udps_badsum);
+#endif
+ rrdset_done(st);
+ }
+ }
+ }
+
+ // --------------------------------------------------------------------
+
+ if (likely(do_icmp_packets || do_icmpmsg)) {
+ if (unlikely(GETSYSCTL_BY_NAME("net.inet.icmp.stats", icmpstat))) {
+ do_icmp_packets = 0;
+ error("DISABLED: ipv4.icmp");
+ error("DISABLED: ipv4.icmp_errors");
+ do_icmpmsg = 0;
+ error("DISABLED: ipv4.icmpmsg");
+ } else {
+ for (i = 0; i <= ICMP_MAXTYPE; i++) {
+ icmp_total.msgs_in += icmpstat.icps_inhist[i];
+ icmp_total.msgs_out += icmpstat.icps_outhist[i];
+ }
+ icmp_total.msgs_in += icmpstat.icps_badcode + icmpstat.icps_badlen + icmpstat.icps_checksum + icmpstat.icps_tooshort;
+
+ // --------------------------------------------------------------------
+
+ if (likely(do_icmp_packets)) {
+ st = rrdset_find_localhost("ipv4.icmp");
+ if (unlikely(!st)) {
+ st = rrdset_create_localhost(
+ "ipv4"
+ , "icmp"
+ , NULL
+ , "icmp"
+ , NULL
+ , "IPv4 ICMP Packets"
+ , "packets/s"
+ , "macos"
+ , "sysctl"
+ , 2602
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ rrddim_add(st, "InMsgs", "received", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rrddim_add(st, "OutMsgs", "sent", -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ } else
+ rrdset_next(st);
+
+ rrddim_set(st, "InMsgs", icmp_total.msgs_in);
+ rrddim_set(st, "OutMsgs", icmp_total.msgs_out);
+
+ rrdset_done(st);
+
+ // --------------------------------------------------------------------
+
+ st = rrdset_find_localhost("ipv4.icmp_errors");
+ if (unlikely(!st)) {
+ st = rrdset_create_localhost(
+ "ipv4"
+ , "icmp_errors"
+ , NULL
+ , "icmp"
+ , NULL
+ , "IPv4 ICMP Errors"
+ , "packets/s"
+ , "macos"
+ , "sysctl"
+ , 2603
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ rrddim_add(st, "InErrors", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rrddim_add(st, "OutErrors", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rrddim_add(st, "InCsumErrors", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ } else
+ rrdset_next(st);
+
+ rrddim_set(st, "InErrors", icmpstat.icps_badcode + icmpstat.icps_badlen + icmpstat.icps_checksum + icmpstat.icps_tooshort);
+ rrddim_set(st, "OutErrors", icmpstat.icps_error);
+ rrddim_set(st, "InCsumErrors", icmpstat.icps_checksum);
+
+ rrdset_done(st);
+ }
+
+ // --------------------------------------------------------------------
+
+ if (likely(do_icmpmsg)) {
+ st = rrdset_find_localhost("ipv4.icmpmsg");
+ if (unlikely(!st)) {
+ st = rrdset_create_localhost(
+ "ipv4"
+ , "icmpmsg"
+ , NULL
+ , "icmp"
+ , NULL
+ , "IPv4 ICMP Messages"
+ , "packets/s"
+ , "macos"
+ , "sysctl"
+ , 2604
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ rrddim_add(st, "InEchoReps", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rrddim_add(st, "OutEchoReps", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rrddim_add(st, "InEchos", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rrddim_add(st, "OutEchos", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ } else
+ rrdset_next(st);
+
+ rrddim_set(st, "InEchoReps", icmpstat.icps_inhist[ICMP_ECHOREPLY]);
+ rrddim_set(st, "OutEchoReps", icmpstat.icps_outhist[ICMP_ECHOREPLY]);
+ rrddim_set(st, "InEchos", icmpstat.icps_inhist[ICMP_ECHO]);
+ rrddim_set(st, "OutEchos", icmpstat.icps_outhist[ICMP_ECHO]);
+
+ rrdset_done(st);
+ }
+ }
+ }
+
+ // --------------------------------------------------------------------
+
+ // see also http://net-snmp.sourceforge.net/docs/mibs/ip.html
+ if (likely(do_ip_packets || do_ip_fragsout || do_ip_fragsin || do_ip_errors)) {
+ if (unlikely(GETSYSCTL_BY_NAME("net.inet.ip.stats", ipstat))) {
+ do_ip_packets = 0;
+ error("DISABLED: ipv4.packets");
+ do_ip_fragsout = 0;
+ error("DISABLED: ipv4.fragsout");
+ do_ip_fragsin = 0;
+ error("DISABLED: ipv4.fragsin");
+ do_ip_errors = 0;
+ error("DISABLED: ipv4.errors");
+ } else {
+ if (likely(do_ip_packets)) {
+ st = rrdset_find_localhost("ipv4.packets");
+ if (unlikely(!st)) {
+ st = rrdset_create_localhost(
+ "ipv4"
+ , "packets"
+ , NULL
+ , "packets"
+ , NULL
+ , "IPv4 Packets"
+ , "packets/s"
+ , "macos"
+ , "sysctl"
+ , 3000
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ rrddim_add(st, "InReceives", "received", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rrddim_add(st, "OutRequests", "sent", -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rrddim_add(st, "ForwDatagrams", "forwarded", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rrddim_add(st, "InDelivers", "delivered", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ } else
+ rrdset_next(st);
+
+ rrddim_set(st, "OutRequests", ipstat.ips_localout);
+ rrddim_set(st, "InReceives", ipstat.ips_total);
+ rrddim_set(st, "ForwDatagrams", ipstat.ips_forward);
+ rrddim_set(st, "InDelivers", ipstat.ips_delivered);
+ rrdset_done(st);
+ }
+
+ // --------------------------------------------------------------------
+
+ if (likely(do_ip_fragsout)) {
+ st = rrdset_find_localhost("ipv4.fragsout");
+ if (unlikely(!st)) {
+ st = rrdset_create_localhost(
+ "ipv4"
+ , "fragsout"
+ , NULL
+ , "fragments"
+ , NULL
+ , "IPv4 Fragments Sent"
+ , "packets/s"
+ , "macos"
+ , "sysctl"
+ , 3010
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+ rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
+
+ rrddim_add(st, "FragOKs", "ok", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rrddim_add(st, "FragFails", "failed", -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rrddim_add(st, "FragCreates", "created", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ } else
+ rrdset_next(st);
+
+ rrddim_set(st, "FragOKs", ipstat.ips_fragmented);
+ rrddim_set(st, "FragFails", ipstat.ips_cantfrag);
+ rrddim_set(st, "FragCreates", ipstat.ips_ofragments);
+ rrdset_done(st);
+ }
+
+ // --------------------------------------------------------------------
+
+ if (likely(do_ip_fragsin)) {
+ st = rrdset_find_localhost("ipv4.fragsin");
+ if (unlikely(!st)) {
+ st = rrdset_create_localhost(
+ "ipv4"
+ , "fragsin"
+ , NULL
+ , "fragments"
+ , NULL
+ , "IPv4 Fragments Reassembly"
+ , "packets/s"
+ , "macos"
+ , "sysctl"
+ , 3011
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+ rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
+
+ rrddim_add(st, "ReasmOKs", "ok", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rrddim_add(st, "ReasmFails", "failed", -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rrddim_add(st, "ReasmReqds", "all", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ } else
+ rrdset_next(st);
+
+ rrddim_set(st, "ReasmOKs", ipstat.ips_fragments);
+ rrddim_set(st, "ReasmFails", ipstat.ips_fragdropped);
+ rrddim_set(st, "ReasmReqds", ipstat.ips_reassembled);
+ rrdset_done(st);
+ }
+
+ // --------------------------------------------------------------------
+
+ if (likely(do_ip_errors)) {
+ st = rrdset_find_localhost("ipv4.errors");
+ if (unlikely(!st)) {
+ st = rrdset_create_localhost(
+ "ipv4"
+ , "errors"
+ , NULL
+ , "errors"
+ , NULL
+ , "IPv4 Errors"
+ , "packets/s"
+ , "macos"
+ , "sysctl"
+ , 3002
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+ rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
+
+ rrddim_add(st, "InDiscards", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rrddim_add(st, "OutDiscards", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+
+ rrddim_add(st, "InHdrErrors", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rrddim_add(st, "OutNoRoutes", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+
+ rrddim_add(st, "InAddrErrors", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rrddim_add(st, "InUnknownProtos", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ } else
+ rrdset_next(st);
+
+ rrddim_set(st, "InDiscards", ipstat.ips_badsum + ipstat.ips_tooshort + ipstat.ips_toosmall + ipstat.ips_toolong);
+ rrddim_set(st, "OutDiscards", ipstat.ips_odropped);
+ rrddim_set(st, "InHdrErrors", ipstat.ips_badhlen + ipstat.ips_badlen + ipstat.ips_badoptions + ipstat.ips_badvers);
+ rrddim_set(st, "InAddrErrors", ipstat.ips_badaddr);
+ rrddim_set(st, "InUnknownProtos", ipstat.ips_noproto);
+ rrddim_set(st, "OutNoRoutes", ipstat.ips_noroute);
+ rrdset_done(st);
+ }
+ }
+ }
+
+ // --------------------------------------------------------------------
+
+ if (likely(do_ip6_packets || do_ip6_fragsout || do_ip6_fragsin || do_ip6_errors)) {
+ if (unlikely(GETSYSCTL_BY_NAME("net.inet6.ip6.stats", ip6stat))) {
+ do_ip6_packets = 0;
+ error("DISABLED: ipv6.packets");
+ do_ip6_fragsout = 0;
+ error("DISABLED: ipv6.fragsout");
+ do_ip6_fragsin = 0;
+ error("DISABLED: ipv6.fragsin");
+ do_ip6_errors = 0;
+ error("DISABLED: ipv6.errors");
+ } else {
+ if (do_ip6_packets == CONFIG_BOOLEAN_YES || (do_ip6_packets == CONFIG_BOOLEAN_AUTO &&
+ (ip6stat.ip6s_localout || ip6stat.ip6s_total ||
+ ip6stat.ip6s_forward || ip6stat.ip6s_delivered))) {
+ do_ip6_packets = CONFIG_BOOLEAN_YES;
+ st = rrdset_find_localhost("ipv6.packets");
+ if (unlikely(!st)) {
+ st = rrdset_create_localhost(
+ "ipv6"
+ , "packets"
+ , NULL
+ , "packets"
+ , NULL
+ , "IPv6 Packets"
+ , "packets/s"
+ , "macos"
+ , "sysctl"
+ , 3000
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ rrddim_add(st, "received", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rrddim_add(st, "sent", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rrddim_add(st, "forwarded", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rrddim_add(st, "delivers", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ } else
+ rrdset_next(st);
+
+ rrddim_set(st, "sent", ip6stat.ip6s_localout);
+ rrddim_set(st, "received", ip6stat.ip6s_total);
+ rrddim_set(st, "forwarded", ip6stat.ip6s_forward);
+ rrddim_set(st, "delivers", ip6stat.ip6s_delivered);
+ rrdset_done(st);
+ }
+
+ // --------------------------------------------------------------------
+
+ if (do_ip6_fragsout == CONFIG_BOOLEAN_YES || (do_ip6_fragsout == CONFIG_BOOLEAN_AUTO &&
+ (ip6stat.ip6s_fragmented || ip6stat.ip6s_cantfrag ||
+ ip6stat.ip6s_ofragments))) {
+ do_ip6_fragsout = CONFIG_BOOLEAN_YES;
+ st = rrdset_find_localhost("ipv6.fragsout");
+ if (unlikely(!st)) {
+ st = rrdset_create_localhost(
+ "ipv6"
+ , "fragsout"
+ , NULL
+ , "fragments"
+ , NULL
+ , "IPv6 Fragments Sent"
+ , "packets/s"
+ , "macos"
+ , "sysctl"
+ , 3010
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+ rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
+
+ rrddim_add(st, "ok", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rrddim_add(st, "failed", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rrddim_add(st, "all", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ } else
+ rrdset_next(st);
+
+ rrddim_set(st, "ok", ip6stat.ip6s_fragmented);
+ rrddim_set(st, "failed", ip6stat.ip6s_cantfrag);
+ rrddim_set(st, "all", ip6stat.ip6s_ofragments);
+ rrdset_done(st);
+ }
+
+ // --------------------------------------------------------------------
+
+ if (do_ip6_fragsin == CONFIG_BOOLEAN_YES || (do_ip6_fragsin == CONFIG_BOOLEAN_AUTO &&
+ (ip6stat.ip6s_reassembled || ip6stat.ip6s_fragdropped ||
+ ip6stat.ip6s_fragtimeout || ip6stat.ip6s_fragments))) {
+ do_ip6_fragsin = CONFIG_BOOLEAN_YES;
+ st = rrdset_find_localhost("ipv6.fragsin");
+ if (unlikely(!st)) {
+ st = rrdset_create_localhost(
+ "ipv6"
+ , "fragsin"
+ , NULL
+ , "fragments"
+ , NULL
+ , "IPv6 Fragments Reassembly"
+ , "packets/s"
+ , "macos"
+ , "sysctl"
+ , 3011
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+ rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
+
+ rrddim_add(st, "ok", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rrddim_add(st, "failed", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rrddim_add(st, "timeout", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rrddim_add(st, "all", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ } else
+ rrdset_next(st);
+
+ rrddim_set(st, "ok", ip6stat.ip6s_reassembled);
+ rrddim_set(st, "failed", ip6stat.ip6s_fragdropped);
+ rrddim_set(st, "timeout", ip6stat.ip6s_fragtimeout);
+ rrddim_set(st, "all", ip6stat.ip6s_fragments);
+ rrdset_done(st);
+ }
+
+ // --------------------------------------------------------------------
+
+ if (do_ip6_errors == CONFIG_BOOLEAN_YES || (do_ip6_errors == CONFIG_BOOLEAN_AUTO && (
+ ip6stat.ip6s_toosmall ||
+ ip6stat.ip6s_odropped ||
+ ip6stat.ip6s_badoptions ||
+ ip6stat.ip6s_badvers ||
+ ip6stat.ip6s_exthdrtoolong ||
+ ip6stat.ip6s_sources_none ||
+ ip6stat.ip6s_tooshort ||
+ ip6stat.ip6s_cantforward ||
+ ip6stat.ip6s_noroute))) {
+ do_ip6_errors = CONFIG_BOOLEAN_YES;
+ st = rrdset_find_localhost("ipv6.errors");
+ if (unlikely(!st)) {
+ st = rrdset_create_localhost(
+ "ipv6"
+ , "errors"
+ , NULL
+ , "errors"
+ , NULL
+ , "IPv6 Errors"
+ , "packets/s"
+ , "macos"
+ , "sysctl"
+ , 3002
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+ rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
+
+ rrddim_add(st, "InDiscards", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rrddim_add(st, "OutDiscards", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+
+ rrddim_add(st, "InHdrErrors", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rrddim_add(st, "InAddrErrors", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rrddim_add(st, "InTruncatedPkts", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rrddim_add(st, "InNoRoutes", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+
+ rrddim_add(st, "OutNoRoutes", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ } else
+ rrdset_next(st);
+
+ rrddim_set(st, "InDiscards", ip6stat.ip6s_toosmall);
+ rrddim_set(st, "OutDiscards", ip6stat.ip6s_odropped);
+
+ rrddim_set(st, "InHdrErrors",
+ ip6stat.ip6s_badoptions + ip6stat.ip6s_badvers + ip6stat.ip6s_exthdrtoolong);
+ rrddim_set(st, "InAddrErrors", ip6stat.ip6s_sources_none);
+ rrddim_set(st, "InTruncatedPkts", ip6stat.ip6s_tooshort);
+ rrddim_set(st, "InNoRoutes", ip6stat.ip6s_cantforward);
+
+ rrddim_set(st, "OutNoRoutes", ip6stat.ip6s_noroute);
+ rrdset_done(st);
+ }
+ }
+ }
+
+ // --------------------------------------------------------------------
+
+ if (likely(do_icmp6 || do_icmp6_redir || do_icmp6_errors || do_icmp6_echos || do_icmp6_router || do_icmp6_neighbor || do_icmp6_types)) {
+ if (unlikely(GETSYSCTL_BY_NAME("net.inet6.icmp6.stats", icmp6stat))) {
+ do_icmp6 = 0;
+ error("DISABLED: ipv6.icmp");
+ } else {
+ for (i = 0; i <= ICMP6_MAXTYPE; i++) {
+ icmp6_total.msgs_in += icmp6stat.icp6s_inhist[i];
+ icmp6_total.msgs_out += icmp6stat.icp6s_outhist[i];
+ }
+ icmp6_total.msgs_in += icmp6stat.icp6s_badcode + icmp6stat.icp6s_badlen + icmp6stat.icp6s_checksum + icmp6stat.icp6s_tooshort;
+ if (do_icmp6 == CONFIG_BOOLEAN_YES || (do_icmp6 == CONFIG_BOOLEAN_AUTO && (icmp6_total.msgs_in || icmp6_total.msgs_out))) {
+ do_icmp6 = CONFIG_BOOLEAN_YES;
+ st = rrdset_find_localhost("ipv6.icmp");
+ if (unlikely(!st)) {
+ st = rrdset_create_localhost(
+ "ipv6"
+ , "icmp"
+ , NULL
+ , "icmp"
+ , NULL
+ , "IPv6 ICMP Messages"
+ , "messages/s"
+ , "macos"
+ , "sysctl"
+ , 10000
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ rrddim_add(st, "received", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rrddim_add(st, "sent", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ } else
+ rrdset_next(st);
+
+                rrddim_set(st, "sent", icmp6_total.msgs_out);
+                rrddim_set(st, "received", icmp6_total.msgs_in);
+ rrdset_done(st);
+ }
+
+ // --------------------------------------------------------------------
+
+ if (do_icmp6_redir == CONFIG_BOOLEAN_YES || (do_icmp6_redir == CONFIG_BOOLEAN_AUTO && (icmp6stat.icp6s_inhist[ND_REDIRECT] || icmp6stat.icp6s_outhist[ND_REDIRECT]))) {
+ do_icmp6_redir = CONFIG_BOOLEAN_YES;
+ st = rrdset_find_localhost("ipv6.icmpredir");
+ if (unlikely(!st)) {
+ st = rrdset_create_localhost(
+ "ipv6"
+ , "icmpredir"
+ , NULL
+ , "icmp"
+ , NULL
+ , "IPv6 ICMP Redirects"
+ , "redirects/s"
+ , "macos"
+ , "sysctl"
+ , 10050
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ rrddim_add(st, "received", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rrddim_add(st, "sent", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ } else
+ rrdset_next(st);
+
+                rrddim_set(st, "sent", icmp6stat.icp6s_outhist[ND_REDIRECT]);
+                rrddim_set(st, "received", icmp6stat.icp6s_inhist[ND_REDIRECT]);
+ rrdset_done(st);
+ }
+
+ // --------------------------------------------------------------------
+
+ if (do_icmp6_errors == CONFIG_BOOLEAN_YES || (do_icmp6_errors == CONFIG_BOOLEAN_AUTO && (
+ icmp6stat.icp6s_badcode ||
+ icmp6stat.icp6s_badlen ||
+ icmp6stat.icp6s_checksum ||
+ icmp6stat.icp6s_tooshort ||
+ icmp6stat.icp6s_error ||
+ icmp6stat.icp6s_inhist[ICMP6_DST_UNREACH] ||
+ icmp6stat.icp6s_inhist[ICMP6_TIME_EXCEEDED] ||
+ icmp6stat.icp6s_inhist[ICMP6_PARAM_PROB] ||
+ icmp6stat.icp6s_outhist[ICMP6_DST_UNREACH] ||
+ icmp6stat.icp6s_outhist[ICMP6_TIME_EXCEEDED] ||
+ icmp6stat.icp6s_outhist[ICMP6_PARAM_PROB]))) {
+ do_icmp6_errors = CONFIG_BOOLEAN_YES;
+ st = rrdset_find_localhost("ipv6.icmperrors");
+ if (unlikely(!st)) {
+ st = rrdset_create_localhost(
+ "ipv6"
+ , "icmperrors"
+ , NULL
+ , "icmp"
+ , NULL
+ , "IPv6 ICMP Errors"
+ , "errors/s"
+ , "macos"
+ , "sysctl"
+ , 10100
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ rrddim_add(st, "InErrors", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rrddim_add(st, "OutErrors", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+
+ rrddim_add(st, "InCsumErrors", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rrddim_add(st, "InDestUnreachs", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rrddim_add(st, "InPktTooBigs", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rrddim_add(st, "InTimeExcds", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rrddim_add(st, "InParmProblems", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rrddim_add(st, "OutDestUnreachs", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rrddim_add(st, "OutTimeExcds", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rrddim_add(st, "OutParmProblems", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ } else
+ rrdset_next(st);
+
+ rrddim_set(st, "InErrors", icmp6stat.icp6s_badcode + icmp6stat.icp6s_badlen + icmp6stat.icp6s_checksum + icmp6stat.icp6s_tooshort);
+ rrddim_set(st, "OutErrors", icmp6stat.icp6s_error);
+ rrddim_set(st, "InCsumErrors", icmp6stat.icp6s_checksum);
+ rrddim_set(st, "InDestUnreachs", icmp6stat.icp6s_inhist[ICMP6_DST_UNREACH]);
+ rrddim_set(st, "InPktTooBigs", icmp6stat.icp6s_badlen);
+ rrddim_set(st, "InTimeExcds", icmp6stat.icp6s_inhist[ICMP6_TIME_EXCEEDED]);
+ rrddim_set(st, "InParmProblems", icmp6stat.icp6s_inhist[ICMP6_PARAM_PROB]);
+ rrddim_set(st, "OutDestUnreachs", icmp6stat.icp6s_outhist[ICMP6_DST_UNREACH]);
+ rrddim_set(st, "OutTimeExcds", icmp6stat.icp6s_outhist[ICMP6_TIME_EXCEEDED]);
+ rrddim_set(st, "OutParmProblems", icmp6stat.icp6s_outhist[ICMP6_PARAM_PROB]);
+ rrdset_done(st);
+ }
+
+ // --------------------------------------------------------------------
+
+ if (do_icmp6_echos == CONFIG_BOOLEAN_YES || (do_icmp6_echos == CONFIG_BOOLEAN_AUTO && (
+ icmp6stat.icp6s_inhist[ICMP6_ECHO_REQUEST] ||
+ icmp6stat.icp6s_outhist[ICMP6_ECHO_REQUEST] ||
+ icmp6stat.icp6s_inhist[ICMP6_ECHO_REPLY] ||
+ icmp6stat.icp6s_outhist[ICMP6_ECHO_REPLY]))) {
+ do_icmp6_echos = CONFIG_BOOLEAN_YES;
+ st = rrdset_find_localhost("ipv6.icmpechos");
+ if (unlikely(!st)) {
+ st = rrdset_create_localhost(
+ "ipv6"
+ , "icmpechos"
+ , NULL
+ , "icmp"
+ , NULL
+ , "IPv6 ICMP Echo"
+ , "messages/s"
+ , "macos"
+ , "sysctl"
+ , 10200
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ rrddim_add(st, "InEchos", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rrddim_add(st, "OutEchos", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rrddim_add(st, "InEchoReplies", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rrddim_add(st, "OutEchoReplies", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ } else
+ rrdset_next(st);
+
+ rrddim_set(st, "InEchos", icmp6stat.icp6s_inhist[ICMP6_ECHO_REQUEST]);
+ rrddim_set(st, "OutEchos", icmp6stat.icp6s_outhist[ICMP6_ECHO_REQUEST]);
+ rrddim_set(st, "InEchoReplies", icmp6stat.icp6s_inhist[ICMP6_ECHO_REPLY]);
+ rrddim_set(st, "OutEchoReplies", icmp6stat.icp6s_outhist[ICMP6_ECHO_REPLY]);
+ rrdset_done(st);
+ }
+
+ // --------------------------------------------------------------------
+
+ if (do_icmp6_router == CONFIG_BOOLEAN_YES || (do_icmp6_router == CONFIG_BOOLEAN_AUTO && (
+ icmp6stat.icp6s_inhist[ND_ROUTER_SOLICIT] ||
+ icmp6stat.icp6s_outhist[ND_ROUTER_SOLICIT] ||
+ icmp6stat.icp6s_inhist[ND_ROUTER_ADVERT] ||
+ icmp6stat.icp6s_outhist[ND_ROUTER_ADVERT]))) {
+ do_icmp6_router = CONFIG_BOOLEAN_YES;
+ st = rrdset_find_localhost("ipv6.icmprouter");
+ if (unlikely(!st)) {
+ st = rrdset_create_localhost(
+ "ipv6"
+ , "icmprouter"
+ , NULL
+ , "icmp"
+ , NULL
+ , "IPv6 Router Messages"
+ , "messages/s"
+ , "macos"
+ , "sysctl"
+ , 10400
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ rrddim_add(st, "InSolicits", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rrddim_add(st, "OutSolicits", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rrddim_add(st, "InAdvertisements", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rrddim_add(st, "OutAdvertisements", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ } else
+ rrdset_next(st);
+
+ rrddim_set(st, "InSolicits", icmp6stat.icp6s_inhist[ND_ROUTER_SOLICIT]);
+ rrddim_set(st, "OutSolicits", icmp6stat.icp6s_outhist[ND_ROUTER_SOLICIT]);
+ rrddim_set(st, "InAdvertisements", icmp6stat.icp6s_inhist[ND_ROUTER_ADVERT]);
+ rrddim_set(st, "OutAdvertisements", icmp6stat.icp6s_outhist[ND_ROUTER_ADVERT]);
+ rrdset_done(st);
+ }
+
+ // --------------------------------------------------------------------
+
+ if (do_icmp6_neighbor == CONFIG_BOOLEAN_YES || (do_icmp6_neighbor == CONFIG_BOOLEAN_AUTO && (
+ icmp6stat.icp6s_inhist[ND_NEIGHBOR_SOLICIT] ||
+ icmp6stat.icp6s_outhist[ND_NEIGHBOR_SOLICIT] ||
+ icmp6stat.icp6s_inhist[ND_NEIGHBOR_ADVERT] ||
+ icmp6stat.icp6s_outhist[ND_NEIGHBOR_ADVERT]))) {
+ do_icmp6_neighbor = CONFIG_BOOLEAN_YES;
+ st = rrdset_find_localhost("ipv6.icmpneighbor");
+ if (unlikely(!st)) {
+ st = rrdset_create_localhost(
+ "ipv6"
+ , "icmpneighbor"
+ , NULL
+ , "icmp"
+ , NULL
+ , "IPv6 Neighbor Messages"
+ , "messages/s"
+ , "macos"
+ , "sysctl"
+ , 10500
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ rrddim_add(st, "InSolicits", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rrddim_add(st, "OutSolicits", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rrddim_add(st, "InAdvertisements", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rrddim_add(st, "OutAdvertisements", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ } else
+ rrdset_next(st);
+
+ rrddim_set(st, "InSolicits", icmp6stat.icp6s_inhist[ND_NEIGHBOR_SOLICIT]);
+ rrddim_set(st, "OutSolicits", icmp6stat.icp6s_outhist[ND_NEIGHBOR_SOLICIT]);
+ rrddim_set(st, "InAdvertisements", icmp6stat.icp6s_inhist[ND_NEIGHBOR_ADVERT]);
+ rrddim_set(st, "OutAdvertisements", icmp6stat.icp6s_outhist[ND_NEIGHBOR_ADVERT]);
+ rrdset_done(st);
+ }
+
+ // --------------------------------------------------------------------
+
+ if (do_icmp6_types == CONFIG_BOOLEAN_YES || (do_icmp6_types == CONFIG_BOOLEAN_AUTO && (
+ icmp6stat.icp6s_inhist[1] ||
+ icmp6stat.icp6s_inhist[128] ||
+ icmp6stat.icp6s_inhist[129] ||
+ icmp6stat.icp6s_inhist[136] ||
+ icmp6stat.icp6s_outhist[1] ||
+ icmp6stat.icp6s_outhist[128] ||
+ icmp6stat.icp6s_outhist[129] ||
+ icmp6stat.icp6s_outhist[133] ||
+ icmp6stat.icp6s_outhist[135] ||
+ icmp6stat.icp6s_outhist[136]))) {
+ do_icmp6_types = CONFIG_BOOLEAN_YES;
+ st = rrdset_find_localhost("ipv6.icmptypes");
+ if (unlikely(!st)) {
+ st = rrdset_create_localhost(
+ "ipv6"
+ , "icmptypes"
+ , NULL
+ , "icmp"
+ , NULL
+ , "IPv6 ICMP Types"
+ , "messages/s"
+ , "macos"
+ , "sysctl"
+ , 10700
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ rrddim_add(st, "InType1", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rrddim_add(st, "InType128", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rrddim_add(st, "InType129", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rrddim_add(st, "InType136", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rrddim_add(st, "OutType1", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rrddim_add(st, "OutType128", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rrddim_add(st, "OutType129", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rrddim_add(st, "OutType133", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rrddim_add(st, "OutType135", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rrddim_add(st, "OutType143", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ } else
+ rrdset_next(st);
+
+ rrddim_set(st, "InType1", icmp6stat.icp6s_inhist[1]);
+ rrddim_set(st, "InType128", icmp6stat.icp6s_inhist[128]);
+ rrddim_set(st, "InType129", icmp6stat.icp6s_inhist[129]);
+ rrddim_set(st, "InType136", icmp6stat.icp6s_inhist[136]);
+ rrddim_set(st, "OutType1", icmp6stat.icp6s_outhist[1]);
+ rrddim_set(st, "OutType128", icmp6stat.icp6s_outhist[128]);
+ rrddim_set(st, "OutType129", icmp6stat.icp6s_outhist[129]);
+ rrddim_set(st, "OutType133", icmp6stat.icp6s_outhist[133]);
+ rrddim_set(st, "OutType135", icmp6stat.icp6s_outhist[135]);
+ rrddim_set(st, "OutType143", icmp6stat.icp6s_outhist[143]);
+ rrdset_done(st);
+ }
+ }
+ }
+
+ // --------------------------------------------------------------------
+
+ if (likely(do_uptime)) {
+ if (unlikely(GETSYSCTL_BY_NAME("kern.boottime", boot_time))) {
+ do_uptime = 0;
+ error("DISABLED: system.uptime");
+ } else {
+ clock_gettime(CLOCK_REALTIME, &cur_time);
+ st = rrdset_find_localhost("system.uptime");
+
+ if(unlikely(!st)) {
+ st = rrdset_create_localhost(
+ "system"
+ , "uptime"
+ , NULL
+ , "uptime"
+ , NULL
+ , "System Uptime"
+ , "seconds"
+ , "macos"
+ , "sysctl"
+ , 1000
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+ rrddim_add(st, "uptime", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ }
+ else rrdset_next(st);
+
+ rrddim_set(st, "uptime", cur_time.tv_sec - boot_time.tv_sec);
+ rrdset_done(st);
+ }
+ }
+
+ return 0;
+}
+
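Nearly every reading in the file above goes through GETSYSCTL_BY_NAME(), which (per its declaration in plugin_macos.h further down) hands a fixed-size buffer to a named sysctl. The underlying libc call is sysctlbyname(); here is a standalone sketch using kern.boottime, the value behind the uptime chart above:

#include <stdio.h>
#include <time.h>
#include <sys/time.h>
#include <sys/types.h>
#include <sys/sysctl.h>

// Standalone sketch of a named sysctl read, mirroring the uptime chart above.
int main(void) {
    struct timeval boottime;                // kern.boottime returns a struct timeval
    size_t len = sizeof(boottime);

    if (sysctlbyname("kern.boottime", &boottime, &len, NULL, 0) != 0) {
        perror("sysctlbyname(kern.boottime)");
        return 1;
    }

    struct timespec now;
    clock_gettime(CLOCK_REALTIME, &now);    // same clock the plugin uses

    printf("uptime: %ld seconds\n", (long)(now.tv_sec - boottime.tv_sec));
    return 0;
}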
diff --git a/collectors/macos.plugin/plugin_macos.c b/collectors/macos.plugin/plugin_macos.c
new file mode 100644
index 000000000..628a5b10d
--- /dev/null
+++ b/collectors/macos.plugin/plugin_macos.c
@@ -0,0 +1,69 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "plugin_macos.h"
+
+static void macos_main_cleanup(void *ptr) {
+ struct netdata_static_thread *static_thread = (struct netdata_static_thread *)ptr;
+ static_thread->enabled = NETDATA_MAIN_THREAD_EXITING;
+
+ info("cleaning up...");
+
+ static_thread->enabled = NETDATA_MAIN_THREAD_EXITED;
+}
+
+void *macos_main(void *ptr) {
+ netdata_thread_cleanup_push(macos_main_cleanup, ptr);
+
+ // when ZERO, attempt to do it
+ int vdo_cpu_netdata = !config_get_boolean("plugin:macos", "netdata server resources", 1);
+ int vdo_macos_sysctl = !config_get_boolean("plugin:macos", "sysctl", 1);
+ int vdo_macos_mach_smi = !config_get_boolean("plugin:macos", "mach system management interface", 1);
+ int vdo_macos_iokit = !config_get_boolean("plugin:macos", "iokit", 1);
+
+ // keep track of the time each module was called
+ unsigned long long sutime_macos_sysctl = 0ULL;
+ unsigned long long sutime_macos_mach_smi = 0ULL;
+ unsigned long long sutime_macos_iokit = 0ULL;
+
+ usec_t step = localhost->rrd_update_every * USEC_PER_SEC;
+ heartbeat_t hb;
+ heartbeat_init(&hb);
+
+ while(!netdata_exit) {
+ usec_t hb_dt = heartbeat_next(&hb, step);
+
+ if(unlikely(netdata_exit)) break;
+
+ // BEGIN -- the job to be done
+
+ if(!vdo_macos_sysctl) {
+ debug(D_PROCNETDEV_LOOP, "MACOS: calling do_macos_sysctl().");
+ vdo_macos_sysctl = do_macos_sysctl(localhost->rrd_update_every, hb_dt);
+ }
+ if(unlikely(netdata_exit)) break;
+
+ if(!vdo_macos_mach_smi) {
+ debug(D_PROCNETDEV_LOOP, "MACOS: calling do_macos_mach_smi().");
+ vdo_macos_mach_smi = do_macos_mach_smi(localhost->rrd_update_every, hb_dt);
+ }
+ if(unlikely(netdata_exit)) break;
+
+ if(!vdo_macos_iokit) {
+ debug(D_PROCNETDEV_LOOP, "MACOS: calling do_macos_iokit().");
+ vdo_macos_iokit = do_macos_iokit(localhost->rrd_update_every, hb_dt);
+ }
+ if(unlikely(netdata_exit)) break;
+
+ // END -- the job is done
+
+ // --------------------------------------------------------------------
+
+ if(!vdo_cpu_netdata) {
+ global_statistics_charts();
+ registry_statistics();
+ }
+ }
+
+ netdata_thread_cleanup_pop(1);
+ return NULL;
+}
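The loop above relies on a simple convention: each do_macos_*() returns 0 while it still has charts to collect and non-zero once it has disabled itself, and because the result is assigned back to the corresponding vdo_* flag, a module that gives up is never called again. A toy, self-contained illustration of that scheduling convention (failing_module() is hypothetical and stands in for a do_macos_*() function):

#include <stdio.h>

// Toy illustration of the scheduling convention in macos_main() above:
// a module returns 0 to stay scheduled and non-zero to disable itself,
// and the caller writes the result back into the module's flag.
static int failing_module(int tick) {
    if (tick == 3) {
        fprintf(stderr, "module: giving up at tick %d\n", tick);
        return 1;                            // non-zero: take me out of the rotation
    }
    printf("module: collected tick %d\n", tick);
    return 0;                                // zero: keep calling me
}

int main(void) {
    int vdo_module = 0;                      // 0 means enabled, like the vdo_* flags above
    for (int tick = 0; tick < 6; tick++) {
        if (!vdo_module)
            vdo_module = failing_module(tick);
    }
    return 0;
}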
diff --git a/collectors/macos.plugin/plugin_macos.h b/collectors/macos.plugin/plugin_macos.h
new file mode 100644
index 000000000..0815c59c3
--- /dev/null
+++ b/collectors/macos.plugin/plugin_macos.h
@@ -0,0 +1,43 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+
+#ifndef NETDATA_PLUGIN_MACOS_H
+#define NETDATA_PLUGIN_MACOS_H 1
+
+#include "../../daemon/common.h"
+
+#if (TARGET_OS == OS_MACOS)
+
+#define NETDATA_PLUGIN_HOOK_MACOS \
+ { \
+ .name = "PLUGIN[macos]", \
+ .config_section = CONFIG_SECTION_PLUGINS, \
+ .config_name = "macos", \
+ .enabled = 1, \
+ .thread = NULL, \
+ .init_routine = NULL, \
+ .start_routine = macos_main \
+ },
+
+void *macos_main(void *ptr);
+
+#define GETSYSCTL_BY_NAME(name, var) getsysctl_by_name(name, &(var), sizeof(var))
+
+extern int getsysctl_by_name(const char *name, void *ptr, size_t len);
+
+extern int do_macos_sysctl(int update_every, usec_t dt);
+extern int do_macos_mach_smi(int update_every, usec_t dt);
+extern int do_macos_iokit(int update_every, usec_t dt);
+
+
+#else // (TARGET_OS == OS_MACOS)
+
+#define NETDATA_PLUGIN_HOOK_MACOS
+
+#endif // (TARGET_OS == OS_MACOS)
+
+
+
+
+
+#endif /* NETDATA_PLUGIN_MACOS_H */
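getsysctl_by_name() itself is only declared here; its definition is not part of this hunk. The following is a plausible sketch of what the wrapper has to do, an assumption for illustration rather than the code shipped in this commit; it reuses netdata's error() and unlikely() helpers from daemon/common.h, as the rest of the plugin does:

// Assumed implementation sketch of getsysctl_by_name(); not taken from this commit.
// Relies on netdata's common.h for error() and unlikely(), like the rest of the plugin.
#include <errno.h>
#include <string.h>
#include <sys/types.h>
#include <sys/sysctl.h>

int getsysctl_by_name(const char *name, void *ptr, size_t len) {
    size_t nlen = len;

    // read the named sysctl into the caller's fixed-size buffer
    if (unlikely(sysctlbyname(name, ptr, &nlen, NULL, 0) == -1)) {
        error("MACOS: sysctl(\"%s\"...) failed: %s", name, strerror(errno));
        return 1;
    }

    // the caller expects exactly sizeof(var) bytes back
    if (unlikely(nlen != len)) {
        error("MACOS: sysctl(\"%s\"...) expected %lu bytes, got %lu.", name, (unsigned long)len, (unsigned long)nlen);
        return 1;
    }

    return 0;
}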
diff --git a/collectors/nfacct.plugin/Makefile.am b/collectors/nfacct.plugin/Makefile.am
new file mode 100644
index 000000000..19554bed8
--- /dev/null
+++ b/collectors/nfacct.plugin/Makefile.am
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+
+dist_noinst_DATA = \
+ README.md \
+ $(NULL)
diff --git a/collectors/nfacct.plugin/Makefile.in b/collectors/nfacct.plugin/Makefile.in
new file mode 100644
index 000000000..2a1d001de
--- /dev/null
+++ b/collectors/nfacct.plugin/Makefile.in
@@ -0,0 +1,464 @@
+# Makefile.in generated by automake 1.14.1 from Makefile.am.
+# @configure_input@
+
+# Copyright (C) 1994-2013 Free Software Foundation, Inc.
+
+# This Makefile.in is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
+# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE.
+
+@SET_MAKE@
+
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+VPATH = @srcdir@
+am__is_gnu_make = test -n '$(MAKEFILE_LIST)' && test -n '$(MAKELEVEL)'
+am__make_running_with_option = \
+ case $${target_option-} in \
+ ?) ;; \
+ *) echo "am__make_running_with_option: internal error: invalid" \
+ "target option '$${target_option-}' specified" >&2; \
+ exit 1;; \
+ esac; \
+ has_opt=no; \
+ sane_makeflags=$$MAKEFLAGS; \
+ if $(am__is_gnu_make); then \
+ sane_makeflags=$$MFLAGS; \
+ else \
+ case $$MAKEFLAGS in \
+ *\\[\ \ ]*) \
+ bs=\\; \
+ sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \
+ | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \
+ esac; \
+ fi; \
+ skip_next=no; \
+ strip_trailopt () \
+ { \
+ flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \
+ }; \
+ for flg in $$sane_makeflags; do \
+ test $$skip_next = yes && { skip_next=no; continue; }; \
+ case $$flg in \
+ *=*|--*) continue;; \
+ -*I) strip_trailopt 'I'; skip_next=yes;; \
+ -*I?*) strip_trailopt 'I';; \
+ -*O) strip_trailopt 'O'; skip_next=yes;; \
+ -*O?*) strip_trailopt 'O';; \
+ -*l) strip_trailopt 'l'; skip_next=yes;; \
+ -*l?*) strip_trailopt 'l';; \
+ -[dEDm]) skip_next=yes;; \
+ -[JT]) skip_next=yes;; \
+ esac; \
+ case $$flg in \
+ *$$target_option*) has_opt=yes; break;; \
+ esac; \
+ done; \
+ test $$has_opt = yes
+am__make_dryrun = (target_option=n; $(am__make_running_with_option))
+am__make_keepgoing = (target_option=k; $(am__make_running_with_option))
+pkgdatadir = $(datadir)/@PACKAGE@
+pkgincludedir = $(includedir)/@PACKAGE@
+pkglibdir = $(libdir)/@PACKAGE@
+pkglibexecdir = $(libexecdir)/@PACKAGE@
+am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd
+install_sh_DATA = $(install_sh) -c -m 644
+install_sh_PROGRAM = $(install_sh) -c
+install_sh_SCRIPT = $(install_sh) -c
+INSTALL_HEADER = $(INSTALL_DATA)
+transform = $(program_transform_name)
+NORMAL_INSTALL = :
+PRE_INSTALL = :
+POST_INSTALL = :
+NORMAL_UNINSTALL = :
+PRE_UNINSTALL = :
+POST_UNINSTALL = :
+build_triplet = @build@
+host_triplet = @host@
+subdir = collectors/nfacct.plugin
+DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/Makefile.am \
+ $(dist_noinst_DATA)
+ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
+am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \
+ $(top_srcdir)/build/m4/ax_c__generic.m4 \
+ $(top_srcdir)/build/m4/ax_c_lto.m4 \
+ $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \
+ $(top_srcdir)/build/m4/ax_c_mallopt.m4 \
+ $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \
+ $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \
+ $(top_srcdir)/build/m4/ax_pthread.m4 \
+ $(top_srcdir)/build/m4/jemalloc.m4 \
+ $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac
+am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
+ $(ACLOCAL_M4)
+mkinstalldirs = $(install_sh) -d
+CONFIG_HEADER = $(top_builddir)/config.h
+CONFIG_CLEAN_FILES =
+CONFIG_CLEAN_VPATH_FILES =
+AM_V_P = $(am__v_P_@AM_V@)
+am__v_P_ = $(am__v_P_@AM_DEFAULT_V@)
+am__v_P_0 = false
+am__v_P_1 = :
+AM_V_GEN = $(am__v_GEN_@AM_V@)
+am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@)
+am__v_GEN_0 = @echo " GEN " $@;
+am__v_GEN_1 =
+AM_V_at = $(am__v_at_@AM_V@)
+am__v_at_ = $(am__v_at_@AM_DEFAULT_V@)
+am__v_at_0 = @
+am__v_at_1 =
+SOURCES =
+DIST_SOURCES =
+am__can_run_installinfo = \
+ case $$AM_UPDATE_INFO_DIR in \
+ n|no|NO) false;; \
+ *) (install-info --version) >/dev/null 2>&1;; \
+ esac
+DATA = $(dist_noinst_DATA)
+am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP)
+DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
+ACLOCAL = @ACLOCAL@
+AMTAR = @AMTAR@
+AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@
+AUTOCONF = @AUTOCONF@
+AUTOHEADER = @AUTOHEADER@
+AUTOMAKE = @AUTOMAKE@
+AWK = @AWK@
+CC = @CC@
+CCDEPMODE = @CCDEPMODE@
+CFLAGS = @CFLAGS@
+CPP = @CPP@
+CPPFLAGS = @CPPFLAGS@
+CYGPATH_W = @CYGPATH_W@
+DEFS = @DEFS@
+DEPDIR = @DEPDIR@
+ECHO_C = @ECHO_C@
+ECHO_N = @ECHO_N@
+ECHO_T = @ECHO_T@
+EGREP = @EGREP@
+EXEEXT = @EXEEXT@
+GREP = @GREP@
+INSTALL = @INSTALL@
+INSTALL_DATA = @INSTALL_DATA@
+INSTALL_PROGRAM = @INSTALL_PROGRAM@
+INSTALL_SCRIPT = @INSTALL_SCRIPT@
+INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@
+IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@
+LDFLAGS = @LDFLAGS@
+LIBCAP_CFLAGS = @LIBCAP_CFLAGS@
+LIBCAP_LIBS = @LIBCAP_LIBS@
+LIBMNL_CFLAGS = @LIBMNL_CFLAGS@
+LIBMNL_LIBS = @LIBMNL_LIBS@
+LIBOBJS = @LIBOBJS@
+LIBS = @LIBS@
+LTLIBOBJS = @LTLIBOBJS@
+MAINT = @MAINT@
+MAKEINFO = @MAKEINFO@
+MATH_CFLAGS = @MATH_CFLAGS@
+MATH_LIBS = @MATH_LIBS@
+MKDIR_P = @MKDIR_P@
+NFACCT_CFLAGS = @NFACCT_CFLAGS@
+NFACCT_LIBS = @NFACCT_LIBS@
+OBJEXT = @OBJEXT@
+OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@
+OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@
+OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@
+OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@
+OPTIONAL_MATH_CLFAGS = @OPTIONAL_MATH_CLFAGS@
+OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@
+OPTIONAL_NFACCT_CLFAGS = @OPTIONAL_NFACCT_CLFAGS@
+OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@
+OPTIONAL_UUID_CLFAGS = @OPTIONAL_UUID_CLFAGS@
+OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@
+OPTIONAL_ZLIB_CLFAGS = @OPTIONAL_ZLIB_CLFAGS@
+OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@
+PACKAGE = @PACKAGE@
+PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@
+PACKAGE_NAME = @PACKAGE_NAME@
+PACKAGE_RPM_RELEASE = @PACKAGE_RPM_RELEASE@
+PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@
+PACKAGE_STRING = @PACKAGE_STRING@
+PACKAGE_TARNAME = @PACKAGE_TARNAME@
+PACKAGE_URL = @PACKAGE_URL@
+PACKAGE_VERSION = @PACKAGE_VERSION@
+PATH_SEPARATOR = @PATH_SEPARATOR@
+PKG_CONFIG = @PKG_CONFIG@
+PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@
+PKG_CONFIG_PATH = @PKG_CONFIG_PATH@
+PTHREAD_CC = @PTHREAD_CC@
+PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
+PTHREAD_LIBS = @PTHREAD_LIBS@
+SET_MAKE = @SET_MAKE@
+SHELL = @SHELL@
+SSE_CANDIDATE = @SSE_CANDIDATE@
+STRIP = @STRIP@
+UUID_CFLAGS = @UUID_CFLAGS@
+UUID_LIBS = @UUID_LIBS@
+VERSION = @VERSION@
+ZLIB_CFLAGS = @ZLIB_CFLAGS@
+ZLIB_LIBS = @ZLIB_LIBS@
+abs_builddir = @abs_builddir@
+abs_srcdir = @abs_srcdir@
+abs_top_builddir = @abs_top_builddir@
+abs_top_srcdir = @abs_top_srcdir@
+ac_ct_CC = @ac_ct_CC@
+am__include = @am__include@
+am__leading_dot = @am__leading_dot@
+am__quote = @am__quote@
+am__tar = @am__tar@
+am__untar = @am__untar@
+ax_pthread_config = @ax_pthread_config@
+bindir = @bindir@
+build = @build@
+build_alias = @build_alias@
+build_cpu = @build_cpu@
+build_os = @build_os@
+build_target = @build_target@
+build_vendor = @build_vendor@
+builddir = @builddir@
+cachedir = @cachedir@
+chartsdir = @chartsdir@
+configdir = @configdir@
+datadir = @datadir@
+datarootdir = @datarootdir@
+docdir = @docdir@
+dvidir = @dvidir@
+exec_prefix = @exec_prefix@
+has_jemalloc = @has_jemalloc@
+has_tcmalloc = @has_tcmalloc@
+host = @host@
+host_alias = @host_alias@
+host_cpu = @host_cpu@
+host_os = @host_os@
+host_vendor = @host_vendor@
+htmldir = @htmldir@
+includedir = @includedir@
+infodir = @infodir@
+install_sh = @install_sh@
+libconfigdir = @libconfigdir@
+libdir = @libdir@
+libexecdir = @libexecdir@
+localedir = @localedir@
+localstatedir = @localstatedir@
+logdir = @logdir@
+mandir = @mandir@
+mkdir_p = @mkdir_p@
+nodedir = @nodedir@
+oldincludedir = @oldincludedir@
+pdfdir = @pdfdir@
+pluginsdir = @pluginsdir@
+prefix = @prefix@
+program_transform_name = @program_transform_name@
+psdir = @psdir@
+pythondir = @pythondir@
+registrydir = @registrydir@
+sbindir = @sbindir@
+sharedstatedir = @sharedstatedir@
+srcdir = @srcdir@
+sysconfdir = @sysconfdir@
+target_alias = @target_alias@
+top_build_prefix = @top_build_prefix@
+top_builddir = @top_builddir@
+top_srcdir = @top_srcdir@
+varlibdir = @varlibdir@
+webdir = @webdir@
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+dist_noinst_DATA = \
+ README.md \
+ $(NULL)
+
+all: all-am
+
+.SUFFIXES:
+$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps)
+ @for dep in $?; do \
+ case '$(am__configure_deps)' in \
+ *$$dep*) \
+ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \
+ && { if test -f $@; then exit 0; else break; fi; }; \
+ exit 1;; \
+ esac; \
+ done; \
+ echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu collectors/nfacct.plugin/Makefile'; \
+ $(am__cd) $(top_srcdir) && \
+ $(AUTOMAKE) --gnu collectors/nfacct.plugin/Makefile
+.PRECIOUS: Makefile
+Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
+ @case '$?' in \
+ *config.status*) \
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \
+ *) \
+ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \
+ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \
+ esac;
+
+$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+
+$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(am__aclocal_m4_deps):
+tags TAGS:
+
+ctags CTAGS:
+
+cscope cscopelist:
+
+
+distdir: $(DISTFILES)
+ @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+ list='$(DISTFILES)'; \
+ dist_files=`for file in $$list; do echo $$file; done | \
+ sed -e "s|^$$srcdirstrip/||;t" \
+ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \
+ case $$dist_files in \
+ */*) $(MKDIR_P) `echo "$$dist_files" | \
+ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \
+ sort -u` ;; \
+ esac; \
+ for file in $$dist_files; do \
+ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \
+ if test -d $$d/$$file; then \
+ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \
+ if test -d "$(distdir)/$$file"; then \
+ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+ fi; \
+ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \
+ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \
+ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+ fi; \
+ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \
+ else \
+ test -f "$(distdir)/$$file" \
+ || cp -p $$d/$$file "$(distdir)/$$file" \
+ || exit 1; \
+ fi; \
+ done
+check-am: all-am
+check: check-am
+all-am: Makefile $(DATA)
+installdirs:
+install: install-am
+install-exec: install-exec-am
+install-data: install-data-am
+uninstall: uninstall-am
+
+install-am: all-am
+ @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
+
+installcheck: installcheck-am
+install-strip:
+ if test -z '$(STRIP)'; then \
+ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+ install; \
+ else \
+ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+ "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \
+ fi
+mostlyclean-generic:
+
+clean-generic:
+
+distclean-generic:
+ -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
+ -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES)
+
+maintainer-clean-generic:
+ @echo "This command is intended for maintainers to use"
+ @echo "it deletes files that may require special tools to rebuild."
+ -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES)
+clean: clean-am
+
+clean-am: clean-generic mostlyclean-am
+
+distclean: distclean-am
+ -rm -f Makefile
+distclean-am: clean-am distclean-generic
+
+dvi: dvi-am
+
+dvi-am:
+
+html: html-am
+
+html-am:
+
+info: info-am
+
+info-am:
+
+install-data-am:
+
+install-dvi: install-dvi-am
+
+install-dvi-am:
+
+install-exec-am:
+
+install-html: install-html-am
+
+install-html-am:
+
+install-info: install-info-am
+
+install-info-am:
+
+install-man:
+
+install-pdf: install-pdf-am
+
+install-pdf-am:
+
+install-ps: install-ps-am
+
+install-ps-am:
+
+installcheck-am:
+
+maintainer-clean: maintainer-clean-am
+ -rm -f Makefile
+maintainer-clean-am: distclean-am maintainer-clean-generic
+
+mostlyclean: mostlyclean-am
+
+mostlyclean-am: mostlyclean-generic
+
+pdf: pdf-am
+
+pdf-am:
+
+ps: ps-am
+
+ps-am:
+
+uninstall-am:
+
+.MAKE: install-am install-strip
+
+.PHONY: all all-am check check-am clean clean-generic cscopelist-am \
+ ctags-am distclean distclean-generic distdir dvi dvi-am html \
+ html-am info info-am install install-am install-data \
+ install-data-am install-dvi install-dvi-am install-exec \
+ install-exec-am install-html install-html-am install-info \
+ install-info-am install-man install-pdf install-pdf-am \
+ install-ps install-ps-am install-strip installcheck \
+ installcheck-am installdirs maintainer-clean \
+ maintainer-clean-generic mostlyclean mostlyclean-generic pdf \
+ pdf-am ps ps-am tags-am uninstall uninstall-am
+
+
+# Tell versions [3.59,3.63) of GNU make to not export all variables.
+# Otherwise a system limit (for SysV at least) may be exceeded.
+.NOEXPORT:
diff --git a/collectors/nfacct.plugin/README.md b/collectors/nfacct.plugin/README.md
new file mode 100644
index 000000000..814b47915
--- /dev/null
+++ b/collectors/nfacct.plugin/README.md
@@ -0,0 +1,10 @@
+# nfacct.plugin
+
+This plugin collects netfilter accounting (NFACCT) statistics.
+
+It is currently disabled by default because it requires root access.
+We have to move the code to an external plugin, so that only the plugin needs to be setuid root instead of the whole netdata server.
+
+You can build netdata with it to test it though.
+Just run `./configure` (or `netdata-installer.sh`) with the option `--enable-plugin-nfacct` (and any other options you may need).
+Remember, you have to tell netdata you want it to run as `root` for this plugin to work.
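+
+As a minimal sketch (run from a source tree where `configure` has already been generated; any extra options are up to you), the build could look like this:
+
+```sh
+# enable the internal nfacct plugin at configure time
+./configure --enable-plugin-nfacct
+make
+```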
diff --git a/collectors/nfacct.plugin/plugin_nfacct.c b/collectors/nfacct.plugin/plugin_nfacct.c
new file mode 100644
index 000000000..7d42dd189
--- /dev/null
+++ b/collectors/nfacct.plugin/plugin_nfacct.c
@@ -0,0 +1,822 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "plugin_nfacct.h"
+
+#if defined(INTERNAL_PLUGIN_NFACCT)
+
+#define PLUGIN_NFACCT_NAME "nfacct.plugin"
+
+#ifdef HAVE_LIBMNL
+#include <libmnl/libmnl.h>
+
+static inline size_t mnl_buffer_size() {
+ long s = MNL_SOCKET_BUFFER_SIZE;
+ if(s <= 0) return 8192;
+ return (size_t)s;
+}
+
+// ----------------------------------------------------------------------------
+// DO_NFSTAT - collect netfilter connection tracker statistics via netlink
+// example: https://github.com/formorer/pkg-conntrack-tools/blob/master/src/conntrack.c
+
+#ifdef HAVE_LINUX_NETFILTER_NFNETLINK_CONNTRACK_H
+#define DO_NFSTAT 1
+
+#define RRD_TYPE_NET_STAT_NETFILTER "netfilter"
+#define RRD_TYPE_NET_STAT_CONNTRACK "netlink"
+
+#include <linux/netfilter/nfnetlink_conntrack.h>
+
+static struct {
+ int update_every;
+ char *buf;
+ size_t buf_size;
+ struct mnl_socket *mnl;
+ struct nlmsghdr *nlh;
+ struct nfgenmsg *nfh;
+ unsigned int seq;
+ uint32_t portid;
+
+ struct nlattr *tb[CTA_STATS_MAX+1];
+ const char *attr2name[CTA_STATS_MAX+1];
+ kernel_uint_t metrics[CTA_STATS_MAX+1];
+
+ struct nlattr *tb_exp[CTA_STATS_EXP_MAX+1];
+ const char *attr2name_exp[CTA_STATS_EXP_MAX+1];
+ kernel_uint_t metrics_exp[CTA_STATS_EXP_MAX+1];
+} nfstat_root = {
+ .update_every = 1,
+ .buf = NULL,
+ .buf_size = 0,
+ .mnl = NULL,
+ .nlh = NULL,
+ .nfh = NULL,
+ .seq = 0,
+ .portid = 0,
+ .tb = {},
+ .attr2name = {
+ [CTA_STATS_SEARCHED] = "searched",
+ [CTA_STATS_FOUND] = "found",
+ [CTA_STATS_NEW] = "new",
+ [CTA_STATS_INVALID] = "invalid",
+ [CTA_STATS_IGNORE] = "ignore",
+ [CTA_STATS_DELETE] = "delete",
+ [CTA_STATS_DELETE_LIST] = "delete_list",
+ [CTA_STATS_INSERT] = "insert",
+ [CTA_STATS_INSERT_FAILED] = "insert_failed",
+ [CTA_STATS_DROP] = "drop",
+ [CTA_STATS_EARLY_DROP] = "early_drop",
+ [CTA_STATS_ERROR] = "icmp_error",
+ [CTA_STATS_SEARCH_RESTART] = "search_restart",
+ },
+ .metrics = {},
+ .tb_exp = {},
+ .attr2name_exp = {
+ [CTA_STATS_EXP_NEW] = "new",
+ [CTA_STATS_EXP_CREATE] = "created",
+ [CTA_STATS_EXP_DELETE] = "deleted",
+ },
+ .metrics_exp = {}
+};
+
+
+static int nfstat_init(int update_every) {
+ nfstat_root.update_every = update_every;
+
+ nfstat_root.buf_size = mnl_buffer_size();
+ nfstat_root.buf = mallocz(nfstat_root.buf_size);
+
+ nfstat_root.mnl = mnl_socket_open(NETLINK_NETFILTER);
+ if(!nfstat_root.mnl) {
+ error("NFSTAT: mnl_socket_open() failed");
+ return 1;
+ }
+
+ nfstat_root.seq = (unsigned int)now_realtime_sec() - 1;
+
+ if(mnl_socket_bind(nfstat_root.mnl, 0, MNL_SOCKET_AUTOPID) < 0) {
+ error("NFSTAT: mnl_socket_bind() failed");
+ return 1;
+ }
+ nfstat_root.portid = mnl_socket_get_portid(nfstat_root.mnl);
+
+ return 0;
+}
+
+static void nfstat_cleanup() {
+ if(nfstat_root.mnl) {
+ mnl_socket_close(nfstat_root.mnl);
+ nfstat_root.mnl = NULL;
+ }
+
+ freez(nfstat_root.buf);
+ nfstat_root.buf = NULL;
+ nfstat_root.buf_size = 0;
+}
+
+static struct nlmsghdr * nfct_mnl_nlmsghdr_put(char *buf, uint16_t subsys, uint16_t type, uint8_t family, uint32_t seq) {
+ struct nlmsghdr *nlh;
+ struct nfgenmsg *nfh;
+
+ nlh = mnl_nlmsg_put_header(buf);
+ nlh->nlmsg_type = (subsys << 8) | type;
+ nlh->nlmsg_flags = NLM_F_REQUEST|NLM_F_DUMP;
+ nlh->nlmsg_seq = seq;
+
+ nfh = mnl_nlmsg_put_extra_header(nlh, sizeof(struct nfgenmsg));
+ nfh->nfgen_family = family;
+ nfh->version = NFNETLINK_V0;
+ nfh->res_id = 0;
+
+ return nlh;
+}
+
+static int nfct_stats_attr_cb(const struct nlattr *attr, void *data) {
+ const struct nlattr **tb = data;
+ int type = mnl_attr_get_type(attr);
+
+ if (mnl_attr_type_valid(attr, CTA_STATS_MAX) < 0)
+ return MNL_CB_OK;
+
+ if (mnl_attr_validate(attr, MNL_TYPE_U32) < 0) {
+ error("NFSTAT: mnl_attr_validate() failed");
+ return MNL_CB_ERROR;
+ }
+
+ tb[type] = attr;
+ return MNL_CB_OK;
+}
+
+static int nfstat_callback(const struct nlmsghdr *nlh, void *data) {
+ (void)data;
+
+ struct nfgenmsg *nfg = mnl_nlmsg_get_payload(nlh);
+
+ mnl_attr_parse(nlh, sizeof(*nfg), nfct_stats_attr_cb, nfstat_root.tb);
+
+ // printf("cpu=%-4u\t", ntohs(nfg->res_id));
+
+ int i;
+ // add the metrics of this CPU into the metrics
+ for (i = 0; i < CTA_STATS_MAX+1; i++) {
+ if (nfstat_root.tb[i]) {
+ // printf("%s=%u ", nfstat_root.attr2name[i], ntohl(mnl_attr_get_u32(nfstat_root.tb[i])));
+ nfstat_root.metrics[i] += ntohl(mnl_attr_get_u32(nfstat_root.tb[i]));
+ }
+ }
+ // printf("\n");
+
+ return MNL_CB_OK;
+}
+
+static int nfstat_collect_conntrack() {
+ // zero all metrics - we will sum the metrics of all CPUs later
+ int i;
+ for (i = 0; i < CTA_STATS_MAX+1; i++)
+ nfstat_root.metrics[i] = 0;
+
+ // prepare the request
+ nfstat_root.nlh = nfct_mnl_nlmsghdr_put(nfstat_root.buf, NFNL_SUBSYS_CTNETLINK, IPCTNL_MSG_CT_GET_STATS_CPU, AF_UNSPEC, nfstat_root.seq);
+
+ // send the request
+ if(mnl_socket_sendto(nfstat_root.mnl, nfstat_root.nlh, nfstat_root.nlh->nlmsg_len) < 0) {
+ error("NFSTAT: mnl_socket_sendto() failed");
+ return 1;
+ }
+
+ // get the reply
+ ssize_t ret;
+ while ((ret = mnl_socket_recvfrom(nfstat_root.mnl, nfstat_root.buf, nfstat_root.buf_size)) > 0) {
+ if(mnl_cb_run(
+ nfstat_root.buf
+ , (size_t)ret
+ , nfstat_root.nlh->nlmsg_seq
+ , nfstat_root.portid
+ , nfstat_callback
+ , NULL
+ ) <= MNL_CB_STOP)
+ break;
+ }
+
+ // verify we run without issues
+ if (ret == -1) {
+ error("NFSTAT: error communicating with kernel. This plugin can only work when netdata runs as root.");
+ return 1;
+ }
+
+ return 0;
+}
+
+static int nfexp_stats_attr_cb(const struct nlattr *attr, void *data)
+{
+ const struct nlattr **tb = data;
+ int type = mnl_attr_get_type(attr);
+
+ if (mnl_attr_type_valid(attr, CTA_STATS_EXP_MAX) < 0)
+ return MNL_CB_OK;
+
+ if (mnl_attr_validate(attr, MNL_TYPE_U32) < 0) {
+ error("NFSTAT EXP: mnl_attr_validate() failed");
+ return MNL_CB_ERROR;
+ }
+
+ tb[type] = attr;
+ return MNL_CB_OK;
+}
+
+static int nfstat_callback_exp(const struct nlmsghdr *nlh, void *data) {
+ (void)data;
+
+ struct nfgenmsg *nfg = mnl_nlmsg_get_payload(nlh);
+
+ mnl_attr_parse(nlh, sizeof(*nfg), nfexp_stats_attr_cb, nfstat_root.tb_exp);
+
+ int i;
+ for (i = 0; i < CTA_STATS_EXP_MAX+1; i++) {
+ if (nfstat_root.tb_exp[i]) {
+ nfstat_root.metrics_exp[i] += ntohl(mnl_attr_get_u32(nfstat_root.tb_exp[i]));
+ }
+ }
+
+ return MNL_CB_OK;
+}
+
+static int nfstat_collect_conntrack_expectations() {
+ // zero all metrics - we will sum the metrics of all CPUs later
+ int i;
+ for (i = 0; i < CTA_STATS_EXP_MAX+1; i++)
+ nfstat_root.metrics_exp[i] = 0;
+
+ // prepare the request
+ nfstat_root.nlh = nfct_mnl_nlmsghdr_put(nfstat_root.buf, NFNL_SUBSYS_CTNETLINK_EXP, IPCTNL_MSG_EXP_GET_STATS_CPU, AF_UNSPEC, nfstat_root.seq);
+
+ // send the request
+ if(mnl_socket_sendto(nfstat_root.mnl, nfstat_root.nlh, nfstat_root.nlh->nlmsg_len) < 0) {
+ error("NFSTAT: mnl_socket_sendto() failed");
+ return 1;
+ }
+
+ // get the reply
+ ssize_t ret;
+ while ((ret = mnl_socket_recvfrom(nfstat_root.mnl, nfstat_root.buf, nfstat_root.buf_size)) > 0) {
+ if(mnl_cb_run(
+ nfstat_root.buf
+ , (size_t)ret
+ , nfstat_root.nlh->nlmsg_seq
+ , nfstat_root.portid
+ , nfstat_callback_exp
+ , NULL
+ ) <= MNL_CB_STOP)
+ break;
+ }
+
+ // verify we run without issues
+ if (ret == -1) {
+ error("NFSTAT: error communicating with kernel. This plugin can only work when netdata runs as root.");
+ return 1;
+ }
+
+ return 0;
+}
+
+static int nfstat_collect() {
+ nfstat_root.seq++;
+
+ if(nfstat_collect_conntrack())
+ return 1;
+
+ if(nfstat_collect_conntrack_expectations())
+ return 1;
+
+ return 0;
+}
+
+static void nfstat_send_metrics() {
+
+ {
+ static RRDSET *st_new = NULL;
+ static RRDDIM *rd_new = NULL, *rd_ignore = NULL, *rd_invalid = NULL;
+
+ if(!st_new) {
+ st_new = rrdset_create_localhost(
+ RRD_TYPE_NET_STAT_NETFILTER
+ , RRD_TYPE_NET_STAT_CONNTRACK "_new"
+ , NULL
+ , RRD_TYPE_NET_STAT_CONNTRACK
+ , NULL
+ , "Connection Tracker New Connections"
+ , "connections/s"
+ , PLUGIN_NFACCT_NAME
+ , NULL
+ , NETDATA_CHART_PRIO_NETFILTER_NEW
+ , nfstat_root.update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ rd_new = rrddim_add(st_new, nfstat_root.attr2name[CTA_STATS_NEW], NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_ignore = rrddim_add(st_new, nfstat_root.attr2name[CTA_STATS_IGNORE], NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_invalid = rrddim_add(st_new, nfstat_root.attr2name[CTA_STATS_INVALID], NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else
+ rrdset_next(st_new);
+
+ rrddim_set_by_pointer(st_new, rd_new, (collected_number) nfstat_root.metrics[CTA_STATS_NEW]);
+ rrddim_set_by_pointer(st_new, rd_ignore, (collected_number) nfstat_root.metrics[CTA_STATS_IGNORE]);
+ rrddim_set_by_pointer(st_new, rd_invalid, (collected_number) nfstat_root.metrics[CTA_STATS_INVALID]);
+
+ rrdset_done(st_new);
+ }
+
+ // ----------------------------------------------------------------
+
+ {
+ static RRDSET *st_changes = NULL;
+ static RRDDIM *rd_inserted = NULL, *rd_deleted = NULL, *rd_delete_list = NULL;
+
+ if(!st_changes) {
+ st_changes = rrdset_create_localhost(
+ RRD_TYPE_NET_STAT_NETFILTER
+ , RRD_TYPE_NET_STAT_CONNTRACK "_changes"
+ , NULL
+ , RRD_TYPE_NET_STAT_CONNTRACK
+ , NULL
+ , "Connection Tracker Changes"
+ , "changes/s"
+ , PLUGIN_NFACCT_NAME
+ , NULL
+ , NETDATA_CHART_PRIO_NETFILTER_CHANGES
+ , nfstat_root.update_every
+ , RRDSET_TYPE_LINE
+ );
+ rrdset_flag_set(st_changes, RRDSET_FLAG_DETAIL);
+
+ rd_inserted = rrddim_add(st_changes, nfstat_root.attr2name[CTA_STATS_INSERT], NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_deleted = rrddim_add(st_changes, nfstat_root.attr2name[CTA_STATS_DELETE], NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_delete_list = rrddim_add(st_changes, nfstat_root.attr2name[CTA_STATS_DELETE_LIST], NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else
+ rrdset_next(st_changes);
+
+ rrddim_set_by_pointer(st_changes, rd_inserted, (collected_number) nfstat_root.metrics[CTA_STATS_INSERT]);
+ rrddim_set_by_pointer(st_changes, rd_deleted, (collected_number) nfstat_root.metrics[CTA_STATS_DELETE]);
+ rrddim_set_by_pointer(st_changes, rd_delete_list, (collected_number) nfstat_root.metrics[CTA_STATS_DELETE_LIST]);
+
+ rrdset_done(st_changes);
+ }
+
+ // ----------------------------------------------------------------
+
+ {
+ static RRDSET *st_search = NULL;
+ static RRDDIM *rd_searched = NULL, *rd_restarted = NULL, *rd_found = NULL;
+
+ if(!st_search) {
+ st_search = rrdset_create_localhost(
+ RRD_TYPE_NET_STAT_NETFILTER
+ , RRD_TYPE_NET_STAT_CONNTRACK "_search"
+ , NULL
+ , RRD_TYPE_NET_STAT_CONNTRACK
+ , NULL
+ , "Connection Tracker Searches"
+ , "searches/s"
+ , PLUGIN_NFACCT_NAME
+ , NULL
+ , NETDATA_CHART_PRIO_NETFILTER_SEARCH
+ , nfstat_root.update_every
+ , RRDSET_TYPE_LINE
+ );
+ rrdset_flag_set(st_search, RRDSET_FLAG_DETAIL);
+
+ rd_searched = rrddim_add(st_search, nfstat_root.attr2name[CTA_STATS_SEARCHED], NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_restarted = rrddim_add(st_search, nfstat_root.attr2name[CTA_STATS_SEARCH_RESTART], NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_found = rrddim_add(st_search, nfstat_root.attr2name[CTA_STATS_FOUND], NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else
+ rrdset_next(st_search);
+
+ rrddim_set_by_pointer(st_search, rd_searched, (collected_number) nfstat_root.metrics[CTA_STATS_SEARCHED]);
+ rrddim_set_by_pointer(st_search, rd_restarted, (collected_number) nfstat_root.metrics[CTA_STATS_SEARCH_RESTART]);
+ rrddim_set_by_pointer(st_search, rd_found, (collected_number) nfstat_root.metrics[CTA_STATS_FOUND]);
+
+ rrdset_done(st_search);
+ }
+
+ // ----------------------------------------------------------------
+
+ {
+ static RRDSET *st_errors = NULL;
+ static RRDDIM *rd_error = NULL, *rd_insert_failed = NULL, *rd_drop = NULL, *rd_early_drop = NULL;
+
+ if(!st_errors) {
+ st_errors = rrdset_create_localhost(
+ RRD_TYPE_NET_STAT_NETFILTER
+ , RRD_TYPE_NET_STAT_CONNTRACK "_errors"
+ , NULL
+ , RRD_TYPE_NET_STAT_CONNTRACK
+ , NULL
+ , "Connection Tracker Errors"
+ , "events/s"
+ , PLUGIN_NFACCT_NAME
+ , NULL
+ , NETDATA_CHART_PRIO_NETFILTER_ERRORS
+ , nfstat_root.update_every
+ , RRDSET_TYPE_LINE
+ );
+ rrdset_flag_set(st_errors, RRDSET_FLAG_DETAIL);
+
+ rd_error = rrddim_add(st_errors, nfstat_root.attr2name[CTA_STATS_ERROR], NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_insert_failed = rrddim_add(st_errors, nfstat_root.attr2name[CTA_STATS_INSERT_FAILED], NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_drop = rrddim_add(st_errors, nfstat_root.attr2name[CTA_STATS_DROP], NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_early_drop = rrddim_add(st_errors, nfstat_root.attr2name[CTA_STATS_EARLY_DROP], NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else
+ rrdset_next(st_errors);
+
+ rrddim_set_by_pointer(st_errors, rd_error, (collected_number) nfstat_root.metrics[CTA_STATS_ERROR]);
+ rrddim_set_by_pointer(st_errors, rd_insert_failed, (collected_number) nfstat_root.metrics[CTA_STATS_INSERT_FAILED]);
+ rrddim_set_by_pointer(st_errors, rd_drop, (collected_number) nfstat_root.metrics[CTA_STATS_DROP]);
+ rrddim_set_by_pointer(st_errors, rd_early_drop, (collected_number) nfstat_root.metrics[CTA_STATS_EARLY_DROP]);
+
+ rrdset_done(st_errors);
+ }
+
+ // ----------------------------------------------------------------
+
+ {
+ static RRDSET *st_expect = NULL;
+ static RRDDIM *rd_new = NULL, *rd_created = NULL, *rd_deleted = NULL;
+
+ if(!st_expect) {
+ st_expect = rrdset_create_localhost(
+ RRD_TYPE_NET_STAT_NETFILTER
+ , RRD_TYPE_NET_STAT_CONNTRACK "_expect"
+ , NULL
+ , RRD_TYPE_NET_STAT_CONNTRACK
+ , NULL
+ , "Connection Tracker Expectations"
+ , "expectations/s"
+ , PLUGIN_NFACCT_NAME
+ , NULL
+ , NETDATA_CHART_PRIO_NETFILTER_EXPECT
+ , nfstat_root.update_every
+ , RRDSET_TYPE_LINE
+ );
+ rrdset_flag_set(st_expect, RRDSET_FLAG_DETAIL);
+
+ rd_created = rrddim_add(st_expect, nfstat_root.attr2name_exp[CTA_STATS_EXP_CREATE], NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_deleted = rrddim_add(st_expect, nfstat_root.attr2name_exp[CTA_STATS_EXP_DELETE], NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_new = rrddim_add(st_expect, nfstat_root.attr2name_exp[CTA_STATS_EXP_NEW], NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else
+ rrdset_next(st_expect);
+
+ rrddim_set_by_pointer(st_expect, rd_created, (collected_number) nfstat_root.metrics_exp[CTA_STATS_EXP_CREATE]);
+ rrddim_set_by_pointer(st_expect, rd_deleted, (collected_number) nfstat_root.metrics_exp[CTA_STATS_EXP_DELETE]);
+ rrddim_set_by_pointer(st_expect, rd_new, (collected_number) nfstat_root.metrics_exp[CTA_STATS_EXP_NEW]);
+
+ rrdset_done(st_expect);
+ }
+
+}
+
+#endif // HAVE_LINUX_NETFILTER_NFNETLINK_CONNTRACK_H
+
+
+// ----------------------------------------------------------------------------
+// DO_NFACCT - collect netfilter accounting statistics via netlink
+
+#ifdef HAVE_LIBNETFILTER_ACCT
+#define DO_NFACCT 1
+
+#include <libnetfilter_acct/libnetfilter_acct.h>
+
+struct nfacct_data {
+ char *name;
+ uint32_t hash;
+
+ uint64_t pkts;
+ uint64_t bytes;
+
+ RRDDIM *rd_bytes;
+ RRDDIM *rd_packets;
+
+ int updated;
+
+ struct nfacct_data *next;
+};
+
+static struct {
+ int update_every;
+ char *buf;
+ size_t buf_size;
+ struct mnl_socket *mnl;
+ struct nlmsghdr *nlh;
+ unsigned int seq;
+ uint32_t portid;
+ struct nfacct *nfacct_buffer;
+ struct nfacct_data *nfacct_metrics;
+} nfacct_root = {
+ .update_every = 1,
+ .buf = NULL,
+ .buf_size = 0,
+ .mnl = NULL,
+ .nlh = NULL,
+ .seq = 0,
+ .portid = 0,
+ .nfacct_buffer = NULL,
+ .nfacct_metrics = NULL
+};
+
+static inline struct nfacct_data *nfacct_data_get(const char *name, uint32_t hash) {
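+ // look up the counter by hash and name; if it is not tracked yet, allocate a new entry and link it into the list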
+ struct nfacct_data *d = NULL, *last = NULL;
+ for(d = nfacct_root.nfacct_metrics; d ; last = d, d = d->next) {
+ if(unlikely(d->hash == hash && !strcmp(d->name, name)))
+ return d;
+ }
+
+ d = callocz(1, sizeof(struct nfacct_data));
+ d->name = strdupz(name);
+ d->hash = hash;
+
+ if(!last) {
+ d->next = nfacct_root.nfacct_metrics;
+ nfacct_root.nfacct_metrics = d;
+ }
+ else {
+ d->next = last->next;
+ last->next = d;
+ }
+
+ return d;
+}
+
+static int nfacct_init(int update_every) {
+ nfacct_root.update_every = update_every;
+
+ nfacct_root.buf_size = mnl_buffer_size();
+ nfacct_root.buf = mallocz(nfacct_root.buf_size);
+
+ nfacct_root.nfacct_buffer = nfacct_alloc();
+ if(!nfacct_root.nfacct_buffer) {
+ error("nfacct.plugin: nfacct_alloc() failed.");
+ return 1;
+ }
+
+ nfacct_root.seq = (unsigned int)now_realtime_sec() - 1;
+
+ nfacct_root.mnl = mnl_socket_open(NETLINK_NETFILTER);
+ if(!nfacct_root.mnl) {
+ error("nfacct.plugin: mnl_socket_open() failed");
+ return 1;
+ }
+
+ if(mnl_socket_bind(nfacct_root.mnl, 0, MNL_SOCKET_AUTOPID) < 0) {
+ error("nfacct.plugin: mnl_socket_bind() failed");
+ return 1;
+ }
+ nfacct_root.portid = mnl_socket_get_portid(nfacct_root.mnl);
+
+ return 0;
+}
+
+static void nfacct_cleanup() {
+ if(nfacct_root.mnl) {
+ mnl_socket_close(nfacct_root.mnl);
+ nfacct_root.mnl = NULL;
+ }
+
+ if(nfacct_root.nfacct_buffer) {
+ nfacct_free(nfacct_root.nfacct_buffer);
+ nfacct_root.nfacct_buffer = NULL;
+ }
+
+ freez(nfacct_root.buf);
+ nfacct_root.buf = NULL;
+ nfacct_root.buf_size = 0;
+
+ // TODO: cleanup the metrics linked list
+}
+
+static int nfacct_callback(const struct nlmsghdr *nlh, void *data) {
+ (void)data;
+
+ if(nfacct_nlmsg_parse_payload(nlh, nfacct_root.nfacct_buffer) < 0) {
+ error("NFACCT: nfacct_nlmsg_parse_payload() failed.");
+ return MNL_CB_OK;
+ }
+
+ const char *name = nfacct_attr_get_str(nfacct_root.nfacct_buffer, NFACCT_ATTR_NAME);
+ uint32_t hash = simple_hash(name);
+
+ struct nfacct_data *d = nfacct_data_get(name, hash);
+
+ d->pkts = nfacct_attr_get_u64(nfacct_root.nfacct_buffer, NFACCT_ATTR_PKTS);
+ d->bytes = nfacct_attr_get_u64(nfacct_root.nfacct_buffer, NFACCT_ATTR_BYTES);
+ d->updated = 1;
+
+ return MNL_CB_OK;
+}
+
+static int nfacct_collect() {
+ // mark all old metrics as not-updated
+ struct nfacct_data *d;
+ for(d = nfacct_root.nfacct_metrics; d ; d = d->next)
+ d->updated = 0;
+
+ // prepare the request
+ nfacct_root.seq++;
+ nfacct_root.nlh = nfacct_nlmsg_build_hdr(nfacct_root.buf, NFNL_MSG_ACCT_GET, NLM_F_DUMP, (uint32_t)nfacct_root.seq);
+ if(!nfacct_root.nlh) {
+ error("NFACCT: nfacct_nlmsg_build_hdr() failed");
+ return 1;
+ }
+
+ // send the request
+ if(mnl_socket_sendto(nfacct_root.mnl, nfacct_root.nlh, nfacct_root.nlh->nlmsg_len) < 0) {
+ error("NFACCT: mnl_socket_sendto() failed");
+ return 1;
+ }
+
+ // get the reply
+ ssize_t ret;
+ while((ret = mnl_socket_recvfrom(nfacct_root.mnl, nfacct_root.buf, nfacct_root.buf_size)) > 0) {
+ if(mnl_cb_run(
+ nfacct_root.buf
+ , (size_t)ret
+ , nfacct_root.seq
+ , nfacct_root.portid
+ , nfacct_callback
+ , NULL
+ ) <= 0)
+ break;
+ }
+
+ // verify we run without issues
+ if (ret == -1) {
+ error("NFACCT: error communicating with kernel. This plugin can only work when netdata runs as root.");
+ return 1;
+ }
+
+ return 0;
+}
+
+static void nfacct_send_metrics() {
+ static RRDSET *st_bytes = NULL, *st_packets = NULL;
+
+ if(!nfacct_root.nfacct_metrics) return;
+ struct nfacct_data *d;
+
+ if(!st_packets) {
+ st_packets = rrdset_create_localhost(
+ "netfilter"
+ , "nfacct_packets"
+ , NULL
+ , "nfacct"
+ , NULL
+ , "Netfilter Accounting Packets"
+ , "packets/s"
+ , PLUGIN_NFACCT_NAME
+ , NULL
+ , NETDATA_CHART_PRIO_NETFILTER_PACKETS
+ , nfacct_root.update_every
+ , RRDSET_TYPE_STACKED
+ );
+ }
+ else rrdset_next(st_packets);
+
+ for(d = nfacct_root.nfacct_metrics; d ; d = d->next) {
+ if(likely(d->updated)) {
+ if(unlikely(!d->rd_packets))
+ d->rd_packets = rrddim_add(
+ st_packets
+ , d->name
+ , NULL
+ , 1
+ , nfacct_root.update_every
+ , RRD_ALGORITHM_INCREMENTAL
+ );
+
+ rrddim_set_by_pointer(
+ st_packets
+ , d->rd_packets
+ , (collected_number)d->pkts
+ );
+ }
+ }
+
+ rrdset_done(st_packets);
+
+ // ----------------------------------------------------------------
+
+ st_bytes = rrdset_find_bytype_localhost("netfilter", "nfacct_bytes");
+ if(!st_bytes) {
+ st_bytes = rrdset_create_localhost(
+ "netfilter"
+ , "nfacct_bytes"
+ , NULL
+ , "nfacct"
+ , NULL
+ , "Netfilter Accounting Bandwidth"
+ , "kilobytes/s"
+ , PLUGIN_NFACCT_NAME
+ , NULL
+ , NETDATA_CHART_PRIO_NETFILTER_BYTES
+ , nfacct_root.update_every
+ , RRDSET_TYPE_STACKED
+ );
+ }
+ else rrdset_next(st_bytes);
+
+ for(d = nfacct_root.nfacct_metrics; d ; d = d->next) {
+ if(likely(d->updated)) {
+ if(unlikely(!d->rd_bytes))
+ d->rd_bytes = rrddim_add(
+ st_bytes
+ , d->name
+ , NULL
+ , 1
+ , 1000 * nfacct_root.update_every
+ , RRD_ALGORITHM_INCREMENTAL
+ );
+
+ rrddim_set_by_pointer(
+ st_bytes
+ , d->rd_bytes
+ , (collected_number)d->bytes
+ );
+ }
+ }
+
+ rrdset_done(st_bytes);
+}
+
+#endif // HAVE_LIBNETFILTER_ACCT
+#endif // HAVE_LIBMNL
+
+// ----------------------------------------------------------------------------
+
+static void nfacct_main_cleanup(void *ptr) {
+ struct netdata_static_thread *static_thread = (struct netdata_static_thread *)ptr;
+ static_thread->enabled = NETDATA_MAIN_THREAD_EXITING;
+ info("cleaning up...");
+
+#ifdef DO_NFACCT
+ nfacct_cleanup();
+#endif
+
+#ifdef DO_NFSTAT
+ nfstat_cleanup();
+#endif
+
+ static_thread->enabled = NETDATA_MAIN_THREAD_EXITED;
+}
+
+void *nfacct_main(void *ptr) {
+ netdata_thread_cleanup_push(nfacct_main_cleanup, ptr);
+
+ int update_every = (int)config_get_number("plugin:netfilter", "update every", localhost->rrd_update_every);
+ if(update_every < localhost->rrd_update_every)
+ update_every = localhost->rrd_update_every;
+
+#ifdef DO_NFACCT
+ int nfacct = !nfacct_init(update_every);
+#endif
+
+#ifdef DO_NFSTAT
+ int nfstat = !nfstat_init(update_every);
+#endif
+
+ // ------------------------------------------------------------------------
+
+ usec_t step = update_every * USEC_PER_SEC;
+ heartbeat_t hb;
+ heartbeat_init(&hb);
+ for(;;) {
+ heartbeat_next(&hb, step);
+
+ if(unlikely(netdata_exit)) break;
+
+#ifdef DO_NFACCT
+ if(likely(nfacct)) {
+ nfacct = !nfacct_collect();
+
+ if(likely(nfacct))
+ nfacct_send_metrics();
+ }
+#endif
+
+#ifdef DO_NFSTAT
+ if(likely(nfstat)) {
+ nfstat = !nfstat_collect();
+
+ if(likely(nfstat))
+ nfstat_send_metrics();
+ }
+#endif
+ }
+
+ netdata_thread_cleanup_pop(1);
+ return NULL;
+}
+
+#endif // INTERNAL_PLUGIN_NFACCT
diff --git a/collectors/nfacct.plugin/plugin_nfacct.h b/collectors/nfacct.plugin/plugin_nfacct.h
new file mode 100644
index 000000000..4311ccecf
--- /dev/null
+++ b/collectors/nfacct.plugin/plugin_nfacct.h
@@ -0,0 +1,30 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_NFACCT_H
+#define NETDATA_NFACCT_H 1
+
+#include "../../daemon/common.h"
+
+#if defined(INTERNAL_PLUGIN_NFACCT)
+
+#define NETDATA_PLUGIN_HOOK_LINUX_NFACCT \
+ { \
+ .name = "PLUGIN[nfacct]", \
+ .config_section = CONFIG_SECTION_PLUGINS, \
+ .config_name = "nfacct", \
+ .enabled = 1, \
+ .thread = NULL, \
+ .init_routine = NULL, \
+ .start_routine = nfacct_main \
+ },
+
+extern void *nfacct_main(void *ptr);
+
+#else // !defined(INTERNAL_PLUGIN_NFACCT)
+
+#define NETDATA_PLUGIN_HOOK_LINUX_NFACCT
+
+#endif // defined(INTERNAL_PLUGIN_NFACCT)
+
+#endif /* NETDATA_NFACCT_H */
+
diff --git a/collectors/node.d.plugin/Makefile.am b/collectors/node.d.plugin/Makefile.am
new file mode 100644
index 000000000..4de13cf76
--- /dev/null
+++ b/collectors/node.d.plugin/Makefile.am
@@ -0,0 +1,59 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+CLEANFILES = \
+ node.d.plugin \
+ $(NULL)
+
+include $(top_srcdir)/build/subst.inc
+SUFFIXES = .in
+
+dist_libconfig_DATA = \
+ node.d.conf \
+ $(NULL)
+
+dist_plugins_SCRIPTS = \
+ node.d.plugin \
+ $(NULL)
+
+dist_noinst_DATA = \
+ node.d.plugin.in \
+ README.md \
+ $(NULL)
+
+usernodeconfigdir=$(configdir)/node.d
+dist_usernodeconfig_DATA = \
+ $(top_srcdir)/installer/.keep \
+ $(NULL)
+
+nodeconfigdir=$(libconfigdir)/node.d
+dist_nodeconfig_DATA = \
+ $(top_srcdir)/installer/.keep \
+ $(NULL)
+
+dist_node_DATA = \
+ $(NULL)
+
+include fronius/Makefile.inc
+include named/Makefile.inc
+include sma_webbox/Makefile.inc
+include snmp/Makefile.inc
+include stiebeleltron/Makefile.inc
+
+nodemodulesdir=$(nodedir)/node_modules
+dist_nodemodules_DATA = \
+ node_modules/netdata.js \
+ node_modules/extend.js \
+ node_modules/pixl-xml.js \
+ node_modules/net-snmp.js \
+ node_modules/asn1-ber.js \
+ $(NULL)
+
+nodemoduleslibberdir=$(nodedir)/node_modules/lib/ber
+dist_nodemoduleslibber_DATA = \
+ node_modules/lib/ber/index.js \
+ node_modules/lib/ber/errors.js \
+ node_modules/lib/ber/reader.js \
+ node_modules/lib/ber/types.js \
+ node_modules/lib/ber/writer.js \
+ $(NULL)
diff --git a/collectors/node.d.plugin/Makefile.in b/collectors/node.d.plugin/Makefile.in
new file mode 100644
index 000000000..4aec01dea
--- /dev/null
+++ b/collectors/node.d.plugin/Makefile.in
@@ -0,0 +1,805 @@
+# Makefile.in generated by automake 1.14.1 from Makefile.am.
+# @configure_input@
+
+# Copyright (C) 1994-2013 Free Software Foundation, Inc.
+
+# This Makefile.in is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
+# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE.
+
+@SET_MAKE@
+
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+
+VPATH = @srcdir@
+am__is_gnu_make = test -n '$(MAKEFILE_LIST)' && test -n '$(MAKELEVEL)'
+am__make_running_with_option = \
+ case $${target_option-} in \
+ ?) ;; \
+ *) echo "am__make_running_with_option: internal error: invalid" \
+ "target option '$${target_option-}' specified" >&2; \
+ exit 1;; \
+ esac; \
+ has_opt=no; \
+ sane_makeflags=$$MAKEFLAGS; \
+ if $(am__is_gnu_make); then \
+ sane_makeflags=$$MFLAGS; \
+ else \
+ case $$MAKEFLAGS in \
+ *\\[\ \ ]*) \
+ bs=\\; \
+ sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \
+ | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \
+ esac; \
+ fi; \
+ skip_next=no; \
+ strip_trailopt () \
+ { \
+ flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \
+ }; \
+ for flg in $$sane_makeflags; do \
+ test $$skip_next = yes && { skip_next=no; continue; }; \
+ case $$flg in \
+ *=*|--*) continue;; \
+ -*I) strip_trailopt 'I'; skip_next=yes;; \
+ -*I?*) strip_trailopt 'I';; \
+ -*O) strip_trailopt 'O'; skip_next=yes;; \
+ -*O?*) strip_trailopt 'O';; \
+ -*l) strip_trailopt 'l'; skip_next=yes;; \
+ -*l?*) strip_trailopt 'l';; \
+ -[dEDm]) skip_next=yes;; \
+ -[JT]) skip_next=yes;; \
+ esac; \
+ case $$flg in \
+ *$$target_option*) has_opt=yes; break;; \
+ esac; \
+ done; \
+ test $$has_opt = yes
+am__make_dryrun = (target_option=n; $(am__make_running_with_option))
+am__make_keepgoing = (target_option=k; $(am__make_running_with_option))
+pkgdatadir = $(datadir)/@PACKAGE@
+pkgincludedir = $(includedir)/@PACKAGE@
+pkglibdir = $(libdir)/@PACKAGE@
+pkglibexecdir = $(libexecdir)/@PACKAGE@
+am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd
+install_sh_DATA = $(install_sh) -c -m 644
+install_sh_PROGRAM = $(install_sh) -c
+install_sh_SCRIPT = $(install_sh) -c
+INSTALL_HEADER = $(INSTALL_DATA)
+transform = $(program_transform_name)
+NORMAL_INSTALL = :
+PRE_INSTALL = :
+POST_INSTALL = :
+NORMAL_UNINSTALL = :
+PRE_UNINSTALL = :
+POST_UNINSTALL = :
+build_triplet = @build@
+host_triplet = @host@
+DIST_COMMON = $(top_srcdir)/build/subst.inc \
+ $(srcdir)/fronius/Makefile.inc $(srcdir)/named/Makefile.inc \
+ $(srcdir)/sma_webbox/Makefile.inc $(srcdir)/snmp/Makefile.inc \
+ $(srcdir)/stiebeleltron/Makefile.inc $(srcdir)/Makefile.in \
+ $(srcdir)/Makefile.am $(dist_plugins_SCRIPTS) \
+ $(dist_libconfig_DATA) $(dist_node_DATA) \
+ $(dist_nodeconfig_DATA) $(dist_nodemodules_DATA) \
+ $(dist_nodemoduleslibber_DATA) $(dist_noinst_DATA) \
+ $(dist_usernodeconfig_DATA)
+subdir = collectors/node.d.plugin
+ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
+am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \
+ $(top_srcdir)/build/m4/ax_c__generic.m4 \
+ $(top_srcdir)/build/m4/ax_c_lto.m4 \
+ $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \
+ $(top_srcdir)/build/m4/ax_c_mallopt.m4 \
+ $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \
+ $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \
+ $(top_srcdir)/build/m4/ax_pthread.m4 \
+ $(top_srcdir)/build/m4/jemalloc.m4 \
+ $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac
+am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
+ $(ACLOCAL_M4)
+mkinstalldirs = $(install_sh) -d
+CONFIG_HEADER = $(top_builddir)/config.h
+CONFIG_CLEAN_FILES =
+CONFIG_CLEAN_VPATH_FILES =
+am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`;
+am__vpath_adj = case $$p in \
+ $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \
+ *) f=$$p;; \
+ esac;
+am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`;
+am__install_max = 40
+am__nobase_strip_setup = \
+ srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'`
+am__nobase_strip = \
+ for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||"
+am__nobase_list = $(am__nobase_strip_setup); \
+ for p in $$list; do echo "$$p $$p"; done | \
+ sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \
+ $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \
+ if (++n[$$2] == $(am__install_max)) \
+ { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \
+ END { for (dir in files) print dir, files[dir] }'
+am__base_list = \
+ sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \
+ sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g'
+am__uninstall_files_from_dir = { \
+ test -z "$$files" \
+ || { test ! -d "$$dir" && test ! -f "$$dir" && test ! -r "$$dir"; } \
+ || { echo " ( cd '$$dir' && rm -f" $$files ")"; \
+ $(am__cd) "$$dir" && rm -f $$files; }; \
+ }
+am__installdirs = "$(DESTDIR)$(pluginsdir)" \
+ "$(DESTDIR)$(libconfigdir)" "$(DESTDIR)$(nodedir)" \
+ "$(DESTDIR)$(nodeconfigdir)" "$(DESTDIR)$(nodemodulesdir)" \
+ "$(DESTDIR)$(nodemoduleslibberdir)" \
+ "$(DESTDIR)$(usernodeconfigdir)"
+SCRIPTS = $(dist_plugins_SCRIPTS)
+AM_V_P = $(am__v_P_@AM_V@)
+am__v_P_ = $(am__v_P_@AM_DEFAULT_V@)
+am__v_P_0 = false
+am__v_P_1 = :
+AM_V_GEN = $(am__v_GEN_@AM_V@)
+am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@)
+am__v_GEN_0 = @echo " GEN " $@;
+am__v_GEN_1 =
+AM_V_at = $(am__v_at_@AM_V@)
+am__v_at_ = $(am__v_at_@AM_DEFAULT_V@)
+am__v_at_0 = @
+am__v_at_1 =
+SOURCES =
+DIST_SOURCES =
+am__can_run_installinfo = \
+ case $$AM_UPDATE_INFO_DIR in \
+ n|no|NO) false;; \
+ *) (install-info --version) >/dev/null 2>&1;; \
+ esac
+DATA = $(dist_libconfig_DATA) $(dist_node_DATA) \
+ $(dist_nodeconfig_DATA) $(dist_nodemodules_DATA) \
+ $(dist_nodemoduleslibber_DATA) $(dist_noinst_DATA) \
+ $(dist_usernodeconfig_DATA)
+am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP)
+DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
+ACLOCAL = @ACLOCAL@
+AMTAR = @AMTAR@
+AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@
+AUTOCONF = @AUTOCONF@
+AUTOHEADER = @AUTOHEADER@
+AUTOMAKE = @AUTOMAKE@
+AWK = @AWK@
+CC = @CC@
+CCDEPMODE = @CCDEPMODE@
+CFLAGS = @CFLAGS@
+CPP = @CPP@
+CPPFLAGS = @CPPFLAGS@
+CYGPATH_W = @CYGPATH_W@
+DEFS = @DEFS@
+DEPDIR = @DEPDIR@
+ECHO_C = @ECHO_C@
+ECHO_N = @ECHO_N@
+ECHO_T = @ECHO_T@
+EGREP = @EGREP@
+EXEEXT = @EXEEXT@
+GREP = @GREP@
+INSTALL = @INSTALL@
+INSTALL_DATA = @INSTALL_DATA@
+INSTALL_PROGRAM = @INSTALL_PROGRAM@
+INSTALL_SCRIPT = @INSTALL_SCRIPT@
+INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@
+IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@
+LDFLAGS = @LDFLAGS@
+LIBCAP_CFLAGS = @LIBCAP_CFLAGS@
+LIBCAP_LIBS = @LIBCAP_LIBS@
+LIBMNL_CFLAGS = @LIBMNL_CFLAGS@
+LIBMNL_LIBS = @LIBMNL_LIBS@
+LIBOBJS = @LIBOBJS@
+LIBS = @LIBS@
+LTLIBOBJS = @LTLIBOBJS@
+MAINT = @MAINT@
+MAKEINFO = @MAKEINFO@
+MATH_CFLAGS = @MATH_CFLAGS@
+MATH_LIBS = @MATH_LIBS@
+MKDIR_P = @MKDIR_P@
+NFACCT_CFLAGS = @NFACCT_CFLAGS@
+NFACCT_LIBS = @NFACCT_LIBS@
+OBJEXT = @OBJEXT@
+OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@
+OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@
+OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@
+OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@
+OPTIONAL_MATH_CLFAGS = @OPTIONAL_MATH_CLFAGS@
+OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@
+OPTIONAL_NFACCT_CLFAGS = @OPTIONAL_NFACCT_CLFAGS@
+OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@
+OPTIONAL_UUID_CLFAGS = @OPTIONAL_UUID_CLFAGS@
+OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@
+OPTIONAL_ZLIB_CLFAGS = @OPTIONAL_ZLIB_CLFAGS@
+OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@
+PACKAGE = @PACKAGE@
+PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@
+PACKAGE_NAME = @PACKAGE_NAME@
+PACKAGE_RPM_RELEASE = @PACKAGE_RPM_RELEASE@
+PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@
+PACKAGE_STRING = @PACKAGE_STRING@
+PACKAGE_TARNAME = @PACKAGE_TARNAME@
+PACKAGE_URL = @PACKAGE_URL@
+PACKAGE_VERSION = @PACKAGE_VERSION@
+PATH_SEPARATOR = @PATH_SEPARATOR@
+PKG_CONFIG = @PKG_CONFIG@
+PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@
+PKG_CONFIG_PATH = @PKG_CONFIG_PATH@
+PTHREAD_CC = @PTHREAD_CC@
+PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
+PTHREAD_LIBS = @PTHREAD_LIBS@
+SET_MAKE = @SET_MAKE@
+SHELL = @SHELL@
+SSE_CANDIDATE = @SSE_CANDIDATE@
+STRIP = @STRIP@
+UUID_CFLAGS = @UUID_CFLAGS@
+UUID_LIBS = @UUID_LIBS@
+VERSION = @VERSION@
+ZLIB_CFLAGS = @ZLIB_CFLAGS@
+ZLIB_LIBS = @ZLIB_LIBS@
+abs_builddir = @abs_builddir@
+abs_srcdir = @abs_srcdir@
+abs_top_builddir = @abs_top_builddir@
+abs_top_srcdir = @abs_top_srcdir@
+ac_ct_CC = @ac_ct_CC@
+am__include = @am__include@
+am__leading_dot = @am__leading_dot@
+am__quote = @am__quote@
+am__tar = @am__tar@
+am__untar = @am__untar@
+ax_pthread_config = @ax_pthread_config@
+bindir = @bindir@
+build = @build@
+build_alias = @build_alias@
+build_cpu = @build_cpu@
+build_os = @build_os@
+build_target = @build_target@
+build_vendor = @build_vendor@
+builddir = @builddir@
+cachedir = @cachedir@
+chartsdir = @chartsdir@
+configdir = @configdir@
+datadir = @datadir@
+datarootdir = @datarootdir@
+docdir = @docdir@
+dvidir = @dvidir@
+exec_prefix = @exec_prefix@
+has_jemalloc = @has_jemalloc@
+has_tcmalloc = @has_tcmalloc@
+host = @host@
+host_alias = @host_alias@
+host_cpu = @host_cpu@
+host_os = @host_os@
+host_vendor = @host_vendor@
+htmldir = @htmldir@
+includedir = @includedir@
+infodir = @infodir@
+install_sh = @install_sh@
+libconfigdir = @libconfigdir@
+libdir = @libdir@
+libexecdir = @libexecdir@
+localedir = @localedir@
+localstatedir = @localstatedir@
+logdir = @logdir@
+mandir = @mandir@
+mkdir_p = @mkdir_p@
+nodedir = @nodedir@
+oldincludedir = @oldincludedir@
+pdfdir = @pdfdir@
+pluginsdir = @pluginsdir@
+prefix = @prefix@
+program_transform_name = @program_transform_name@
+psdir = @psdir@
+pythondir = @pythondir@
+registrydir = @registrydir@
+sbindir = @sbindir@
+sharedstatedir = @sharedstatedir@
+srcdir = @srcdir@
+sysconfdir = @sysconfdir@
+target_alias = @target_alias@
+top_build_prefix = @top_build_prefix@
+top_builddir = @top_builddir@
+top_srcdir = @top_srcdir@
+varlibdir = @varlibdir@
+webdir = @webdir@
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+CLEANFILES = \
+ node.d.plugin \
+ $(NULL)
+
+SUFFIXES = .in
+dist_libconfig_DATA = \
+ node.d.conf \
+ $(NULL)
+
+dist_plugins_SCRIPTS = \
+ node.d.plugin \
+ $(NULL)
+
+# dist_nodeconfig_DATA += fronius/fronius.conf
+
+# do not install these files, but include them in the distribution
+# dist_nodeconfig_DATA += named/named.conf
+
+# do not install these files, but include them in the distribution
+# dist_nodeconfig_DATA += sma_webbox/sma_webbox.conf
+
+# do not install these files, but include them in the distribution
+# dist_nodeconfig_DATA += snmp/snmp.conf
+
+# do not install these files, but include them in the distribution
+# dist_nodeconfig_DATA += stiebeleltron/stiebeleltron.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA = node.d.plugin.in README.md $(NULL) \
+ fronius/README.md fronius/Makefile.inc named/README.md \
+ named/Makefile.inc sma_webbox/README.md \
+ sma_webbox/Makefile.inc snmp/README.md snmp/Makefile.inc \
+ stiebeleltron/README.md stiebeleltron/Makefile.inc
+usernodeconfigdir = $(configdir)/node.d
+dist_usernodeconfig_DATA = \
+ $(top_srcdir)/installer/.keep \
+ $(NULL)
+
+nodeconfigdir = $(libconfigdir)/node.d
+dist_nodeconfig_DATA = \
+ $(top_srcdir)/installer/.keep \
+ $(NULL)
+
+
+# install these files
+
+# install these files
+
+# install these files
+
+# install these files
+
+# install these files
+dist_node_DATA = $(NULL) fronius/fronius.node.js named/named.node.js \
+ sma_webbox/sma_webbox.node.js snmp/snmp.node.js \
+ stiebeleltron/stiebeleltron.node.js
+nodemodulesdir = $(nodedir)/node_modules
+dist_nodemodules_DATA = \
+ node_modules/netdata.js \
+ node_modules/extend.js \
+ node_modules/pixl-xml.js \
+ node_modules/net-snmp.js \
+ node_modules/asn1-ber.js \
+ $(NULL)
+
+nodemoduleslibberdir = $(nodedir)/node_modules/lib/ber
+dist_nodemoduleslibber_DATA = \
+ node_modules/lib/ber/index.js \
+ node_modules/lib/ber/errors.js \
+ node_modules/lib/ber/reader.js \
+ node_modules/lib/ber/types.js \
+ node_modules/lib/ber/writer.js \
+ $(NULL)
+
+all: all-am
+
+.SUFFIXES:
+.SUFFIXES: .in
+$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(top_srcdir)/build/subst.inc $(srcdir)/fronius/Makefile.inc $(srcdir)/named/Makefile.inc $(srcdir)/sma_webbox/Makefile.inc $(srcdir)/snmp/Makefile.inc $(srcdir)/stiebeleltron/Makefile.inc $(am__configure_deps)
+ @for dep in $?; do \
+ case '$(am__configure_deps)' in \
+ *$$dep*) \
+ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \
+ && { if test -f $@; then exit 0; else break; fi; }; \
+ exit 1;; \
+ esac; \
+ done; \
+ echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu collectors/node.d.plugin/Makefile'; \
+ $(am__cd) $(top_srcdir) && \
+ $(AUTOMAKE) --gnu collectors/node.d.plugin/Makefile
+.PRECIOUS: Makefile
+Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
+ @case '$?' in \
+ *config.status*) \
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \
+ *) \
+ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \
+ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \
+ esac;
+$(top_srcdir)/build/subst.inc $(srcdir)/fronius/Makefile.inc $(srcdir)/named/Makefile.inc $(srcdir)/sma_webbox/Makefile.inc $(srcdir)/snmp/Makefile.inc $(srcdir)/stiebeleltron/Makefile.inc:
+
+$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+
+$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(am__aclocal_m4_deps):
+install-dist_pluginsSCRIPTS: $(dist_plugins_SCRIPTS)
+ @$(NORMAL_INSTALL)
+ @list='$(dist_plugins_SCRIPTS)'; test -n "$(pluginsdir)" || list=; \
+ if test -n "$$list"; then \
+ echo " $(MKDIR_P) '$(DESTDIR)$(pluginsdir)'"; \
+ $(MKDIR_P) "$(DESTDIR)$(pluginsdir)" || exit 1; \
+ fi; \
+ for p in $$list; do \
+ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
+ if test -f "$$d$$p"; then echo "$$d$$p"; echo "$$p"; else :; fi; \
+ done | \
+ sed -e 'p;s,.*/,,;n' \
+ -e 'h;s|.*|.|' \
+ -e 'p;x;s,.*/,,;$(transform)' | sed 'N;N;N;s,\n, ,g' | \
+ $(AWK) 'BEGIN { files["."] = ""; dirs["."] = 1; } \
+ { d=$$3; if (dirs[d] != 1) { print "d", d; dirs[d] = 1 } \
+ if ($$2 == $$4) { files[d] = files[d] " " $$1; \
+ if (++n[d] == $(am__install_max)) { \
+ print "f", d, files[d]; n[d] = 0; files[d] = "" } } \
+ else { print "f", d "/" $$4, $$1 } } \
+ END { for (d in files) print "f", d, files[d] }' | \
+ while read type dir files; do \
+ if test "$$dir" = .; then dir=; else dir=/$$dir; fi; \
+ test -z "$$files" || { \
+ echo " $(INSTALL_SCRIPT) $$files '$(DESTDIR)$(pluginsdir)$$dir'"; \
+ $(INSTALL_SCRIPT) $$files "$(DESTDIR)$(pluginsdir)$$dir" || exit $$?; \
+ } \
+ ; done
+
+uninstall-dist_pluginsSCRIPTS:
+ @$(NORMAL_UNINSTALL)
+ @list='$(dist_plugins_SCRIPTS)'; test -n "$(pluginsdir)" || exit 0; \
+ files=`for p in $$list; do echo "$$p"; done | \
+ sed -e 's,.*/,,;$(transform)'`; \
+ dir='$(DESTDIR)$(pluginsdir)'; $(am__uninstall_files_from_dir)
+install-dist_libconfigDATA: $(dist_libconfig_DATA)
+ @$(NORMAL_INSTALL)
+ @list='$(dist_libconfig_DATA)'; test -n "$(libconfigdir)" || list=; \
+ if test -n "$$list"; then \
+ echo " $(MKDIR_P) '$(DESTDIR)$(libconfigdir)'"; \
+ $(MKDIR_P) "$(DESTDIR)$(libconfigdir)" || exit 1; \
+ fi; \
+ for p in $$list; do \
+ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
+ echo "$$d$$p"; \
+ done | $(am__base_list) | \
+ while read files; do \
+ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(libconfigdir)'"; \
+ $(INSTALL_DATA) $$files "$(DESTDIR)$(libconfigdir)" || exit $$?; \
+ done
+
+uninstall-dist_libconfigDATA:
+ @$(NORMAL_UNINSTALL)
+ @list='$(dist_libconfig_DATA)'; test -n "$(libconfigdir)" || list=; \
+ files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
+ dir='$(DESTDIR)$(libconfigdir)'; $(am__uninstall_files_from_dir)
+install-dist_nodeDATA: $(dist_node_DATA)
+ @$(NORMAL_INSTALL)
+ @list='$(dist_node_DATA)'; test -n "$(nodedir)" || list=; \
+ if test -n "$$list"; then \
+ echo " $(MKDIR_P) '$(DESTDIR)$(nodedir)'"; \
+ $(MKDIR_P) "$(DESTDIR)$(nodedir)" || exit 1; \
+ fi; \
+ for p in $$list; do \
+ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
+ echo "$$d$$p"; \
+ done | $(am__base_list) | \
+ while read files; do \
+ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(nodedir)'"; \
+ $(INSTALL_DATA) $$files "$(DESTDIR)$(nodedir)" || exit $$?; \
+ done
+
+uninstall-dist_nodeDATA:
+ @$(NORMAL_UNINSTALL)
+ @list='$(dist_node_DATA)'; test -n "$(nodedir)" || list=; \
+ files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
+ dir='$(DESTDIR)$(nodedir)'; $(am__uninstall_files_from_dir)
+install-dist_nodeconfigDATA: $(dist_nodeconfig_DATA)
+ @$(NORMAL_INSTALL)
+ @list='$(dist_nodeconfig_DATA)'; test -n "$(nodeconfigdir)" || list=; \
+ if test -n "$$list"; then \
+ echo " $(MKDIR_P) '$(DESTDIR)$(nodeconfigdir)'"; \
+ $(MKDIR_P) "$(DESTDIR)$(nodeconfigdir)" || exit 1; \
+ fi; \
+ for p in $$list; do \
+ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
+ echo "$$d$$p"; \
+ done | $(am__base_list) | \
+ while read files; do \
+ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(nodeconfigdir)'"; \
+ $(INSTALL_DATA) $$files "$(DESTDIR)$(nodeconfigdir)" || exit $$?; \
+ done
+
+uninstall-dist_nodeconfigDATA:
+ @$(NORMAL_UNINSTALL)
+ @list='$(dist_nodeconfig_DATA)'; test -n "$(nodeconfigdir)" || list=; \
+ files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
+ dir='$(DESTDIR)$(nodeconfigdir)'; $(am__uninstall_files_from_dir)
+install-dist_nodemodulesDATA: $(dist_nodemodules_DATA)
+ @$(NORMAL_INSTALL)
+ @list='$(dist_nodemodules_DATA)'; test -n "$(nodemodulesdir)" || list=; \
+ if test -n "$$list"; then \
+ echo " $(MKDIR_P) '$(DESTDIR)$(nodemodulesdir)'"; \
+ $(MKDIR_P) "$(DESTDIR)$(nodemodulesdir)" || exit 1; \
+ fi; \
+ for p in $$list; do \
+ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
+ echo "$$d$$p"; \
+ done | $(am__base_list) | \
+ while read files; do \
+ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(nodemodulesdir)'"; \
+ $(INSTALL_DATA) $$files "$(DESTDIR)$(nodemodulesdir)" || exit $$?; \
+ done
+
+uninstall-dist_nodemodulesDATA:
+ @$(NORMAL_UNINSTALL)
+ @list='$(dist_nodemodules_DATA)'; test -n "$(nodemodulesdir)" || list=; \
+ files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
+ dir='$(DESTDIR)$(nodemodulesdir)'; $(am__uninstall_files_from_dir)
+install-dist_nodemoduleslibberDATA: $(dist_nodemoduleslibber_DATA)
+ @$(NORMAL_INSTALL)
+ @list='$(dist_nodemoduleslibber_DATA)'; test -n "$(nodemoduleslibberdir)" || list=; \
+ if test -n "$$list"; then \
+ echo " $(MKDIR_P) '$(DESTDIR)$(nodemoduleslibberdir)'"; \
+ $(MKDIR_P) "$(DESTDIR)$(nodemoduleslibberdir)" || exit 1; \
+ fi; \
+ for p in $$list; do \
+ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
+ echo "$$d$$p"; \
+ done | $(am__base_list) | \
+ while read files; do \
+ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(nodemoduleslibberdir)'"; \
+ $(INSTALL_DATA) $$files "$(DESTDIR)$(nodemoduleslibberdir)" || exit $$?; \
+ done
+
+uninstall-dist_nodemoduleslibberDATA:
+ @$(NORMAL_UNINSTALL)
+ @list='$(dist_nodemoduleslibber_DATA)'; test -n "$(nodemoduleslibberdir)" || list=; \
+ files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
+ dir='$(DESTDIR)$(nodemoduleslibberdir)'; $(am__uninstall_files_from_dir)
+install-dist_usernodeconfigDATA: $(dist_usernodeconfig_DATA)
+ @$(NORMAL_INSTALL)
+ @list='$(dist_usernodeconfig_DATA)'; test -n "$(usernodeconfigdir)" || list=; \
+ if test -n "$$list"; then \
+ echo " $(MKDIR_P) '$(DESTDIR)$(usernodeconfigdir)'"; \
+ $(MKDIR_P) "$(DESTDIR)$(usernodeconfigdir)" || exit 1; \
+ fi; \
+ for p in $$list; do \
+ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
+ echo "$$d$$p"; \
+ done | $(am__base_list) | \
+ while read files; do \
+ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(usernodeconfigdir)'"; \
+ $(INSTALL_DATA) $$files "$(DESTDIR)$(usernodeconfigdir)" || exit $$?; \
+ done
+
+uninstall-dist_usernodeconfigDATA:
+ @$(NORMAL_UNINSTALL)
+ @list='$(dist_usernodeconfig_DATA)'; test -n "$(usernodeconfigdir)" || list=; \
+ files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
+ dir='$(DESTDIR)$(usernodeconfigdir)'; $(am__uninstall_files_from_dir)
+tags TAGS:
+
+ctags CTAGS:
+
+cscope cscopelist:
+
+
+distdir: $(DISTFILES)
+ @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+ list='$(DISTFILES)'; \
+ dist_files=`for file in $$list; do echo $$file; done | \
+ sed -e "s|^$$srcdirstrip/||;t" \
+ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \
+ case $$dist_files in \
+ */*) $(MKDIR_P) `echo "$$dist_files" | \
+ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \
+ sort -u` ;; \
+ esac; \
+ for file in $$dist_files; do \
+ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \
+ if test -d $$d/$$file; then \
+ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \
+ if test -d "$(distdir)/$$file"; then \
+ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+ fi; \
+ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \
+ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \
+ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+ fi; \
+ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \
+ else \
+ test -f "$(distdir)/$$file" \
+ || cp -p $$d/$$file "$(distdir)/$$file" \
+ || exit 1; \
+ fi; \
+ done
+check-am: all-am
+check: check-am
+all-am: Makefile $(SCRIPTS) $(DATA)
+installdirs:
+ for dir in "$(DESTDIR)$(pluginsdir)" "$(DESTDIR)$(libconfigdir)" "$(DESTDIR)$(nodedir)" "$(DESTDIR)$(nodeconfigdir)" "$(DESTDIR)$(nodemodulesdir)" "$(DESTDIR)$(nodemoduleslibberdir)" "$(DESTDIR)$(usernodeconfigdir)"; do \
+ test -z "$$dir" || $(MKDIR_P) "$$dir"; \
+ done
+install: install-am
+install-exec: install-exec-am
+install-data: install-data-am
+uninstall: uninstall-am
+
+install-am: all-am
+ @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
+
+installcheck: installcheck-am
+install-strip:
+ if test -z '$(STRIP)'; then \
+ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+ install; \
+ else \
+ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+ "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \
+ fi
+mostlyclean-generic:
+
+clean-generic:
+ -test -z "$(CLEANFILES)" || rm -f $(CLEANFILES)
+
+distclean-generic:
+ -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
+ -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES)
+
+maintainer-clean-generic:
+ @echo "This command is intended for maintainers to use"
+ @echo "it deletes files that may require special tools to rebuild."
+ -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES)
+clean: clean-am
+
+clean-am: clean-generic mostlyclean-am
+
+distclean: distclean-am
+ -rm -f Makefile
+distclean-am: clean-am distclean-generic
+
+dvi: dvi-am
+
+dvi-am:
+
+html: html-am
+
+html-am:
+
+info: info-am
+
+info-am:
+
+install-data-am: install-dist_libconfigDATA install-dist_nodeDATA \
+ install-dist_nodeconfigDATA install-dist_nodemodulesDATA \
+ install-dist_nodemoduleslibberDATA install-dist_pluginsSCRIPTS \
+ install-dist_usernodeconfigDATA
+
+install-dvi: install-dvi-am
+
+install-dvi-am:
+
+install-exec-am:
+
+install-html: install-html-am
+
+install-html-am:
+
+install-info: install-info-am
+
+install-info-am:
+
+install-man:
+
+install-pdf: install-pdf-am
+
+install-pdf-am:
+
+install-ps: install-ps-am
+
+install-ps-am:
+
+installcheck-am:
+
+maintainer-clean: maintainer-clean-am
+ -rm -f Makefile
+maintainer-clean-am: distclean-am maintainer-clean-generic
+
+mostlyclean: mostlyclean-am
+
+mostlyclean-am: mostlyclean-generic
+
+pdf: pdf-am
+
+pdf-am:
+
+ps: ps-am
+
+ps-am:
+
+uninstall-am: uninstall-dist_libconfigDATA uninstall-dist_nodeDATA \
+ uninstall-dist_nodeconfigDATA uninstall-dist_nodemodulesDATA \
+ uninstall-dist_nodemoduleslibberDATA \
+ uninstall-dist_pluginsSCRIPTS \
+ uninstall-dist_usernodeconfigDATA
+
+.MAKE: install-am install-strip
+
+.PHONY: all all-am check check-am clean clean-generic cscopelist-am \
+ ctags-am distclean distclean-generic distdir dvi dvi-am html \
+ html-am info info-am install install-am install-data \
+ install-data-am install-dist_libconfigDATA \
+ install-dist_nodeDATA install-dist_nodeconfigDATA \
+ install-dist_nodemodulesDATA \
+ install-dist_nodemoduleslibberDATA install-dist_pluginsSCRIPTS \
+ install-dist_usernodeconfigDATA install-dvi install-dvi-am \
+ install-exec install-exec-am install-html install-html-am \
+ install-info install-info-am install-man install-pdf \
+ install-pdf-am install-ps install-ps-am install-strip \
+ installcheck installcheck-am installdirs maintainer-clean \
+ maintainer-clean-generic mostlyclean mostlyclean-generic pdf \
+ pdf-am ps ps-am tags-am uninstall uninstall-am \
+ uninstall-dist_libconfigDATA uninstall-dist_nodeDATA \
+ uninstall-dist_nodeconfigDATA uninstall-dist_nodemodulesDATA \
+ uninstall-dist_nodemoduleslibberDATA \
+ uninstall-dist_pluginsSCRIPTS \
+ uninstall-dist_usernodeconfigDATA
+
+.in:
+ if sed \
+ -e 's#[@]localstatedir_POST@#$(localstatedir)#g' \
+ -e 's#[@]sbindir_POST@#$(sbindir)#g' \
+ -e 's#[@]sysconfdir_POST@#$(sysconfdir)#g' \
+ -e 's#[@]pythondir_POST@#$(pythondir)#g' \
+ -e 's#[@]configdir_POST@#$(configdir)#g' \
+ -e 's#[@]libconfigdir_POST@#$(libconfigdir)#g' \
+ -e 's#[@]cachedir_POST@#$(cachedir)#g' \
+ $< > $@.tmp; then \
+ mv "$@.tmp" "$@"; \
+ else \
+ rm -f "$@.tmp"; \
+ false; \
+ fi
+
+# Tell versions [3.59,3.63) of GNU make to not export all variables.
+# Otherwise a system limit (for SysV at least) may be exceeded.
+.NOEXPORT:
diff --git a/collectors/node.d.plugin/README.md b/collectors/node.d.plugin/README.md
new file mode 100644
index 000000000..dd977017d
--- /dev/null
+++ b/collectors/node.d.plugin/README.md
@@ -0,0 +1,218 @@
+# node.d.plugin
+
+`node.d.plugin` is a netdata external plugin. It is an **orchestrator** for data collection modules written in `node.js`.
+
+1. It runs as an independent process (`ps fax` shows it)
+2. It is started and stopped automatically by netdata
+3. It communicates with netdata via a unidirectional pipe (sending data to the netdata daemon)
+4. Supports any number of data collection **modules**
+5. Allows each **module** to have one or more data collection **jobs**
+6. Each **job** collects one or more metrics from a single data source
+
+# Motivation
+
+Node.js is well suited to asynchronous operations. It is fast and very widely available.
+Since data collection is not a CPU-intensive task, node.js is a good fit for it.
+
+`node.d.plugin` is a netdata plugin that provides an abstraction layer to allow easy and quick development of data
+collectors in node.js. It also manages all its data collectors (placed in `/usr/libexec/netdata/node.d`) using a single
+instance of node, thus lowering the memory footprint of data collection.
+
+Of course, there can be independent plugins written in node.js (placed in `/usr/libexec/netdata/plugins`).
+These will have to be developed using the guidelines of **[External Plugins](../plugins.d/)**.
+
+To run `node.js` plugins you need to have `node` installed in your system.
+
+In some older systems, the package named `node` is not node.js. It is a terminal emulation program called `ax25-node`.
+In this case the node.js package may be referred to as `nodejs`. Once you install `nodejs`, we suggest linking
+`/usr/bin/nodejs` to `/usr/bin/node`, so that typing `node` in your terminal opens node.js.
+For more information check the **[[Installation]]** guide.
+
+## configuring `node.d.plugin`
+
+`node.d.plugin` can work even without any configuration. Its default configuration file is
+[/etc/netdata/node.d.conf](node.d.conf) (to edit it on your system run `/etc/netdata/edit-config node.d.conf`).
+
+## configuring `node.d.plugin` modules
+
+`node.d.plugin` modules accept configuration in `JSON` format.
+
+Unfortunately, `JSON` files do not accept comments. So, the best way to describe them is to have markdown text files
+with instructions.
+
+`JSON` has very strict formatting. If you get errors from netdata at `/var/log/netdata/error.log` that a certain
+configuration file cannot be loaded, we suggest verifying it at [http://jsonlint.com/](http://jsonlint.com/).
+
+The files in this directory provide usable examples for configuring each `node.d.plugin` module.
+
+
+## debugging modules written for node.d.plugin
+
+To test `node.d.plugin` modules, which are placed in `/usr/libexec/netdata/node.d`, you can run `node.d.plugin` by hand,
+like this:
+
+```sh
+# become user netdata
+sudo su -s /bin/sh netdata
+
+# run the plugin in debug mode
+/usr/libexec/netdata/plugins.d/node.d.plugin debug 1 X Y Z
+```
+
+`node.d.plugin` will run in `debug` mode (lots of debug info), with an update frequency of `1` second, evaluating only
+the collector scripts `X` (i.e. `/usr/libexec/netdata/node.d/X.node.js`), `Y` and `Z`.
+You can define zero or more modules. If none is defined, `node.d.plugin` will evaluate all available modules.
+
+Keep in mind that if your configs are not in `/etc/netdata`, you should do the following before running `node.d.plugin`:
+
+```sh
+export NETDATA_USER_CONFIG_DIR="/path/to/etc/netdata"
+```
+
+---
+
+## developing `node.d.plugin` modules
+
+Your data collection module should be split in 3 parts:
+
+ - a function to fetch the data from its source. `node.d.plugin` can already fetch data from web sources,
+ so you don't need to do anything about it for http.
+
+ - a function to process/manipulate the fetched data. This function will make a number of calls
+ to create charts and dimensions and pass the collected values to netdata.
+ This is the only function you need to write for collecting http JSON data.
+
+ - a `configure` and an `update` function, which take care of your module configuration and data refresh
+ respectively. You can use the supplied ones.
+
+Your module will automatically be able to process any number of servers, with different settings (even different
+data collection frequencies). You write only the code needed for one server and `node.d.plugin` will do the rest.
+For each server you are going to fetch data from, you will have to create a `service` (more on this later).
+
+### writing the data collection module
+
+To provide a module called `mymodule`, you have to create the file `/usr/libexec/netdata/node.d/mymodule.node.js`, with this structure:
+
+```js
+
+// the processor is needed only
+// if you need a custom processor
+// other than http
+netdata.processors.myprocessor = {
+ name: 'myprocessor',
+
+ process: function(service, callback) {
+
+ /* do data collection here */
+
+ callback(data);
+ }
+};
+
+// this is the mymodule definition
+var mymodule = {
+ processResponse: function(service, data) {
+
+ /* send information to the netdata server here */
+
+ },
+
+ configure: function(config) {
+ var eligible_services = 0;
+
+ if(typeof(config.servers) === 'undefined' || config.servers.length === 0) {
+
+ /*
+ * create a service using internal defaults;
+ * this is used for auto-detecting the settings
+ * if possible
+ */
+
+ netdata.service({
+ name: 'a name for this service',
+ update_every: this.update_every,
+ module: this,
+ processor: netdata.processors.myprocessor,
+ // any other information your processor needs
+ }).execute(this.processResponse);
+
+ eligible_services++;
+ }
+ else {
+
+ /*
+ * create a service for each server in the
+ * configuration file
+ */
+
+ var len = config.servers.length;
+ while(len--) {
+ var server = config.servers[len];
+
+ netdata.service({
+ name: server.name,
+ update_every: server.update_every,
+ module: this,
+ processor: netdata.processors.myprocessor,
+ // any other information your processor needs
+ }).execute(this.processResponse);
+
+ eligible_services++;
+ }
+ }
+
+ return eligible_services;
+ },
+
+ update: function(service, callback) {
+
+ /*
+ * this function is called when each service
+ * created by the configure function, needs to
+ * collect updated values.
+ *
+ * You normally will not need to change it.
+ */
+
+ service.execute(function(service, data) {
+ mymodule.processResponse(service, data);
+ callback();
+ });
+ },
+};
+
+module.exports = mymodule;
+```
+
+#### configure(config)
+
+`configure(config)` is called just once, when `node.d.plugin` starts.
+The `config` argument will contain the contents of `/etc/netdata/node.d/mymodule.conf`.
+This file should have the following format:
+
+```js
+{
+ "enable_autodetect": false,
+ "update_every": 5,
+ "servers": [ { /* server 1 */ }, { /* server 2 */ } ]
+}
+```
+
+If the config file `/etc/netdata/node.d/mymodule.conf` does not set `enable_autodetect` or `update_every`, these
+will be added by `node.d.plugin`, so your module will always have them.
+
+The configuration file `/etc/netdata/node.d/mymodule.conf` may contain whatever else is needed for `mymodule`.
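+
+For example, a hypothetical `mymodule.conf` could also carry per-server connection details (the `hostname` and `api_path` keys below are purely illustrative; your module decides what it needs):
+
+```js
+{
+  "enable_autodetect": false,
+  "update_every": 5,
+  "servers": [
+    { "name": "server1", "hostname": "10.0.0.1", "api_path": "/status" },
+    { "name": "server2", "hostname": "10.0.0.2", "api_path": "/status", "update_every": 10 }
+  ]
+}
+```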
+
+#### processResponse(data)
+
+`data` may be `null` or whatever the processor specified in the `service` returned.
+
+The `service` object defines a set of functions that allow you to send information to the netdata core about the following (a minimal sketch is given after this list):
+
+1. Charts and dimension definitions
+2. Updated values, from the collected values
+
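+As a rough sketch (following the patterns used by the collectors shipped in this directory, e.g. `fronius.node.js`), a `processResponse()` implementation commits the service once, defines a chart with `service.chart()`, and then sends values with `service.begin()`, `service.set()` and `service.end()`. The sketch below would replace the `processResponse` stub in the `mymodule` template above; the chart details and the `values.requests` field are hypothetical:
+
+```js
+processResponse: function(service, data) {
+    if (data === null) return;
+    var values = JSON.parse(data);                  // assuming the data source returns JSON
+
+    // add the service to netdata, once
+    if (service.added !== true) service.commit();
+
+    // define (or re-use) a chart
+    var id = 'mymodule_' + service.name + '.requests';
+    var chart = service.chart(id, {
+        id: id,                                     // the unique id of the chart
+        name: '',                                   // the unique name of the chart
+        title: service.name + ' Requests',          // the title of the chart
+        units: 'requests/s',                        // the units of the chart dimensions
+        family: 'requests',                         // the family of the chart
+        context: 'mymodule.requests',               // the context of the chart
+        type: netdata.chartTypes.line,              // the type of the chart
+        priority: 60000,                            // the priority relative to others in the same family
+        update_every: service.update_every,         // the expected update frequency of the chart
+        dimensions: {
+            'requests': {
+                id: 'requests',
+                name: '',
+                algorithm: netdata.chartAlgorithms.incremental,
+                multiplier: 1,
+                divisor: 1,
+                hidden: false
+            }
+        }
+    });
+
+    // send the collected value for this iteration
+    service.begin(chart);
+    service.set('requests', values.requests);       // 'requests' is a hypothetical field of the response
+    service.end();
+},
+```
+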
+---
+
+*FIXME: document an operational node.d.plugin data collector - the best example is the
+[snmp collector](snmp/snmp.node.js)*
diff --git a/collectors/node.d.plugin/fronius/Makefile.inc b/collectors/node.d.plugin/fronius/Makefile.inc
new file mode 100644
index 000000000..da0743a88
--- /dev/null
+++ b/collectors/node.d.plugin/fronius/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_node_DATA += fronius/fronius.node.js
+# dist_nodeconfig_DATA += fronius/fronius.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += fronius/README.md fronius/Makefile.inc
+
diff --git a/collectors/node.d.plugin/fronius/README.md b/collectors/node.d.plugin/fronius/README.md
new file mode 100644
index 000000000..dd2846990
--- /dev/null
+++ b/collectors/node.d.plugin/fronius/README.md
@@ -0,0 +1,120 @@
+# fronius
+
+This module collects metrics from a configured Fronius Symo solar power installation.
+
+**Requirements**
+ * Configuration file `fronius.conf` in the node.d netdata config dir (default: `/etc/netdata/node.d/fronius.conf`)
+ * Fronius Symo with network access (http)
+
+It produces per server:
+
+1. **Power**
+ * Current power input from the grid (positive values), output to the grid (negative values), in W
+ * Current power input from the solar panels, in W
+ * Current power stored in the accumulator (if present), in W (in theory, untested)
+
+2. **Consumption**
+ * Local consumption in W
+
+3. **Autonomy**
+ * Relative autonomy in %. 100 % autonomy means that the solar panels are delivering more power than is needed by local consumption.
+ * Relative self consumption in %. The lower, the better (see the sketch after this list for a related derived dimension).
+
+4. **Energy**
+ * The energy produced during the current day, in kWh
+ * The energy produced during the current year, in kWh
+
+5. **Inverter**
+ * The current power output from the connected inverters, in W, one dimension per inverter. At least one is always present.
+
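+The relative autonomy and self consumption percentages are reported by the API directly; the collector also derives a third dimension on the autonomy chart, the share of solar production that is consumed locally. A minimal sketch of that derivation (mirroring the logic in `fronius.node.js` further down; `P_Load` and `P_PV` are fields of `Body.Data.Site` in the API response):
+
+```js
+// derive the locally consumed share of solar production, in %
+function solarConsumption(site) {
+    var load = Math.abs(site.P_Load);       // local consumption, in W
+    var power = Math.max(site.P_PV, 0);     // photovoltaic output, in W
+    if (power <= 0) return 0;               // no solar production at all
+    if (load >= power) return 100;          // all of the production is consumed locally
+    return Math.round(100 / power * load);  // fraction of the production consumed locally
+}
+```
+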
+
+### configuration
+
+Sample:
+
+```json
+{
+ "enable_autodetect": false,
+ "update_every": 5,
+ "servers": [
+ {
+ "name": "Symo",
+ "hostname": "symo.ip.or.dns",
+ "update_every": 5,
+ "api_path": "/solar_api/v1/GetPowerFlowRealtimeData.fcgi"
+ }
+ ]
+}
+```
+
+If no configuration is given, the module will be disabled. Each `update_every` is optional; the default is `5`.
+
+---
+
+[Fronius Symo 8.2](https://www.fronius.com/en/photovoltaics/products/all-products/inverters/fronius-symo/fronius-symo-8-2-3-m)
+
+The plugin has been tested with a single inverter, namely Fronius Symo 8.2-3-M:
+
+- Datalogger version: 240.162630
+- Software version: 3.7.4-6
+- Hardware version: 2.4D
+
+Other products and versions may work, but without any guarantees.
+
+Example netdata configuration for `node.d/fronius.conf`. Copy this section to `fronius.conf` and change the name and IP/hostname.
+The module supports any number of servers. Sometimes there is a lag when collecting every 3 seconds, so 5 should be okay too. You can modify this per server.
+```json
+{
+ "enable_autodetect": false,
+ "update_every": 5,
+ "servers": [
+ {
+ "name": "solar",
+ "hostname": "symo.ip.or.dns",
+ "update_every": 5,
+ "api_path": "/solar_api/v1/GetPowerFlowRealtimeData.fcgi"
+ }
+ ]
+}
+```
+
+The output of `/solar_api/v1/GetPowerFlowRealtimeData.fcgi` looks like this:
+```json
+{
+ "Head" : {
+ "RequestArguments" : {},
+ "Status" : {
+ "Code" : 0,
+ "Reason" : "",
+ "UserMessage" : ""
+ },
+ "Timestamp" : "2017-07-05T12:35:12+02:00"
+ },
+ "Body" : {
+ "Data" : {
+ "Site" : {
+ "Mode" : "meter",
+ "P_Grid" : -6834.549847,
+ "P_Load" : -1271.450153,
+ "P_Akku" : null,
+ "P_PV" : 8106,
+ "rel_SelfConsumption" : 15.685297,
+ "rel_Autonomy" : 100,
+ "E_Day" : 35020,
+ "E_Year" : 5826076,
+ "E_Total" : 14788870,
+ "Meter_Location" : "grid"
+ },
+ "Inverters" : {
+ "1" : {
+ "DT" : 123,
+ "P" : 8106,
+ "E_Day" : 35020,
+ "E_Year" : 5826076,
+ "E_Total" : 14788870
+ }
+ }
+ }
+ }
+}
+```
diff --git a/collectors/node.d.plugin/fronius/fronius.node.js b/collectors/node.d.plugin/fronius/fronius.node.js
new file mode 100644
index 000000000..436f3a325
--- /dev/null
+++ b/collectors/node.d.plugin/fronius/fronius.node.js
@@ -0,0 +1,400 @@
+"use strict";
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+// This program will connect to one or more Fronius Symo inverters
+// to get the solar power generated (current, today).
+
+// example configuration in netdata/conf.d/node.d/fronius.conf.md
+
+require("url");
+require("http");
+var netdata = require("netdata");
+
+netdata.debug("loaded " + __filename + " plugin");
+
+var fronius = {
+ name: "Fronius",
+ enable_autodetect: false,
+ update_every: 5,
+ base_priority: 60000,
+ charts: {},
+
+ powerGridId: "p_grid",
+ powerPvId: "p_pv",
+ powerAccuId: "p_akku", // not my typo! Using the ID from the API
+ consumptionLoadId: "p_load",
+ autonomyId: "rel_autonomy",
+ consumptionSelfId: "rel_selfconsumption",
+ solarConsumptionId: "solar_consumption",
+ energyTodayId: "e_day",
+ energyYearId: "e_year",
+
+ createBasicDimension: function (id, name, divisor) {
+ return {
+ id: id, // the unique id of the dimension
+ name: name, // the name of the dimension
+ algorithm: netdata.chartAlgorithms.absolute,// the id of the netdata algorithm
+ multiplier: 1, // the multiplier
+ divisor: divisor, // the divisor
+ hidden: false // is hidden (boolean)
+ };
+ },
+
+ // Gets the site power chart. Will be created if not existing.
+ getSitePowerChart: function (service, suffix) {
+ var id = this.getChartId(service, suffix);
+ var chart = fronius.charts[id];
+ if (fronius.isDefined(chart)) return chart;
+
+ var dim = {};
+ dim[fronius.powerGridId] = this.createBasicDimension(fronius.powerGridId, "grid", 1);
+ dim[fronius.powerPvId] = this.createBasicDimension(fronius.powerPvId, "photovoltaics", 1);
+ dim[fronius.powerAccuId] = this.createBasicDimension(fronius.powerAccuId, "accumulator", 1);
+
+ chart = {
+ id: id, // the unique id of the chart
+ name: "", // the unique name of the chart
+ title: service.name + " Current Site Power", // the title of the chart
+ units: "W", // the units of the chart dimensions
+ family: "power", // the family of the chart
+ context: "fronius.power", // the context of the chart
+ type: netdata.chartTypes.area, // the type of the chart
+ priority: fronius.base_priority + 1, // the priority relative to others in the same family
+ update_every: service.update_every, // the expected update frequency of the chart
+ dimensions: dim
+ };
+ chart = service.chart(id, chart);
+ fronius.charts[id] = chart;
+
+ return chart;
+ },
+
+ // Gets the site consumption chart. Will be created if not existing.
+ getSiteConsumptionChart: function (service, suffix) {
+ var id = this.getChartId(service, suffix);
+ var chart = fronius.charts[id];
+ if (fronius.isDefined(chart)) return chart;
+ var dim = {};
+ dim[fronius.consumptionLoadId] = this.createBasicDimension(fronius.consumptionLoadId, "load", 1);
+
+ chart = {
+ id: id, // the unique id of the chart
+ name: "", // the unique name of the chart
+ title: service.name + " Current Load", // the title of the chart
+ units: "W", // the units of the chart dimensions
+ family: "consumption", // the family of the chart
+ context: "fronius.consumption", // the context of the chart
+ type: netdata.chartTypes.area, // the type of the chart
+ priority: fronius.base_priority + 2, // the priority relative to others in the same family
+ update_every: service.update_every, // the expected update frequency of the chart
+ dimensions: dim
+ };
+ chart = service.chart(id, chart);
+ fronius.charts[id] = chart;
+
+ return chart;
+ },
+
+ // Gets the site consumption chart. Will be created if not existing.
+ getSiteAutonomyChart: function (service, suffix) {
+ var id = this.getChartId(service, suffix);
+ var chart = fronius.charts[id];
+ if (fronius.isDefined(chart)) return chart;
+ var dim = {};
+ dim[fronius.autonomyId] = this.createBasicDimension(fronius.autonomyId, "autonomy", 1);
+ dim[fronius.consumptionSelfId] = this.createBasicDimension(fronius.consumptionSelfId, "self_consumption", 1);
+ dim[fronius.solarConsumptionId] = this.createBasicDimension(fronius.solarConsumptionId, "solar_consumption", 1);
+
+ chart = {
+ id: id, // the unique id of the chart
+ name: "", // the unique name of the chart
+ title: service.name + " Current Autonomy", // the title of the chart
+ units: "%", // the units of the chart dimensions
+ family: "autonomy", // the family of the chart
+ context: "fronius.autonomy", // the context of the chart
+ type: netdata.chartTypes.area, // the type of the chart
+ priority: fronius.base_priority + 3, // the priority relative to others in the same family
+ update_every: service.update_every, // the expected update frequency of the chart
+ dimensions: dim
+ };
+ chart = service.chart(id, chart);
+ fronius.charts[id] = chart;
+
+ return chart;
+ },
+
+ // Gets the site energy chart for today. Will be created if not existing.
+ getSiteEnergyTodayChart: function (service, suffix) {
+ var chartId = this.getChartId(service, suffix);
+ var chart = fronius.charts[chartId];
+ if (fronius.isDefined(chart)) return chart;
+ var dim = {};
+ dim[fronius.energyTodayId] = this.createBasicDimension(fronius.energyTodayId, "today", 1000);
+ chart = {
+ id: chartId, // the unique id of the chart
+ name: "", // the unique name of the chart
+ title: service.name + " Energy production for today",// the title of the chart
+ units: "kWh", // the units of the chart dimensions
+ family: "energy", // the family of the chart
+ context: "fronius.energy.today", // the context of the chart
+ type: netdata.chartTypes.area, // the type of the chart
+ priority: fronius.base_priority + 4, // the priority relative to others in the same family
+ update_every: service.update_every, // the expected update frequency of the chart
+ dimensions: dim
+ };
+ chart = service.chart(chartId, chart);
+ fronius.charts[chartId] = chart;
+
+ return chart;
+ },
+
+ // Gets the site energy chart for today. Will be created if not existing.
+ getSiteEnergyYearChart: function (service, suffix) {
+ var chartId = this.getChartId(service, suffix);
+ var chart = fronius.charts[chartId];
+ if (fronius.isDefined(chart)) return chart;
+ var dim = {};
+ dim[fronius.energyYearId] = this.createBasicDimension(fronius.energyYearId, "year", 1000);
+ chart = {
+ id: chartId, // the unique id of the chart
+ name: "", // the unique name of the chart
+ title: service.name + " Energy production for this year",// the title of the chart
+ units: "kWh", // the units of the chart dimensions
+ family: "energy", // the family of the chart
+ context: "fronius.energy.year", // the context of the chart
+ type: netdata.chartTypes.area, // the type of the chart
+ priority: fronius.base_priority + 5, // the priority relative to others in the same family
+ update_every: service.update_every, // the expected update frequency of the chart
+ dimensions: dim
+ };
+ chart = service.chart(chartId, chart);
+ fronius.charts[chartId] = chart;
+
+ return chart;
+ },
+
+ // Gets the inverter power chart. Will be created if not existing.
+ // Needs the array of inverters in order to create a chart with all inverters as dimensions
+ getInverterPowerChart: function (service, suffix, inverters) {
+ var chartId = this.getChartId(service, suffix);
+ var chart = fronius.charts[chartId];
+ if (fronius.isDefined(chart)) return chart;
+
+ var dim = {};
+ for (var key in inverters) {
+ if (inverters.hasOwnProperty(key)) {
+ var name = key;
+ if (!isNaN(key)) name = "inverter_" + key;
+ dim[key] = this.createBasicDimension("inverter_" + key, name, 1);
+ }
+ }
+
+ chart = {
+ id: chartId, // the unique id of the chart
+ name: "", // the unique name of the chart
+ title: service.name + " Current Inverter Output",// the title of the chart
+ units: "W", // the units of the chart dimensions
+ family: "inverters", // the family of the chart
+ context: "fronius.inverter.output", // the context of the chart
+ type: netdata.chartTypes.stacked, // the type of the chart
+ priority: fronius.base_priority + 6, // the priority relative to others in the same family
+ update_every: service.update_every, // the expected update frequency of the chart
+ dimensions: dim
+ };
+ chart = service.chart(chartId, chart);
+ fronius.charts[chartId] = chart;
+
+ return chart;
+ },
+
+ processResponse: function (service, content) {
+ var json = fronius.convertToJson(content);
+ if (json === null) return;
+
+ // add the service
+ service.commit();
+
+ var chartDefinitions = fronius.parseCharts(service, json);
+ var chartCount = chartDefinitions.length;
+ while (chartCount--) {
+ var chartObj = chartDefinitions[chartCount];
+ service.begin(chartObj.chart);
+ var dimCount = chartObj.dimensions.length;
+ while (dimCount--) {
+ var dim = chartObj.dimensions[dimCount];
+ service.set(dim.name, dim.value);
+ }
+ service.end();
+ }
+ },
+
+ parseCharts: function (service, json) {
+ var site = json.Body.Data.Site;
+ return [
+ this.parsePowerChart(service, site),
+ this.parseConsumptionChart(service, site),
+ this.parseAutonomyChart(service, site),
+ this.parseEnergyTodayChart(service, site),
+ this.parseEnergyYearChart(service, site),
+ this.parseInverterChart(service, json.Body.Data.Inverters)
+ ];
+ },
+
+ parsePowerChart: function (service, site) {
+ return this.getChart(this.getSitePowerChart(service, "power"),
+ [
+ this.getDimension(this.powerGridId, Math.round(site.P_Grid)),
+ this.getDimension(this.powerPvId, Math.round(Math.max(site.P_PV, 0))),
+ this.getDimension(this.powerAccuId, Math.round(site.P_Akku))
+ ]
+ );
+ },
+
+ parseConsumptionChart: function (service, site) {
+ return this.getChart(this.getSiteConsumptionChart(service, "consumption"),
+ [this.getDimension(this.consumptionLoadId, Math.round(Math.abs(site.P_Load)))]
+ );
+ },
+
+ parseAutonomyChart: function (service, site) {
+ var selfConsumption = site.rel_SelfConsumption;
+ var solarConsumption = 0;
+ var load = Math.abs(site.P_Load);
+ var power = Math.max(site.P_PV, 0);
+ if (power <= 0) solarConsumption = 0;
+ else if (load >= power) solarConsumption = 100;
+ else solarConsumption = 100 / power * load;
+ return this.getChart(this.getSiteAutonomyChart(service, "autonomy"),
+ [
+ this.getDimension(this.autonomyId, Math.round(site.rel_Autonomy)),
+ this.getDimension(this.consumptionSelfId, Math.round(selfConsumption === null ? 100 : selfConsumption)),
+ this.getDimension(this.solarConsumptionId, Math.round(solarConsumption))
+ ]
+ );
+ },
+
+ parseEnergyTodayChart: function (service, site) {
+ return this.getChart(this.getSiteEnergyTodayChart(service, "energy.today"),
+ [this.getDimension(this.energyTodayId, Math.round(Math.max(site.E_Day, 0)))]
+ );
+ },
+
+ parseEnergyYearChart: function (service, site) {
+ return this.getChart(this.getSiteEnergyYearChart(service, "energy.year"),
+ [this.getDimension(this.energyYearId, Math.round(Math.max(site.E_Year, 0)))]
+ );
+ },
+
+ parseInverterChart: function (service, inverters) {
+ var dimensions = [];
+ for (var key in inverters) {
+ if (inverters.hasOwnProperty(key)) {
+ dimensions.push(this.getDimension(key, Math.round(inverters[key].P)));
+ }
+ }
+ return this.getChart(this.getInverterPowerChart(service, "inverters.output", inverters), dimensions);
+ },
+
+ getDimension: function (name, value) {
+ return {
+ name: name,
+ value: value
+ };
+ },
+
+ getChart: function (chart, dimensions) {
+ return {
+ chart: chart,
+ dimensions: dimensions
+ };
+ },
+
+ getChartId: function (service, suffix) {
+ return "fronius_" + service.name + "." + suffix;
+ },
+
+ convertToJson: function (httpBody) {
+ if (httpBody === null) return null;
+ var json = httpBody;
+ // can't parse if it's already a json object,
+ // the check enables easier testing if the httpBody is already valid JSON.
+ if (typeof httpBody !== "object") {
+ try {
+ json = JSON.parse(httpBody);
+ } catch (error) {
+ netdata.error("fronius: Got a response, but it is not valid JSON. Ignoring. Error: " + error.message);
+ return null;
+ }
+ }
+ return this.isResponseValid(json) ? json : null;
+ },
+
+ // some basic validation
+ isResponseValid: function (json) {
+ if (this.isUndefined(json.Body)) return false;
+ if (this.isUndefined(json.Body.Data)) return false;
+ if (this.isUndefined(json.Body.Data.Site)) return false;
+ return this.isDefined(json.Body.Data.Inverters);
+ },
+
+ // module.serviceExecute()
+ // this function is called only from this module
+ // its purpose is to prepare the request and call
+ // netdata.serviceExecute()
+ serviceExecute: function (name, uri, update_every) {
+ netdata.debug(this.name + ": " + name + ": url: " + uri + ", update_every: " + update_every);
+
+ var service = netdata.service({
+ name: name,
+ request: netdata.requestFromURL("http://" + uri),
+ update_every: update_every,
+ module: this
+ });
+ service.execute(this.processResponse);
+ },
+
+
+ configure: function (config) {
+ if (fronius.isUndefined(config.servers)) return 0;
+ var added = 0;
+ var len = config.servers.length;
+ while (len--) {
+ var server = config.servers[len];
+ if (fronius.isUndefined(server.update_every)) server.update_every = this.update_every;
+ if (fronius.areUndefined([server.name, server.hostname, server.api_path])) continue;
+
+ var url = server.hostname + server.api_path;
+ this.serviceExecute(server.name, url, server.update_every);
+ added++;
+ }
+ return added;
+ },
+
+ // module.update()
+ // this is called repeatedly to collect data, by calling
+ // netdata.serviceExecute()
+ update: function (service, callback) {
+ service.execute(function (serv, data) {
+ service.module.processResponse(serv, data);
+ callback();
+ });
+ },
+
+ isUndefined: function (value) {
+ return typeof value === "undefined";
+ },
+
+ areUndefined: function (valueArray) {
+ var i = 0;
+ for (i; i < valueArray.length; i++) {
+ if (this.isUndefined(valueArray[i])) return true;
+ }
+ return false;
+ },
+
+ isDefined: function (value) {
+ return typeof value !== "undefined";
+ }
+};
+
+module.exports = fronius;
diff --git a/collectors/node.d.plugin/named/Makefile.inc b/collectors/node.d.plugin/named/Makefile.inc
new file mode 100644
index 000000000..95f423012
--- /dev/null
+++ b/collectors/node.d.plugin/named/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_node_DATA += named/named.node.js
+# dist_nodeconfig_DATA += named/named.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += named/README.md named/Makefile.inc
+
diff --git a/collectors/node.d.plugin/named/README.md b/collectors/node.d.plugin/named/README.md
new file mode 100644
index 000000000..977a5015f
--- /dev/null
+++ b/collectors/node.d.plugin/named/README.md
@@ -0,0 +1,342 @@
+# ISC Bind Statistics
+
+Using this netdata collector, you can monitor one or more ISC Bind servers.
+
+## Example netdata charts
+
+Depending on the number of views your bind has, you may get a large number of charts.
+Here is an example with just one view:
+
+![image](https://cloud.githubusercontent.com/assets/2662304/12765473/879b8e04-ca07-11e5-817d-b0651996c42b.png)
+![image](https://cloud.githubusercontent.com/assets/2662304/12766538/12b272fa-ca0d-11e5-81e1-6a9f8ff488ff.png)
+
+## How it works
+
+The plugin will execute (from within node.js) the equivalent of:
+
+```sh
+curl "http://localhost:8888/json/v1/server"
+```
+
+Here is a sample of the output this command produces.
+
+```js
+{
+ "json-stats-version":"1.0",
+ "boot-time":"2016-01-31T08:20:48Z",
+ "config-time":"2016-01-31T09:28:03Z",
+ "current-time":"2016-02-02T22:22:20Z",
+ "opcodes":{
+ "QUERY":247816,
+ "IQUERY":0,
+ "STATUS":0,
+ "RESERVED3":0,
+ "NOTIFY":0,
+ "UPDATE":3813,
+ "RESERVED6":0,
+ "RESERVED7":0,
+ "RESERVED8":0,
+ "RESERVED9":0,
+ "RESERVED10":0,
+ "RESERVED11":0,
+ "RESERVED12":0,
+ "RESERVED13":0,
+ "RESERVED14":0,
+ "RESERVED15":0
+ },
+ "qtypes":{
+ "A":89519,
+ "NS":863,
+ "CNAME":1,
+ "SOA":1,
+ "PTR":116779,
+ "MX":276,
+ "TXT":198,
+ "AAAA":39324,
+ "SRV":850,
+ "ANY":5
+ },
+ "nsstats":{
+ "Requestv4":251630,
+ "ReqEdns0":1255,
+ "ReqTSIG":3813,
+ "ReqTCP":57,
+ "AuthQryRej":1455,
+ "RecQryRej":122,
+ "Response":245918,
+ "TruncatedResp":44,
+ "RespEDNS0":1255,
+ "RespTSIG":3813,
+ "QrySuccess":205159,
+ "QryAuthAns":119495,
+ "QryNoauthAns":120770,
+ "QryNxrrset":32711,
+ "QrySERVFAIL":262,
+ "QryNXDOMAIN":2395,
+ "QryRecursion":40885,
+ "QryDuplicate":5712,
+ "QryFailure":1577,
+ "UpdateDone":2514,
+ "UpdateFail":1299,
+ "UpdateBadPrereq":1276,
+ "QryUDP":246194,
+ "QryTCP":45,
+ "OtherOpt":101
+ },
+ "views":{
+ "local":{
+ "resolver":{
+ "stats":{
+ "Queryv4":74577,
+ "Responsev4":67032,
+ "NXDOMAIN":601,
+ "SERVFAIL":5,
+ "FORMERR":7,
+ "EDNS0Fail":7,
+ "Truncated":3071,
+ "Lame":4,
+ "Retry":11826,
+ "QueryTimeout":1838,
+ "GlueFetchv4":6864,
+ "GlueFetchv4Fail":30,
+ "QryRTT10":112,
+ "QryRTT100":42900,
+ "QryRTT500":23275,
+ "QryRTT800":534,
+ "QryRTT1600":97,
+ "QryRTT1600+":20,
+ "BucketSize":31,
+ "REFUSED":13
+ },
+ "qtypes":{
+ "A":64931,
+ "NS":870,
+ "CNAME":185,
+ "PTR":5,
+ "MX":49,
+ "TXT":149,
+ "AAAA":7972,
+ "SRV":416
+ },
+ "cache":{
+ "A":40356,
+ "NS":8032,
+ "CNAME":14477,
+ "PTR":2,
+ "MX":21,
+ "TXT":32,
+ "AAAA":3301,
+ "SRV":94,
+ "DS":237,
+ "RRSIG":2301,
+ "NSEC":126,
+ "!A":52,
+ "!NS":4,
+ "!TXT":1,
+ "!AAAA":3797,
+ "!SRV":9,
+ "NXDOMAIN":590
+ },
+ "cachestats":{
+ "CacheHits":1085188,
+ "CacheMisses":109,
+ "QueryHits":464755,
+ "QueryMisses":55624,
+ "DeleteLRU":0,
+ "DeleteTTL":42615,
+ "CacheNodes":5188,
+ "CacheBuckets":2079,
+ "TreeMemTotal":2326026,
+ "TreeMemInUse":1508075,
+ "HeapMemMax":132096,
+ "HeapMemTotal":393216,
+ "HeapMemInUse":132096
+ },
+ "adb":{
+ "nentries":1021,
+ "entriescnt":3157,
+ "nnames":1021,
+ "namescnt":3022
+ }
+ }
+ },
+ "public":{
+ "resolver":{
+ "stats":{
+ "BucketSize":31
+ },
+ "qtypes":{
+ },
+ "cache":{
+ },
+ "cachestats":{
+ "CacheHits":0,
+ "CacheMisses":0,
+ "QueryHits":0,
+ "QueryMisses":0,
+ "DeleteLRU":0,
+ "DeleteTTL":0,
+ "CacheNodes":0,
+ "CacheBuckets":64,
+ "TreeMemTotal":287392,
+ "TreeMemInUse":29608,
+ "HeapMemMax":1024,
+ "HeapMemTotal":262144,
+ "HeapMemInUse":1024
+ },
+ "adb":{
+ "nentries":1021,
+ "nnames":1021
+ }
+ }
+ },
+ "_bind":{
+ "resolver":{
+ "stats":{
+ "BucketSize":31
+ },
+ "qtypes":{
+ },
+ "cache":{
+ },
+ "cachestats":{
+ "CacheHits":0,
+ "CacheMisses":0,
+ "QueryHits":0,
+ "QueryMisses":0,
+ "DeleteLRU":0,
+ "DeleteTTL":0,
+ "CacheNodes":0,
+ "CacheBuckets":64,
+ "TreeMemTotal":287392,
+ "TreeMemInUse":29608,
+ "HeapMemMax":1024,
+ "HeapMemTotal":262144,
+ "HeapMemInUse":1024
+ },
+ "adb":{
+ "nentries":1021,
+ "nnames":1021
+ }
+ }
+ }
+ }
+}
+```
+
+
+From this output it collects:
+
+- Global Received Requests by IP version (IPv4, IPv6)
+- Global Successful Queries
+- Current Recursive Clients
+- Global Queries by IP Protocol (TCP, UDP)
+- Global Queries Analysis
+- Global Received Updates
+- Global Query Failures
+- Global Query Failures Analysis
+- Other Global Server Statistics
+- Global Incoming Requests by OpCode
+- Global Incoming Requests by Query Type
+- Global Socket Statistics (will only work if the URL is `http://127.0.0.1:8888/json/v1`, i.e. without `/server`, but keep in mind this produces a very long output and will probably account for 0.5% CPU overhead alone, per bind server added)
+- Per View Statistics (the following set will be added for each bind view):
+ - View, Resolver Active Queries
+ - View, Resolver Statistics
+ - View, Resolver Round Trip Timings
+ - View, Requests by Query Type
+
+## Configuration
+
+The collector (optionally) reads a configuration file named `/etc/netdata/node.d/named.conf`, with the following contents:
+
+```js
+{
+ "enable_autodetect": true,
+ "update_every": 5,
+ "servers": [
+ {
+ "name": "bind1",
+ "url": "http://127.0.0.1:8888/json/v1/server",
+ "update_every": 1
+ },
+ {
+ "name": "bind2",
+ "url": "http://10.1.2.3:8888/json/v1/server",
+ "update_every": 2
+ }
+ ]
+}
+```
+
+You can add any number of bind servers.
+
+If the configuration file is missing, or the key `enable_autodetect` is `true`, the collector will also attempt to fetch `http://localhost:8888/json/v1/server` which, if successful, will be added too.
+
+### XML instead of JSON, from bind
+
+The collector can also accept bind URLs that return XML output. This might be required if you cannot have bind 9.10+ with JSON, but you have a version of bind that supports XML statistics v3. Check [this](https://www.isc.org/blogs/bind-9-10-statistics-troubleshooting-and-zone-configuration/) for supported versions.
+
+In such cases, use a URL like this:
+
+```sh
+curl "http://localhost:8888/xml/v3/server"
+```
+
+Only `xml` and `v3` have been tested.
+
+Keep in mind, though, that XML parsing is done using javascript code, which requires a triple conversion:
+
+1. from XML to JSON using a javascript XML parser (**CPU intensive**),
+2. which is then transformed to emulate the JSON output of bind (**CPU intensive** - and yes, the JSON converted from XML is different from the native JSON - even bind produces different names for various attributes),
+3. which is then processed to generate the data for the charts (this will happen even if bind is producing JSON).
+
+In general, expect XML parsing to be 2 to 3 times more CPU intensive than JSON.
+
+**So, if you can use the JSON output of bind, prefer it over XML**. Also keep in mind that bind itself uses more CPU when generating XML instead of JSON.
+
+The XML interface of bind is not autodetected.
+You will have to provide the config file `/etc/netdata/node.d/named.conf`, like this:
+
+```js
+{
+ "enable_autodetect": false,
+ "update_every": 1,
+ "servers": [
+ {
+ "name": "local",
+ "url": "http://localhost:8888/xml/v3/server",
+ "update_every": 1
+ }
+ ]
+}
+```
+
+Of course, you can monitor more than one bind server. Each one can be configured with either JSON or XML output.
+
+## Auto-detection
+
+Auto-detection is controlled by `enable_autodetect` in the config file. It is enabled by default, so if the collector can connect to `http://localhost:8888/json/v1/server` to receive bind statistics, it will add that server automatically.
+
+## Bind (named) configuration
+
+To use this plugin, you have to have bind v9.10+ properly compiled to provide statistics in `JSON` format.
+
+For more information on how to get your bind installation ready, please refer to the [bind statistics channel developer comments](http://jpmens.net/2013/03/18/json-in-bind-9-s-statistics-server/) and to [bind documentation](https://ftp.isc.org/isc/bind/9.10.3/doc/arm/Bv9ARM.ch06.html#statistics) or [bind Knowledge Base article AA-01123](https://kb.isc.org/article/AA-01123/0).
+
+Normally, you will need something like this in your `named.conf`:
+
+```
+statistics-channels {
+ inet 127.0.0.1 port 8888 allow { 127.0.0.1; };
+ inet ::1 port 8888 allow { ::1; };
+};
+```
+
+(use the IPv4 or IPv6 line depending on what you are using; you can also use both)
+
+Verify it works by running the following command (the collector is written in node.js and will query your bind server directly, but if this command works, the collector should be able to work too):
+
+```sh
+curl "http://localhost:8888/json/v1/server"
+```
+
diff --git a/collectors/node.d.plugin/named/named.node.js b/collectors/node.d.plugin/named/named.node.js
new file mode 100644
index 000000000..d13c608cb
--- /dev/null
+++ b/collectors/node.d.plugin/named/named.node.js
@@ -0,0 +1,610 @@
+'use strict';
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+// collect statistics from bind (named) v9.10+
+//
+// bind statistics documentation at:
+// http://jpmens.net/2013/03/18/json-in-bind-9-s-statistics-server/
+// https://ftp.isc.org/isc/bind/9.10.3/doc/arm/Bv9ARM.ch06.html#statistics
+
+// example configuration in /etc/netdata/node.d/named.conf
+// the module supports auto-detection if bind is running at localhost
+
+/*
+{
+ "enable_autodetect": true,
+ "update_every": 5,
+ "servers": [
+ {
+ "name": "bind1",
+ "url": "http://127.0.0.1:8888/json/v1/server",
+ "update_every": 1
+ },
+ {
+ "name": "bind2",
+ "url": "http://10.0.0.1:8888/xml/v3/server",
+ "update_every": 2
+ }
+ ]
+}
+*/
+
+// the following is the bind named.conf configuration required
+
+/*
+statistics-channels {
+ inet 127.0.0.1 port 8888 allow { 127.0.0.1; };
+};
+*/
+
+require('url');
+require('http');
+var XML = require('pixl-xml');
+var netdata = require('netdata');
+
+if(netdata.options.DEBUG === true) netdata.debug('loaded', __filename, 'plugin');
+
+var named = {
+ name: __filename,
+ enable_autodetect: true,
+ update_every: 1,
+ base_priority: 60000,
+ charts: {},
+
+ chartFromMembersCreate: function(service, obj, id, title_suffix, units, family, context, type, priority, algorithm, multiplier, divisor) {
+ var chart = {
+ id: id, // the unique id of the chart
+ name: '', // the unique name of the chart
+ title: service.name + ' ' + title_suffix, // the title of the chart
+ units: units, // the units of the chart dimensions
+ family: family, // the family of the chart
+ context: context, // the context of the chart
+ type: type, // the type of the chart
+ priority: priority, // the priority relative to others in the same family
+ update_every: service.update_every, // the expected update frequency of the chart
+ dimensions: {}
+ };
+
+ var found = 0;
+ var dims = Object.keys(obj);
+ var len = dims.length;
+ for(var i = 0; i < len ;i++) {
+ var x = dims[i];
+
+ if(typeof(obj[x]) !== 'undefined' && obj[x] !== 0) {
+ found++;
+ chart.dimensions[x] = {
+ id: x, // the unique id of the dimension
+ name: x, // the name of the dimension
+ algorithm: algorithm, // the id of the netdata algorithm
+ multiplier: multiplier, // the multiplier
+ divisor: divisor, // the divisor
+ hidden: false // is hidden (boolean)
+ };
+ }
+ }
+
+ if(!found)
+ return null;
+
+ chart = service.chart(id, chart);
+ this.charts[id] = chart;
+ return chart;
+ },
+
+ chartFromMembers: function(service, obj, id_suffix, title_suffix, units, family, context, type, priority, algorithm, multiplier, divisor) {
+ var id = 'named_' + service.name + '.' + id_suffix;
+ var chart = this.charts[id];
+ var dims, len, x, i;
+
+ if(typeof chart === 'undefined') {
+ chart = this.chartFromMembersCreate(service, obj, id, title_suffix, units, family, context, type, priority, algorithm, multiplier, divisor);
+ if(chart === null) return false;
+ }
+ else {
+ // check if we need to re-generate the chart
+ dims = Object.keys(obj);
+ len = dims.length;
+ for(i = 0; i < len ;i++) {
+ x = dims[i];
+ if(typeof(chart.dimensions[x]) === 'undefined') {
+ chart = this.chartFromMembersCreate(service, obj, id, title_suffix, units, family, context, type, priority, algorithm, multiplier, divisor);
+ if(chart === null) return false;
+ break;
+ }
+ }
+ }
+
+ service.begin(chart);
+
+ var found = 0;
+ dims = Object.keys(obj);
+ len = dims.length;
+ for(i = 0; i < len ;i++) {
+ x = dims[i];
+ if(typeof(chart.dimensions[x]) !== 'undefined') {
+ found++;
+ service.set(x, obj[x]);
+ }
+ }
+
+ service.end();
+
+ return (found > 0);
+ },
+
+ // an index to map values to different charts
+ lookups: {
+ nsstats: {},
+ resolver_stats: {},
+ numfetch: {}
+ },
+
+ // transform the XML response of bind
+ // to the JSON response of bind
+ xml2js: function(service, data_xml) {
+ var d = XML.parse(data_xml);
+ if(d === null) return null;
+
+ var a, aa, alen, alen2;
+
+ var data = {};
+ var len = d.server.counters.length;
+ while(len--) {
+ a = d.server.counters[len];
+ if(typeof a.counter === 'undefined') continue;
+ if(a.type === 'opcode') a.type = 'opcodes';
+ else if(a.type === 'qtype') a.type = 'qtypes';
+ else if(a.type === 'nsstat') a.type = 'nsstats';
+ aa = data[a.type] = {};
+ alen = 0;
+ alen2 = a.counter.length;
+ while(alen < alen2) {
+ aa[a.counter[alen].name] = parseInt(a.counter[alen]._Data, 10);
+ alen++;
+ }
+ }
+
+ data.views = {};
+ var vlen = d.views.view.length;
+ while(vlen--) {
+ var vname = d.views.view[vlen].name;
+ data.views[vname] = { resolver: {} };
+ len = d.views.view[vlen].counters.length;
+ while(len--) {
+ a = d.views.view[vlen].counters[len];
+ if(typeof a.counter === 'undefined') continue;
+ if(a.type === 'resstats') a.type = 'stats';
+ else if(a.type === 'resqtype') a.type = 'qtypes';
+ else if(a.type === 'adbstat') a.type = 'adb';
+ aa = data.views[vname].resolver[a.type] = {};
+ alen = 0;
+ alen2 = a.counter.length;
+ while(alen < alen2) {
+ aa[a.counter[alen].name] = parseInt(a.counter[alen]._Data, 10);
+ alen++;
+ }
+ }
+ }
+
+ return data;
+ },
+
+ processResponse: function(service, data) {
+ if(data !== null) {
+ var r, x, look, id, chart, keys, len;
+
+ // parse XML or JSON
+ // depending on the URL given
+ if(service.request.path.match(/^\/xml/) !== null)
+ r = named.xml2js(service, data);
+ else
+ r = JSON.parse(data);
+
+ if(typeof r === 'undefined' || r === null) {
+ service.error("Cannot parse these data: " + data.toString());
+ return;
+ }
+
+ if(service.added !== true)
+ service.commit();
+
+ if(typeof r.nsstats !== 'undefined') {
+ // we split the nsstats object to several others
+ var global_requests = {}, global_requests_enable = false;
+ var global_failures = {}, global_failures_enable = false;
+ var global_failures_detail = {}, global_failures_detail_enable = false;
+ var global_updates = {}, global_updates_enable = false;
+ var protocol_queries = {}, protocol_queries_enable = false;
+ var global_queries = {}, global_queries_enable = false;
+ var global_queries_success = {}, global_queries_success_enable = false;
+ var default_enable = false;
+ var RecursClients = 0;
+
+ // RecursClients is an absolute value
+ if(typeof r.nsstats['RecursClients'] !== 'undefined') {
+ RecursClients = r.nsstats['RecursClients'];
+ delete r.nsstats['RecursClients'];
+ }
+
+ keys = Object.keys(r.nsstats);
+ len = keys.length;
+ while(len--) {
+ x = keys[len];
+
+ // we maintain an index of the values found
+ // mapping them to split objects
+
+ look = named.lookups.nsstats[x];
+ if(typeof look === 'undefined') {
+ // a new value, not found in the index
+ // index it:
+ if(x === 'Requestv4') {
+ named.lookups.nsstats[x] = {
+ name: 'IPv4',
+ type: 'global_requests'
+ };
+ }
+ else if(x === 'Requestv6') {
+ named.lookups.nsstats[x] = {
+ name: 'IPv6',
+ type: 'global_requests'
+ };
+ }
+ else if(x === 'QryFailure') {
+ named.lookups.nsstats[x] = {
+ name: 'failures',
+ type: 'global_failures'
+ };
+ }
+ else if(x === 'QryUDP') {
+ named.lookups.nsstats[x] = {
+ name: 'UDP',
+ type: 'protocol_queries'
+ };
+ }
+ else if(x === 'QryTCP') {
+ named.lookups.nsstats[x] = {
+ name: 'TCP',
+ type: 'protocol_queries'
+ };
+ }
+ else if(x === 'QrySuccess') {
+ named.lookups.nsstats[x] = {
+ name: 'queries',
+ type: 'global_queries_success'
+ };
+ }
+ else if(x.match(/QryRej$/) !== null) {
+ named.lookups.nsstats[x] = {
+ name: x,
+ type: 'global_failures_detail'
+ };
+ }
+ else if(x.match(/^Qry/) !== null) {
+ named.lookups.nsstats[x] = {
+ name: x,
+ type: 'global_queries'
+ };
+ }
+ else if(x.match(/^Update/) !== null) {
+ named.lookups.nsstats[x] = {
+ name: x,
+ type: 'global_updates'
+ };
+ }
+ else {
+ // values not mapped, will remain
+ // in the default map
+ named.lookups.nsstats[x] = {
+ name: x,
+ type: 'default'
+ };
+ }
+
+ look = named.lookups.nsstats[x];
+ // netdata.error('lookup nsstats value: ' + x + ' >>> ' + named.lookups.nsstats[x].type);
+ }
+
+ switch(look.type) {
+ case 'global_requests': global_requests[look.name] = r.nsstats[x]; delete r.nsstats[x]; global_requests_enable = true; break;
+ case 'global_queries': global_queries[look.name] = r.nsstats[x]; delete r.nsstats[x]; global_queries_enable = true; break;
+ case 'global_queries_success': global_queries_success[look.name] = r.nsstats[x]; delete r.nsstats[x]; global_queries_success_enable = true; break;
+ case 'global_updates': global_updates[look.name] = r.nsstats[x]; delete r.nsstats[x]; global_updates_enable = true; break;
+ case 'protocol_queries': protocol_queries[look.name] = r.nsstats[x]; delete r.nsstats[x]; protocol_queries_enable = true; break;
+ case 'global_failures': global_failures[look.name] = r.nsstats[x]; delete r.nsstats[x]; global_failures_enable = true; break;
+ case 'global_failures_detail': global_failures_detail[look.name] = r.nsstats[x]; delete r.nsstats[x]; global_failures_detail_enable = true; break;
+ default: default_enable = true; break;
+ }
+ }
+
+ if(global_requests_enable === true)
+ service.module.chartFromMembers(service, global_requests, 'received_requests', 'Bind, Global Received Requests by IP version', 'requests/s', 'requests', 'named.requests', netdata.chartTypes.stacked, named.base_priority + 1, netdata.chartAlgorithms.incremental, 1, 1);
+
+ if(global_queries_success_enable === true)
+ service.module.chartFromMembers(service, global_queries_success, 'global_queries_success', 'Bind, Global Successful Queries', 'queries/s', 'queries', 'named.queries_succcess', netdata.chartTypes.line, named.base_priority + 2, netdata.chartAlgorithms.incremental, 1, 1);
+
+ if(protocol_queries_enable === true)
+ service.module.chartFromMembers(service, protocol_queries, 'protocols_queries', 'Bind, Global Queries by IP Protocol', 'queries/s', 'queries', 'named.protocol_queries', netdata.chartTypes.stacked, named.base_priority + 3, netdata.chartAlgorithms.incremental, 1, 1);
+
+ if(global_queries_enable === true)
+ service.module.chartFromMembers(service, global_queries, 'global_queries', 'Bind, Global Queries Analysis', 'queries/s', 'queries', 'named.global_queries', netdata.chartTypes.stacked, named.base_priority + 4, netdata.chartAlgorithms.incremental, 1, 1);
+
+ if(global_updates_enable === true)
+ service.module.chartFromMembers(service, global_updates, 'received_updates', 'Bind, Global Received Updates', 'updates/s', 'updates', 'named.global_updates', netdata.chartTypes.stacked, named.base_priority + 5, netdata.chartAlgorithms.incremental, 1, 1);
+
+ if(global_failures_enable === true)
+ service.module.chartFromMembers(service, global_failures, 'query_failures', 'Bind, Global Query Failures', 'failures/s', 'failures', 'named.global_failures', netdata.chartTypes.line, named.base_priority + 6, netdata.chartAlgorithms.incremental, 1, 1);
+
+ if(global_failures_detail_enable === true)
+ service.module.chartFromMembers(service, global_failures_detail, 'query_failures_detail', 'Bind, Global Query Failures Analysis', 'failures/s', 'failures', 'named.global_failures_detail', netdata.chartTypes.stacked, named.base_priority + 7, netdata.chartAlgorithms.incremental, 1, 1);
+
+ if(default_enable === true)
+ service.module.chartFromMembers(service, r.nsstats, 'nsstats', 'Bind, Other Global Server Statistics', 'operations/s', 'other', 'named.nsstats', netdata.chartTypes.line, named.base_priority + 8, netdata.chartAlgorithms.incremental, 1, 1);
+
+ // RecursClients chart
+ id = 'named_' + service.name + '.recursive_clients';
+ chart = named.charts[id];
+
+ if(typeof chart === 'undefined') {
+ chart = {
+ id: id, // the unique id of the chart
+ name: '', // the unique name of the chart
+ title: service.name + ' Bind, Current Recursive Clients', // the title of the chart
+ units: 'clients', // the units of the chart dimensions
+ family: 'clients', // the family of the chart
+ context: 'named.recursive_clients', // the context of the chart
+ type: netdata.chartTypes.line, // the type of the chart
+ priority: named.base_priority + 1, // the priority relative to others in the same family
+ update_every: service.update_every, // the expected update frequency of the chart
+ dimensions: {
+ 'clients': {
+ id: 'clients', // the unique id of the dimension
+ name: '', // the name of the dimension
+ algorithm: netdata.chartAlgorithms.absolute,// the id of the netdata algorithm
+ multiplier: 1, // the multiplier
+ divisor: 1, // the divisor
+ hidden: false // is hidden (boolean)
+ }
+ }
+ };
+
+ chart = service.chart(id, chart);
+ named.charts[id] = chart;
+ }
+
+ service.begin(chart);
+ service.set('clients', RecursClients);
+ service.end();
+ }
+
+ if(typeof r.opcodes !== 'undefined')
+ service.module.chartFromMembers(service, r.opcodes, 'in_opcodes', 'Bind, Global Incoming Requests by OpCode', 'requests/s', 'requests', 'named.in_opcodes', netdata.chartTypes.stacked, named.base_priority + 9, netdata.chartAlgorithms.incremental, 1, 1);
+
+ if(typeof r.qtypes !== 'undefined')
+ service.module.chartFromMembers(service, r.qtypes, 'in_qtypes', 'Bind, Global Incoming Requests by Query Type', 'requests/s', 'requests', 'named.in_qtypes', netdata.chartTypes.stacked, named.base_priority + 10, netdata.chartAlgorithms.incremental, 1, 1);
+
+ if(typeof r.sockstats !== 'undefined')
+ service.module.chartFromMembers(service, r.sockstats, 'in_sockstats', 'Bind, Global Socket Statistics', 'operations/s', 'sockets', 'named.in_sockstats', netdata.chartTypes.line, named.base_priority + 11, netdata.chartAlgorithms.incremental, 1, 1);
+
+ if(typeof r.views !== 'undefined') {
+ keys = Object.keys(r.views);
+ len = keys.length;
+ while(len--) {
+ x = keys[len];
+ var resolver = r.views[x].resolver;
+
+ if(typeof resolver !== 'undefined') {
+ if(typeof resolver.stats !== 'undefined') {
+ var NumFetch = 0;
+ var key = service.name + '.' + x;
+ var rtt = {}, rtt_enable = false;
+ default_enable = false;
+
+ // NumFetch is an absolute value
+ if(typeof resolver.stats['NumFetch'] !== 'undefined') {
+ named.lookups.numfetch[key] = true;
+ NumFetch = resolver.stats['NumFetch'];
+ delete resolver.stats['NumFetch'];
+ }
+ if(typeof resolver.stats['BucketSize'] !== 'undefined') {
+ delete resolver.stats['BucketSize'];
+ }
+
+ // split the QryRTT* from the main chart
+ var ykeys = Object.keys(resolver.stats);
+ var ylen = ykeys.length;
+ while(ylen--) {
+ var y = ykeys[ylen];
+
+ // we maintain an index of the values found
+ // mapping them to split objects
+
+ look = named.lookups.resolver_stats[y];
+ if(typeof look === 'undefined') {
+ if(y.match(/^QryRTT/) !== null) {
+ named.lookups.resolver_stats[y] = {
+ name: y,
+ type: 'rtt'
+ };
+ }
+ else {
+ named.lookups.resolver_stats[y] = {
+ name: y,
+ type: 'default'
+ };
+ }
+
+ look = named.lookups.resolver_stats[y];
+ // netdata.error('lookup resolver stats value: ' + y + ' >>> ' + look.type);
+ }
+
+ switch(look.type) {
+ case 'rtt': rtt[look.name] = resolver.stats[y]; delete resolver.stats[y]; rtt_enable = true; break;
+ default: default_enable = true; break;
+ }
+ }
+
+ if(rtt_enable)
+ service.module.chartFromMembers(service, rtt, 'view_resolver_rtt_' + x, 'Bind, ' + x + ' View, Resolver Round Trip Timings', 'queries/s', 'view_' + x, 'named.resolver_rtt', netdata.chartTypes.stacked, named.base_priority + 12, netdata.chartAlgorithms.incremental, 1, 1);
+
+ if(default_enable)
+ service.module.chartFromMembers(service, resolver.stats, 'view_resolver_stats_' + x, 'Bind, ' + x + ' View, Resolver Statistics', 'operations/s', 'view_' + x, 'named.resolver_stats', netdata.chartTypes.line, named.base_priority + 13, netdata.chartAlgorithms.incremental, 1, 1);
+
+ // NumFetch chart
+ if(typeof named.lookups.numfetch[key] !== 'undefined') {
+ id = 'named_' + service.name + '.view_resolver_numfetch_' + x;
+ chart = named.charts[id];
+
+ if(typeof chart === 'undefined') {
+ chart = {
+ id: id, // the unique id of the chart
+ name: '', // the unique name of the chart
+ title: service.name + ' Bind, ' + x + ' View, Resolver Active Queries', // the title of the chart
+ units: 'queries', // the units of the chart dimensions
+ family: 'view_' + x, // the family of the chart
+ context: 'named.resolver_active_queries', // the context of the chart
+ type: netdata.chartTypes.line, // the type of the chart
+ priority: named.base_priority + 1001, // the priority relative to others in the same family
+ update_every: service.update_every, // the expected update frequency of the chart
+ dimensions: {
+ 'queries': {
+ id: 'queries', // the unique id of the dimension
+ name: '', // the name of the dimension
+ algorithm: netdata.chartAlgorithms.absolute,// the id of the netdata algorithm
+ multiplier: 1, // the multiplier
+ divisor: 1, // the divisor
+ hidden: false // is hidden (boolean)
+ }
+ }
+ };
+
+ chart = service.chart(id, chart);
+ named.charts[id] = chart;
+ }
+
+ service.begin(chart);
+ service.set('queries', NumFetch);
+ service.end();
+ }
+ }
+ }
+
+ if(typeof resolver.qtypes !== 'undefined')
+ service.module.chartFromMembers(service, resolver.qtypes, 'view_resolver_qtypes_' + x, 'Bind, ' + x + ' View, Requests by Query Type', 'requests/s', 'view_' + x, 'named.resolver_qtypes', netdata.chartTypes.stacked, named.base_priority + 14, netdata.chartAlgorithms.incremental, 1, 1);
+
+ //if(typeof resolver.cache !== 'undefined')
+ // service.module.chartFromMembers(service, resolver.cache, 'view_resolver_cache_' + x, 'Bind, ' + x + ' View, Cache Entries', 'entries', 'view_' + x, 'named.resolver_cache', netdata.chartTypes.stacked, named.base_priority + 15, netdata.chartAlgorithms.absolute, 1, 1);
+
+ if(typeof resolver.cachestats['CacheHits'] !== 'undefined' && resolver.cachestats['CacheHits'] > 0) {
+ id = 'named_' + service.name + '.view_resolver_cachehits_' + x;
+ chart = named.charts[id];
+
+ if(typeof chart === 'undefined') {
+ chart = {
+ id: id, // the unique id of the chart
+ name: '', // the unique name of the chart
+ title: service.name + ' Bind, ' + x + ' View, Resolver Cache Hits', // the title of the chart
+ units: 'operations/s', // the units of the chart dimensions
+ family: 'view_' + x, // the family of the chart
+ context: 'named.resolver_cache_hits', // the context of the chart
+ type: netdata.chartTypes.area, // the type of the chart
+ priority: named.base_priority + 1100, // the priority relative to others in the same family
+ update_every: service.update_every, // the expected update frequency of the chart
+ dimensions: {
+ 'CacheHits': {
+ id: 'CacheHits', // the unique id of the dimension
+ name: 'hits', // the name of the dimension
+ algorithm: netdata.chartAlgorithms.incremental,// the id of the netdata algorithm
+ multiplier: 1, // the multiplier
+ divisor: 1, // the divisor
+ hidden: false // is hidden (boolean)
+ },
+ 'CacheMisses': {
+ id: 'CacheMisses', // the unique id of the dimension
+ name: 'misses', // the name of the dimension
+ algorithm: netdata.chartAlgorithms.incremental,// the id of the netdata algorithm
+ multiplier: -1, // the multiplier
+ divisor: 1, // the divisor
+ hidden: false // is hidden (boolean)
+ }
+ }
+ };
+
+ chart = service.chart(id, chart);
+ named.charts[id] = chart;
+ }
+
+ service.begin(chart);
+ service.set('CacheHits', resolver.cachestats['CacheHits']);
+ service.set('CacheMisses', resolver.cachestats['CacheMisses']);
+ service.end();
+ }
+
+ // this is wrong, it contains many types of info:
+ // 1. CacheHits, CacheMisses - incremental (added above)
+ // 2. QueryHits, QueryMisses - incremental
+ // 3. DeleteLRU, DeleteTTL - incremental
+ // 4. CacheNodes, CacheBuckets - absolute
+ // 5. TreeMemTotal, TreeMemInUse - absolute
+ // 6. HeapMemMax, HeapMemTotal, HeapMemInUse - absolute
+ //if(typeof resolver.cachestats !== 'undefined')
+ // service.module.chartFromMembers(service, resolver.cachestats, 'view_resolver_cachestats_' + x, 'Bind, ' + x + ' View, Cache Statistics', 'requests/s', 'view_' + x, 'named.resolver_cache_stats', netdata.chartTypes.line, named.base_priority + 1001, netdata.chartAlgorithms.incremental, 1, 1);
+
+ //if(typeof resolver.adb !== 'undefined')
+ // service.module.chartFromMembers(service, resolver.adb, 'view_resolver_adb_' + x, 'Bind, ' + x + ' View, ADB Statistics', 'entries', 'view_' + x, 'named.resolver_adb', netdata.chartTypes.line, named.base_priority + 1002, netdata.chartAlgorithms.absolute, 1, 1);
+ }
+ }
+ }
+ },
+
+ // module.serviceExecute()
+    // this function is called only from this module;
+    // it prepares the request, registers a netdata.service()
+    // and executes it with processResponse as the response handler
+ serviceExecute: function(name, a_url, update_every) {
+ if(netdata.options.DEBUG === true) netdata.debug(this.name + ': ' + name + ': url: ' + a_url + ', update_every: ' + update_every);
+ var service = netdata.service({
+ name: name,
+ request: netdata.requestFromURL(a_url),
+ update_every: update_every,
+ module: this
+ });
+
+ service.execute(this.processResponse);
+ },
+
+ configure: function(config) {
+ var added = 0;
+
+ if(this.enable_autodetect === true) {
+ this.serviceExecute('local', 'http://localhost:8888/json/v1/server', this.update_every);
+ added++;
+ }
+
+ if(typeof(config.servers) !== 'undefined') {
+ var len = config.servers.length;
+ while(len--) {
+ if(typeof config.servers[len].update_every === 'undefined')
+ config.servers[len].update_every = this.update_every;
+
+ this.serviceExecute(config.servers[len].name, config.servers[len].url, config.servers[len].update_every);
+ added++;
+ }
+ }
+
+ return added;
+ },
+
+    // module.update()
+    // this is called repeatedly to collect data; it re-executes
+    // the service request and processes the response
+ update: function(service, callback) {
+ service.execute(function(serv, data) {
+ service.module.processResponse(serv, data);
+ callback();
+ });
+ }
+};
+
+module.exports = named;
diff --git a/collectors/node.d.plugin/node.d.conf b/collectors/node.d.plugin/node.d.conf
new file mode 100644
index 000000000..95aec99ce
--- /dev/null
+++ b/collectors/node.d.plugin/node.d.conf
@@ -0,0 +1,39 @@
+{
+ "___help_1": "Default options for node.d.plugin - this is a JSON file.",
+ "___help_2": "Use http://jsonlint.com/ to verify it is valid JSON.",
+ "___help_3": "------------------------------------------------------------",
+
+ "___help_update_every": "Minimum data collection frequency for all node.d/*.node.js modules. Set it to 0 to inherit it from netdata.",
+ "update_every": 0,
+
+ "___help_modules_enable_autodetect": "Enable/disable auto-detection for node.d/*.node.js modules that support it.",
+ "modules_enable_autodetect": true,
+
+ "___help_modules_enable_all": "Enable all node.d/*.node.js modules by default.",
+ "modules_enable_all": true,
+
+ "___help_modules": "Enable/disable the following modules. Give only XXX for node.d/XXX.node.js",
+ "modules": {
+ "named": {
+ "enabled": true
+ },
+ "sma_webbox": {
+ "enabled": true
+ },
+ "snmp": {
+ "enabled": true
+ }
+ },
+
+ "___help_paths": "Paths that control the operation of node.d.plugin",
+ "paths": {
+		"___help_plugins": "The full path to the node.d/ directory that contains the modules' JavaScript files",
+ "plugins": null,
+
+		"___help_config": "The full path to the node.d/ directory that contains the modules' configuration files",
+ "config": null,
+
+ "___help_modules": "Array of paths to add to node.js when searching for node_modules",
+ "modules": []
+ }
+}
diff --git a/collectors/node.d.plugin/node.d.plugin b/collectors/node.d.plugin/node.d.plugin
new file mode 100644
index 000000000..2570220c2
--- /dev/null
+++ b/collectors/node.d.plugin/node.d.plugin
@@ -0,0 +1,303 @@
+#!/usr/bin/env bash
+':' //; exec "$(command -v nodejs || command -v node || echo "ERROR node IS NOT AVAILABLE IN THIS SYSTEM")" "$0" "$@"
+
+// shebang hack from:
+// http://unix.stackexchange.com/questions/65235/universal-node-js-shebang
+
+// Initially this file is run as a shell script.
+// The second line then finds nodejs or node in the system path
+// and re-executes this file with it, passing along the original parameters.
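+//
+// For illustration, that second line is read in two different ways:
+//   by the shell: ':' is the no-op builtin (its '//' argument is ignored),
+//                 and the exec that follows replaces the shell with node,
+//                 re-running this same file;
+//   by node:      ':' is a harmless string expression and '//' starts a
+//                 comment, so the rest of the line is ignored.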
+
+// netdata
+// real-time performance and health monitoring, done right!
+// (C) 2017 Costa Tsaousis <costa@tsaousis.gr>
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+// --------------------------------------------------------------------------------------------------------------------
+
+'use strict';
+
+// --------------------------------------------------------------------------------------------------------------------
+// get NETDATA environment variables
+
+var NETDATA_PLUGINS_DIR = process.env.NETDATA_PLUGINS_DIR || __dirname;
+var NETDATA_USER_CONFIG_DIR = process.env.NETDATA_USER_CONFIG_DIR || '/usr/local/etc/netdata';
+var NETDATA_STOCK_CONFIG_DIR = process.env.NETDATA_STOCK_CONFIG_DIR || '/usr/local/lib/netdata/conf.d';
+var NETDATA_UPDATE_EVERY = process.env.NETDATA_UPDATE_EVERY || 1;
+var NODE_D_DIR = NETDATA_PLUGINS_DIR + '/../node.d';
+
+// make sure the modules are found
+process.mainModule.paths.unshift(NODE_D_DIR + '/node_modules');
+process.mainModule.paths.unshift(NODE_D_DIR);
+
+
+// --------------------------------------------------------------------------------------------------------------------
+// load required modules
+
+var fs = require('fs');
+var url = require('url');
+var util = require('util');
+var http = require('http');
+var path = require('path');
+var extend = require('extend');
+var netdata = require('netdata');
+
+
+// --------------------------------------------------------------------------------------------------------------------
+// configuration
+
+function netdata_read_json_config_file(module_filename) {
+ var f = path.basename(module_filename);
+
+ var ufilename, sfilename;
+
+ var m = f.match('.plugin' + '$');
+ if(m !== null) {
+ ufilename = netdata.options.paths.config + '/' + f.substring(0, m.index) + '.conf';
+ sfilename = netdata.options.paths.stock_config + '/' + f.substring(0, m.index) + '.conf';
+ }
+
+ m = f.match('.node.js' + '$');
+ if(m !== null) {
+ ufilename = netdata.options.paths.config + '/node.d/' + f.substring(0, m.index) + '.conf';
+ sfilename = netdata.options.paths.stock_config + '/node.d/' + f.substring(0, m.index) + '.conf';
+ }
+
+ try {
+ netdata.debug('loading module\'s ' + module_filename + ' user-config ' + ufilename);
+ return JSON.parse(fs.readFileSync(ufilename, 'utf8'));
+ }
+ catch(e) {
+ netdata.error('Cannot read user-configuration file ' + ufilename + ': ' + e.message + '.');
+ dumpError(e);
+ }
+
+ try {
+ netdata.debug('loading module\'s ' + module_filename + ' stock-config ' + sfilename);
+ return JSON.parse(fs.readFileSync(sfilename, 'utf8'));
+ }
+ catch(e) {
+ netdata.error('Cannot read stock-configuration file ' + sfilename + ': ' + e.message + ', using internal defaults.');
+ dumpError(e);
+ }
+
+ return {};
+}
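+
+// For example (illustrative, assuming the default directory layout):
+//   node.d.plugin  -> <config dir>/node.d.conf, then <stock config dir>/node.d.conf
+//   named.node.js  -> <config dir>/node.d/named.conf, then <stock config dir>/node.d/named.conf
+// The user configuration is tried first; if it cannot be read the stock
+// configuration is used, and if both fail the internal defaults apply.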
+
+// internal defaults
+extend(true, netdata.options, {
+ filename: path.basename(__filename),
+
+ update_every: NETDATA_UPDATE_EVERY,
+
+ paths: {
+ plugins: NETDATA_PLUGINS_DIR,
+ config: NETDATA_USER_CONFIG_DIR,
+ stock_config: NETDATA_STOCK_CONFIG_DIR,
+ modules: []
+ },
+
+ modules_enable_autodetect: true,
+ modules_enable_all: true,
+ modules: {}
+});
+
+// load configuration file
+netdata.options_loaded = netdata_read_json_config_file(__filename);
+extend(true, netdata.options, netdata.options_loaded);
+
+if(!netdata.options.paths.plugins)
+ netdata.options.paths.plugins = NETDATA_PLUGINS_DIR;
+
+if(!netdata.options.paths.config)
+ netdata.options.paths.config = NETDATA_USER_CONFIG_DIR;
+
+if(!netdata.options.paths.stock_config)
+ netdata.options.paths.stock_config = NETDATA_STOCK_CONFIG_DIR;
+
+// console.error('merged netdata object:');
+// console.error(util.inspect(netdata, {depth: 10}));
+
+
+// apply module paths to node.js process
+function applyModulePaths() {
+ var len = netdata.options.paths.modules.length;
+ while(len--)
+ process.mainModule.paths.unshift(netdata.options.paths.modules[len]);
+}
+applyModulePaths();
+
+
+// --------------------------------------------------------------------------------------------------------------------
+// tracing
+
+function dumpError(err) {
+ if (typeof err === 'object') {
+ if (err.stack) {
+ netdata.debug(err.stack);
+ }
+ }
+}
+
+// --------------------------------------------------------------------------------------------------------------------
+// get command line arguments
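+//
+// For illustration (hypothetical invocation): `node.d.plugin debug 3 named snmp`
+// enables DEBUG output, sets the update frequency to 3 seconds (never below
+// NETDATA_UPDATE_EVERY) and restricts collection to the named and snmp
+// modules; if no module names follow the number, all configured modules
+// remain enabled.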
+{
+ var found_myself = false;
+ var found_number = false;
+ var found_modules = false;
+ process.argv.forEach(function (val, index, array) {
+ netdata.debug('PARAM: ' + val);
+
+ if(!found_myself) {
+ if(val === __filename)
+ found_myself = true;
+ }
+ else {
+ switch(val) {
+ case 'debug':
+ netdata.options.DEBUG = true;
+ netdata.debug('DEBUG enabled');
+ break;
+
+ default:
+ if(found_number === true) {
+ if(found_modules === false) {
+ for(var i in netdata.options.modules)
+ netdata.options.modules[i].enabled = false;
+ }
+
+ if(typeof netdata.options.modules[val] === 'undefined')
+ netdata.options.modules[val] = {};
+
+ netdata.options.modules[val].enabled = true;
+ netdata.options.modules_enable_all = false;
+ netdata.debug('enabled module ' + val);
+ }
+ else {
+ try {
+ var x = parseInt(val);
+ if(x > 0) {
+ netdata.options.update_every = x;
+ if(netdata.options.update_every < NETDATA_UPDATE_EVERY) {
+ netdata.options.update_every = NETDATA_UPDATE_EVERY;
+ netdata.debug('Update frequency ' + x + 's is too low');
+ }
+
+ found_number = true;
+ netdata.debug('Update frequency set to ' + netdata.options.update_every + ' seconds');
+ }
+ else netdata.error('Ignoring parameter: ' + val);
+ }
+ catch(e) {
+ netdata.error('Cannot get value of parameter: ' + val);
+ dumpError(e);
+ }
+ }
+ break;
+ }
+ }
+ });
+}
+
+if(netdata.options.update_every < 1) {
+ netdata.debug('Adjusting update frequency to 1 second');
+ netdata.options.update_every = 1;
+}
+
+// --------------------------------------------------------------------------------------------------------------------
+// find modules
+
+function findModules() {
+ var found = 0;
+
+ var files = fs.readdirSync(NODE_D_DIR);
+ var len = files.length;
+ while(len--) {
+ var m = files[len].match('.node.js' + '$');
+ if(m !== null) {
+ var n = files[len].substring(0, m.index);
+
+ if(typeof(netdata.options.modules[n]) === 'undefined')
+ netdata.options.modules[n] = { name: n, enabled: netdata.options.modules_enable_all };
+
+ if(netdata.options.modules[n].enabled === true) {
+ netdata.options.modules[n].name = n;
+ netdata.options.modules[n].filename = NODE_D_DIR + '/' + files[len];
+ netdata.options.modules[n].loaded = false;
+
+ // load the module
+ try {
+ netdata.debug('loading module ' + netdata.options.modules[n].filename);
+ netdata.options.modules[n].module = require(netdata.options.modules[n].filename);
+ netdata.options.modules[n].module.name = n;
+ netdata.debug('loaded module ' + netdata.options.modules[n].name + ' from ' + netdata.options.modules[n].filename);
+ }
+ catch(e) {
+ netdata.options.modules[n].enabled = false;
+ netdata.error('Cannot load module: ' + netdata.options.modules[n].filename + ' exception: ' + e);
+ dumpError(e);
+ continue;
+ }
+
+ // load its configuration
+ var c = {
+ enable_autodetect: netdata.options.modules_enable_autodetect,
+ update_every: netdata.options.update_every
+ };
+
+ var c2 = netdata_read_json_config_file(files[len]);
+ extend(true, c, c2);
+
+ // call module auto-detection / configuration
+ try {
+ netdata.modules_configuring++;
+ netdata.debug('Configuring module ' + netdata.options.modules[n].name);
+ var serv = netdata.configure(netdata.options.modules[n].module, c, function() {
+ netdata.debug('Configured module ' + netdata.options.modules[n].name);
+ netdata.modules_configuring--;
+ });
+
+ netdata.debug('Configuring module ' + netdata.options.modules[n].name + ' reports ' + serv + ' eligible services.');
+ }
+ catch(e) {
+ netdata.modules_configuring--;
+ netdata.options.modules[n].enabled = false;
+ netdata.error('Failed module auto-detection: ' + netdata.options.modules[n].name + ' exception: ' + e + ', disabling module.');
+ dumpError(e);
+ continue;
+ }
+
+ netdata.options.modules[n].loaded = true;
+ found++;
+ }
+ }
+ }
+
+ // netdata.debug(netdata.options.modules);
+ return found;
+}
+
+if(findModules() === 0) {
+ netdata.error('Cannot load any .node.js module from: ' + NODE_D_DIR);
+ netdata.disableNodePlugin();
+ process.exit(1);
+}
+
+
+// --------------------------------------------------------------------------------------------------------------------
+// start
+
+function start_when_configuring_ends() {
+ if(netdata.modules_configuring > 0) {
+        netdata.debug('Waiting for modules to finish their configuration, still configuring: ' + netdata.modules_configuring);
+ setTimeout(start_when_configuring_ends, 500);
+ return;
+ }
+
+ netdata.modules_configuring = 0;
+ netdata.start();
+}
+start_when_configuring_ends();
+
+//netdata.debug('netdata object:')
+//netdata.debug(netdata);
diff --git a/collectors/node.d.plugin/node.d.plugin.in b/collectors/node.d.plugin/node.d.plugin.in
new file mode 100755
index 000000000..05c126e90
--- /dev/null
+++ b/collectors/node.d.plugin/node.d.plugin.in
@@ -0,0 +1,303 @@
+#!/usr/bin/env bash
+':' //; exec "$(command -v nodejs || command -v node || echo "ERROR node IS NOT AVAILABLE IN THIS SYSTEM")" "$0" "$@"
+
+// shebang hack from:
+// http://unix.stackexchange.com/questions/65235/universal-node-js-shebang
+
+// Initially this file is run as a shell script.
+// The second line then finds nodejs or node in the system path
+// and re-executes this file with it, passing along the original parameters.
+
+// netdata
+// real-time performance and health monitoring, done right!
+// (C) 2017 Costa Tsaousis <costa@tsaousis.gr>
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+// --------------------------------------------------------------------------------------------------------------------
+
+'use strict';
+
+// --------------------------------------------------------------------------------------------------------------------
+// get NETDATA environment variables
+
+var NETDATA_PLUGINS_DIR = process.env.NETDATA_PLUGINS_DIR || __dirname;
+var NETDATA_USER_CONFIG_DIR = process.env.NETDATA_USER_CONFIG_DIR || '@configdir_POST@';
+var NETDATA_STOCK_CONFIG_DIR = process.env.NETDATA_STOCK_CONFIG_DIR || '@libconfigdir_POST@';
+var NETDATA_UPDATE_EVERY = process.env.NETDATA_UPDATE_EVERY || 1;
+var NODE_D_DIR = NETDATA_PLUGINS_DIR + '/../node.d';
+
+// make sure the modules are found
+process.mainModule.paths.unshift(NODE_D_DIR + '/node_modules');
+process.mainModule.paths.unshift(NODE_D_DIR);
+
+
+// --------------------------------------------------------------------------------------------------------------------
+// load required modules
+
+var fs = require('fs');
+var url = require('url');
+var util = require('util');
+var http = require('http');
+var path = require('path');
+var extend = require('extend');
+var netdata = require('netdata');
+
+
+// --------------------------------------------------------------------------------------------------------------------
+// configuration
+
+function netdata_read_json_config_file(module_filename) {
+ var f = path.basename(module_filename);
+
+ var ufilename, sfilename;
+
+ var m = f.match('.plugin' + '$');
+ if(m !== null) {
+ ufilename = netdata.options.paths.config + '/' + f.substring(0, m.index) + '.conf';
+ sfilename = netdata.options.paths.stock_config + '/' + f.substring(0, m.index) + '.conf';
+ }
+
+ m = f.match('.node.js' + '$');
+ if(m !== null) {
+ ufilename = netdata.options.paths.config + '/node.d/' + f.substring(0, m.index) + '.conf';
+ sfilename = netdata.options.paths.stock_config + '/node.d/' + f.substring(0, m.index) + '.conf';
+ }
+
+ try {
+ netdata.debug('loading module\'s ' + module_filename + ' user-config ' + ufilename);
+ return JSON.parse(fs.readFileSync(ufilename, 'utf8'));
+ }
+ catch(e) {
+ netdata.error('Cannot read user-configuration file ' + ufilename + ': ' + e.message + '.');
+ dumpError(e);
+ }
+
+ try {
+ netdata.debug('loading module\'s ' + module_filename + ' stock-config ' + sfilename);
+ return JSON.parse(fs.readFileSync(sfilename, 'utf8'));
+ }
+ catch(e) {
+ netdata.error('Cannot read stock-configuration file ' + sfilename + ': ' + e.message + ', using internal defaults.');
+ dumpError(e);
+ }
+
+ return {};
+}
+
+// internal defaults
+extend(true, netdata.options, {
+ filename: path.basename(__filename),
+
+ update_every: NETDATA_UPDATE_EVERY,
+
+ paths: {
+ plugins: NETDATA_PLUGINS_DIR,
+ config: NETDATA_USER_CONFIG_DIR,
+ stock_config: NETDATA_STOCK_CONFIG_DIR,
+ modules: []
+ },
+
+ modules_enable_autodetect: true,
+ modules_enable_all: true,
+ modules: {}
+});
+
+// load configuration file
+netdata.options_loaded = netdata_read_json_config_file(__filename);
+extend(true, netdata.options, netdata.options_loaded);
+
+if(!netdata.options.paths.plugins)
+ netdata.options.paths.plugins = NETDATA_PLUGINS_DIR;
+
+if(!netdata.options.paths.config)
+ netdata.options.paths.config = NETDATA_USER_CONFIG_DIR;
+
+if(!netdata.options.paths.stock_config)
+ netdata.options.paths.stock_config = NETDATA_STOCK_CONFIG_DIR;
+
+// console.error('merged netdata object:');
+// console.error(util.inspect(netdata, {depth: 10}));
+
+
+// apply module paths to node.js process
+function applyModulePaths() {
+ var len = netdata.options.paths.modules.length;
+ while(len--)
+ process.mainModule.paths.unshift(netdata.options.paths.modules[len]);
+}
+applyModulePaths();
+
+
+// --------------------------------------------------------------------------------------------------------------------
+// tracing
+
+function dumpError(err) {
+ if (typeof err === 'object') {
+ if (err.stack) {
+ netdata.debug(err.stack);
+ }
+ }
+}
+
+// --------------------------------------------------------------------------------------------------------------------
+// get command line arguments
+{
+ var found_myself = false;
+ var found_number = false;
+ var found_modules = false;
+ process.argv.forEach(function (val, index, array) {
+ netdata.debug('PARAM: ' + val);
+
+ if(!found_myself) {
+ if(val === __filename)
+ found_myself = true;
+ }
+ else {
+ switch(val) {
+ case 'debug':
+ netdata.options.DEBUG = true;
+ netdata.debug('DEBUG enabled');
+ break;
+
+ default:
+ if(found_number === true) {
+ if(found_modules === false) {
+ for(var i in netdata.options.modules)
+ netdata.options.modules[i].enabled = false;
+ }
+
+ if(typeof netdata.options.modules[val] === 'undefined')
+ netdata.options.modules[val] = {};
+
+ netdata.options.modules[val].enabled = true;
+ netdata.options.modules_enable_all = false;
+ netdata.debug('enabled module ' + val);
+ }
+ else {
+ try {
+ var x = parseInt(val);
+ if(x > 0) {
+ netdata.options.update_every = x;
+ if(netdata.options.update_every < NETDATA_UPDATE_EVERY) {
+ netdata.options.update_every = NETDATA_UPDATE_EVERY;
+ netdata.debug('Update frequency ' + x + 's is too low');
+ }
+
+ found_number = true;
+ netdata.debug('Update frequency set to ' + netdata.options.update_every + ' seconds');
+ }
+ else netdata.error('Ignoring parameter: ' + val);
+ }
+ catch(e) {
+ netdata.error('Cannot get value of parameter: ' + val);
+ dumpError(e);
+ }
+ }
+ break;
+ }
+ }
+ });
+}
+
+if(netdata.options.update_every < 1) {
+ netdata.debug('Adjusting update frequency to 1 second');
+ netdata.options.update_every = 1;
+}
+
+// --------------------------------------------------------------------------------------------------------------------
+// find modules
+
+function findModules() {
+ var found = 0;
+
+ var files = fs.readdirSync(NODE_D_DIR);
+ var len = files.length;
+ while(len--) {
+ var m = files[len].match('.node.js' + '$');
+ if(m !== null) {
+ var n = files[len].substring(0, m.index);
+
+ if(typeof(netdata.options.modules[n]) === 'undefined')
+ netdata.options.modules[n] = { name: n, enabled: netdata.options.modules_enable_all };
+
+ if(netdata.options.modules[n].enabled === true) {
+ netdata.options.modules[n].name = n;
+ netdata.options.modules[n].filename = NODE_D_DIR + '/' + files[len];
+ netdata.options.modules[n].loaded = false;
+
+ // load the module
+ try {
+ netdata.debug('loading module ' + netdata.options.modules[n].filename);
+ netdata.options.modules[n].module = require(netdata.options.modules[n].filename);
+ netdata.options.modules[n].module.name = n;
+ netdata.debug('loaded module ' + netdata.options.modules[n].name + ' from ' + netdata.options.modules[n].filename);
+ }
+ catch(e) {
+ netdata.options.modules[n].enabled = false;
+ netdata.error('Cannot load module: ' + netdata.options.modules[n].filename + ' exception: ' + e);
+ dumpError(e);
+ continue;
+ }
+
+ // load its configuration
+ var c = {
+ enable_autodetect: netdata.options.modules_enable_autodetect,
+ update_every: netdata.options.update_every
+ };
+
+ var c2 = netdata_read_json_config_file(files[len]);
+ extend(true, c, c2);
+
+ // call module auto-detection / configuration
+ try {
+ netdata.modules_configuring++;
+ netdata.debug('Configuring module ' + netdata.options.modules[n].name);
+ var serv = netdata.configure(netdata.options.modules[n].module, c, function() {
+ netdata.debug('Configured module ' + netdata.options.modules[n].name);
+ netdata.modules_configuring--;
+ });
+
+ netdata.debug('Configuring module ' + netdata.options.modules[n].name + ' reports ' + serv + ' eligible services.');
+ }
+ catch(e) {
+ netdata.modules_configuring--;
+ netdata.options.modules[n].enabled = false;
+ netdata.error('Failed module auto-detection: ' + netdata.options.modules[n].name + ' exception: ' + e + ', disabling module.');
+ dumpError(e);
+ continue;
+ }
+
+ netdata.options.modules[n].loaded = true;
+ found++;
+ }
+ }
+ }
+
+ // netdata.debug(netdata.options.modules);
+ return found;
+}
+
+if(findModules() === 0) {
+ netdata.error('Cannot load any .node.js module from: ' + NODE_D_DIR);
+ netdata.disableNodePlugin();
+ process.exit(1);
+}
+
+
+// --------------------------------------------------------------------------------------------------------------------
+// start
+
+function start_when_configuring_ends() {
+ if(netdata.modules_configuring > 0) {
+        netdata.debug('Waiting for modules to finish their configuration, still configuring: ' + netdata.modules_configuring);
+ setTimeout(start_when_configuring_ends, 500);
+ return;
+ }
+
+ netdata.modules_configuring = 0;
+ netdata.start();
+}
+start_when_configuring_ends();
+
+//netdata.debug('netdata object:')
+//netdata.debug(netdata);
diff --git a/collectors/node.d.plugin/node_modules/asn1-ber.js b/collectors/node.d.plugin/node_modules/asn1-ber.js
new file mode 100644
index 000000000..55c8f688e
--- /dev/null
+++ b/collectors/node.d.plugin/node_modules/asn1-ber.js
@@ -0,0 +1,7 @@
+// SPDX-License-Identifier: MIT
+
+var Ber = require('./lib/ber/index')
+
+exports.Ber = Ber
+exports.BerReader = Ber.Reader
+exports.BerWriter = Ber.Writer
diff --git a/collectors/node.d.plugin/node_modules/extend.js b/collectors/node.d.plugin/node_modules/extend.js
new file mode 100644
index 000000000..3cd2e9155
--- /dev/null
+++ b/collectors/node.d.plugin/node_modules/extend.js
@@ -0,0 +1,88 @@
+// https://github.com/justmoon/node-extend
+// SPDX-License-Identifier: MIT
+
+'use strict';
+
+var hasOwn = Object.prototype.hasOwnProperty;
+var toStr = Object.prototype.toString;
+
+var isArray = function isArray(arr) {
+ if (typeof Array.isArray === 'function') {
+ return Array.isArray(arr);
+ }
+
+ return toStr.call(arr) === '[object Array]';
+};
+
+var isPlainObject = function isPlainObject(obj) {
+ if (!obj || toStr.call(obj) !== '[object Object]') {
+ return false;
+ }
+
+ var hasOwnConstructor = hasOwn.call(obj, 'constructor');
+ var hasIsPrototypeOf = obj.constructor && obj.constructor.prototype && hasOwn.call(obj.constructor.prototype, 'isPrototypeOf');
+	// A constructor that is not an own property must be the base Object constructor
+ if (obj.constructor && !hasOwnConstructor && !hasIsPrototypeOf) {
+ return false;
+ }
+
+	// Own properties are enumerated first, so to speed things up,
+	// if the last key is an own property, then all properties are own.
+ var key;
+ for (key in obj) { /**/ }
+
+ return typeof key === 'undefined' || hasOwn.call(obj, key);
+};
+
+module.exports = function extend() {
+ var options, name, src, copy, copyIsArray, clone;
+ var target = arguments[0];
+ var i = 1;
+ var length = arguments.length;
+ var deep = false;
+
+ // Handle a deep copy situation
+ if (typeof target === 'boolean') {
+ deep = target;
+ target = arguments[1] || {};
+ // skip the boolean and the target
+ i = 2;
+ } else if ((typeof target !== 'object' && typeof target !== 'function') || target == null) {
+ target = {};
+ }
+
+ for (; i < length; ++i) {
+ options = arguments[i];
+ // Only deal with non-null/undefined values
+ if (options != null) {
+ // Extend the base object
+ for (name in options) {
+ src = target[name];
+ copy = options[name];
+
+ // Prevent never-ending loop
+ if (target !== copy) {
+ // Recurse if we're merging plain objects or arrays
+ if (deep && copy && (isPlainObject(copy) || (copyIsArray = isArray(copy)))) {
+ if (copyIsArray) {
+ copyIsArray = false;
+ clone = src && isArray(src) ? src : [];
+ } else {
+ clone = src && isPlainObject(src) ? src : {};
+ }
+
+ // Never move original objects, clone them
+ target[name] = extend(deep, clone, copy);
+
+ // Don't bring in undefined values
+ } else if (typeof copy !== 'undefined') {
+ target[name] = copy;
+ }
+ }
+ }
+ }
+ }
+
+ // Return the modified object
+ return target;
+};
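+
+// For illustration: extend(true, {a: {x: 1}}, {a: {y: 2}}) deep-merges the
+// sources into {a: {x: 1, y: 2}}, while extend({a: 1}, {b: 2}) performs a
+// shallow merge; node.d.plugin uses the deep form to layer configuration
+// files over its internal defaults.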
diff --git a/collectors/node.d.plugin/node_modules/lib/ber/errors.js b/collectors/node.d.plugin/node_modules/lib/ber/errors.js
new file mode 100644
index 000000000..1c0df7b13
--- /dev/null
+++ b/collectors/node.d.plugin/node_modules/lib/ber/errors.js
@@ -0,0 +1,10 @@
+// SPDX-License-Identifier: MIT
+
+module.exports = {
+ InvalidAsn1Error: function(msg) {
+ var e = new Error()
+ e.name = 'InvalidAsn1Error'
+ e.message = msg || ''
+ return e
+ }
+}
diff --git a/collectors/node.d.plugin/node_modules/lib/ber/index.js b/collectors/node.d.plugin/node_modules/lib/ber/index.js
new file mode 100644
index 000000000..eb69ec526
--- /dev/null
+++ b/collectors/node.d.plugin/node_modules/lib/ber/index.js
@@ -0,0 +1,18 @@
+// SPDX-License-Identifier: MIT
+
+var errors = require('./errors')
+var types = require('./types')
+
+var Reader = require('./reader')
+var Writer = require('./writer')
+
+for (var t in types)
+ if (types.hasOwnProperty(t))
+ exports[t] = types[t]
+
+for (var e in errors)
+ if (errors.hasOwnProperty(e))
+ exports[e] = errors[e]
+
+exports.Reader = Reader
+exports.Writer = Writer
diff --git a/collectors/node.d.plugin/node_modules/lib/ber/reader.js b/collectors/node.d.plugin/node_modules/lib/ber/reader.js
new file mode 100644
index 000000000..06decf4b9
--- /dev/null
+++ b/collectors/node.d.plugin/node_modules/lib/ber/reader.js
@@ -0,0 +1,270 @@
+// SPDX-License-Identifier: MIT
+
+var assert = require('assert');
+
+var ASN1 = require('./types');
+var errors = require('./errors');
+
+
+///--- Globals
+
+var InvalidAsn1Error = errors.InvalidAsn1Error;
+
+
+
+///--- API
+
+function Reader(data) {
+ if (!data || !Buffer.isBuffer(data))
+ throw new TypeError('data must be a node Buffer');
+
+ this._buf = data;
+ this._size = data.length;
+
+ // These hold the "current" state
+ this._len = 0;
+ this._offset = 0;
+}
+
+Object.defineProperty(Reader.prototype, 'length', {
+ enumerable: true,
+ get: function () { return (this._len); }
+});
+
+Object.defineProperty(Reader.prototype, 'offset', {
+ enumerable: true,
+ get: function () { return (this._offset); }
+});
+
+Object.defineProperty(Reader.prototype, 'remain', {
+ get: function () { return (this._size - this._offset); }
+});
+
+Object.defineProperty(Reader.prototype, 'buffer', {
+ get: function () { return (this._buf.slice(this._offset)); }
+});
+
+
+/**
+ * Reads a single byte and advances offset; you can pass in `true` to make this
+ * a "peek" operation (i.e., get the byte, but don't advance the offset).
+ *
+ * @param {Boolean} peek true means don't move offset.
+ * @return {Number} the next byte, null if not enough data.
+ */
+Reader.prototype.readByte = function(peek) {
+ if (this._size - this._offset < 1)
+ return null;
+
+ var b = this._buf[this._offset] & 0xff;
+
+ if (!peek)
+ this._offset += 1;
+
+ return b;
+};
+
+
+Reader.prototype.peek = function() {
+ return this.readByte(true);
+};
+
+
+/**
+ * Reads a (potentially) variable length off the BER buffer. This call is
+ * not really meant to be called directly, as callers have to manipulate
+ * the internal buffer afterwards.
+ *
+ * As a result of this call, `Reader.length` returns the parsed length
+ * until the next call that performs another readLength.
+ *
+ * @return {Number} the amount of offset to advance the buffer.
+ * @throws {InvalidAsn1Error} on bad ASN.1
+ */
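+
+// A small worked example of the length octets handled below (illustrative):
+//   0x26            -> short form, length = 38
+//   0x81 0xb3       -> long form, one length byte, length = 179
+//   0x82 0x01 0xb3  -> long form, two length bytes, length = 435
+// A zero long-form length (0x80, indefinite) or more than 4 length bytes
+// is rejected.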
+Reader.prototype.readLength = function(offset) {
+ if (offset === undefined)
+ offset = this._offset;
+
+ if (offset >= this._size)
+ return null;
+
+ var lenB = this._buf[offset++] & 0xff;
+ if (lenB === null)
+ return null;
+
+ if ((lenB & 0x80) == 0x80) {
+ lenB &= 0x7f;
+
+ if (lenB == 0)
+ throw InvalidAsn1Error('Indefinite length not supported');
+
+ if (lenB > 4)
+ throw InvalidAsn1Error('encoding too long');
+
+ if (this._size - offset < lenB)
+ return null;
+
+ this._len = 0;
+ for (var i = 0; i < lenB; i++)
+ this._len = (this._len << 8) + (this._buf[offset++] & 0xff);
+
+ } else {
+ // Wasn't a variable length
+ this._len = lenB;
+ }
+
+ return offset;
+};
+
+
+/**
+ * Parses the next sequence in this BER buffer.
+ *
+ * To get the length of the sequence, call `Reader.length`.
+ *
+ * @return {Number} the sequence's tag.
+ */
+Reader.prototype.readSequence = function(tag) {
+ var seq = this.peek();
+ if (seq === null)
+ return null;
+ if (tag !== undefined && tag !== seq)
+ throw InvalidAsn1Error('Expected 0x' + tag.toString(16) +
+ ': got 0x' + seq.toString(16));
+
+ var o = this.readLength(this._offset + 1); // stored in `length`
+ if (o === null)
+ return null;
+
+ this._offset = o;
+ return seq;
+};
+
+
+Reader.prototype.readInt = function(tag) {
+ if (typeof(tag) !== 'number')
+ tag = ASN1.Integer;
+
+	return this._readTag(tag); // use the caller-supplied tag (defaults to ASN1.Integer above)
+};
+
+
+Reader.prototype.readBoolean = function(tag) {
+ if (typeof(tag) !== 'number')
+ tag = ASN1.Boolean;
+
+ return (this._readTag(tag) === 0 ? false : true);
+};
+
+
+Reader.prototype.readEnumeration = function(tag) {
+ if (typeof(tag) !== 'number')
+ tag = ASN1.Enumeration;
+
+	return this._readTag(tag); // use the caller-supplied tag (defaults to ASN1.Enumeration above)
+};
+
+
+Reader.prototype.readString = function(tag, retbuf) {
+ if (!tag)
+ tag = ASN1.OctetString;
+
+ var b = this.peek();
+ if (b === null)
+ return null;
+
+ if (b !== tag)
+ throw InvalidAsn1Error('Expected 0x' + tag.toString(16) +
+ ': got 0x' + b.toString(16));
+
+ var o = this.readLength(this._offset + 1); // stored in `length`
+
+ if (o === null)
+ return null;
+
+ if (this.length > this._size - o)
+ return null;
+
+ this._offset = o;
+
+ if (this.length === 0)
+ return retbuf ? new Buffer(0) : '';
+
+ var str = this._buf.slice(this._offset, this._offset + this.length);
+ this._offset += this.length;
+
+ return retbuf ? str : str.toString('utf8');
+};
+
+Reader.prototype.readOID = function(tag) {
+ if (!tag)
+ tag = ASN1.OID;
+
+ var b = this.readString(tag, true);
+ if (b === null)
+ return null;
+
+ var values = [];
+ var value = 0;
+
+ for (var i = 0; i < b.length; i++) {
+ var byte = b[i] & 0xff;
+
+ value <<= 7;
+ value += byte & 0x7f;
+ if ((byte & 0x80) == 0) {
+ values.push(value >>> 0);
+ value = 0;
+ }
+ }
+
+ value = values.shift();
+ values.unshift(value % 40);
+ values.unshift((value / 40) >> 0);
+
+ return values.join('.');
+};
+
+
+Reader.prototype._readTag = function(tag) {
+ assert.ok(tag !== undefined);
+
+ var b = this.peek();
+
+ if (b === null)
+ return null;
+
+ if (b !== tag)
+ throw InvalidAsn1Error('Expected 0x' + tag.toString(16) +
+ ': got 0x' + b.toString(16));
+
+ var o = this.readLength(this._offset + 1); // stored in `length`
+ if (o === null)
+ return null;
+
+ if (this.length > 4)
+ throw InvalidAsn1Error('Integer too long: ' + this.length);
+
+ if (this.length > this._size - o)
+ return null;
+ this._offset = o;
+
+ var fb = this._buf[this._offset];
+ var value = 0;
+
+ for (var i = 0; i < this.length; i++) {
+ value <<= 8;
+ value |= (this._buf[this._offset++] & 0xff);
+ }
+
+ if ((fb & 0x80) == 0x80 && i !== 4)
+ value -= (1 << (i * 8));
+
+ return value >> 0;
+};
+
+
+
+///--- Exported API
+
+module.exports = Reader;
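+
+// For illustration (hypothetical buffer): reading a one-byte INTEGER:
+//   var r = new Reader(new Buffer([0x02, 0x01, 0x2a]));
+//   r.readInt();    // -> 42 (tag 0x02, length 1, value 0x2a)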
diff --git a/collectors/node.d.plugin/node_modules/lib/ber/types.js b/collectors/node.d.plugin/node_modules/lib/ber/types.js
new file mode 100644
index 000000000..7519ddcf5
--- /dev/null
+++ b/collectors/node.d.plugin/node_modules/lib/ber/types.js
@@ -0,0 +1,35 @@
+// SPDX-License-Identifier: MIT
+
+module.exports = {
+ EOC: 0,
+ Boolean: 1,
+ Integer: 2,
+ BitString: 3,
+ OctetString: 4,
+ Null: 5,
+ OID: 6,
+ ObjectDescriptor: 7,
+ External: 8,
+ Real: 9,
+ Enumeration: 10,
+ PDV: 11,
+ Utf8String: 12,
+ RelativeOID: 13,
+ Sequence: 16,
+ Set: 17,
+ NumericString: 18,
+ PrintableString: 19,
+ T61String: 20,
+ VideotexString: 21,
+ IA5String: 22,
+ UTCTime: 23,
+ GeneralizedTime: 24,
+ GraphicString: 25,
+ VisibleString: 26,
+ GeneralString: 28,
+ UniversalString: 29,
+ CharacterString: 30,
+ BMPString: 31,
+ Constructor: 32,
+ Context: 128
+}
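+
+// For illustration: a constructed SEQUENCE tag is Sequence | Constructor
+// = 16 | 32 = 0x30, which is the first byte of every BER-encoded SNMP message.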
diff --git a/collectors/node.d.plugin/node_modules/lib/ber/writer.js b/collectors/node.d.plugin/node_modules/lib/ber/writer.js
new file mode 100644
index 000000000..d3a718f14
--- /dev/null
+++ b/collectors/node.d.plugin/node_modules/lib/ber/writer.js
@@ -0,0 +1,318 @@
+// SPDX-License-Identifier: MIT
+
+var assert = require('assert');
+var ASN1 = require('./types');
+var errors = require('./errors');
+
+
+///--- Globals
+
+var InvalidAsn1Error = errors.InvalidAsn1Error;
+
+var DEFAULT_OPTS = {
+ size: 1024,
+ growthFactor: 8
+};
+
+
+///--- Helpers
+
+function merge(from, to) {
+ assert.ok(from);
+ assert.equal(typeof(from), 'object');
+ assert.ok(to);
+ assert.equal(typeof(to), 'object');
+
+ var keys = Object.getOwnPropertyNames(from);
+ keys.forEach(function(key) {
+ if (to[key])
+ return;
+
+ var value = Object.getOwnPropertyDescriptor(from, key);
+ Object.defineProperty(to, key, value);
+ });
+
+ return to;
+}
+
+
+
+///--- API
+
+function Writer(options) {
+ options = merge(DEFAULT_OPTS, options || {});
+
+ this._buf = new Buffer(options.size || 1024);
+ this._size = this._buf.length;
+ this._offset = 0;
+ this._options = options;
+
+ // A list of offsets in the buffer where we need to insert
+ // sequence tag/len pairs.
+ this._seq = [];
+}
+
+Object.defineProperty(Writer.prototype, 'buffer', {
+ get: function () {
+ if (this._seq.length)
+ throw new InvalidAsn1Error(this._seq.length + ' unended sequence(s)');
+
+ return (this._buf.slice(0, this._offset));
+ }
+});
+
+Writer.prototype.writeByte = function(b) {
+ if (typeof(b) !== 'number')
+ throw new TypeError('argument must be a Number');
+
+ this._ensure(1);
+ this._buf[this._offset++] = b;
+};
+
+
+Writer.prototype.writeInt = function(i, tag) {
+ if (typeof(i) !== 'number')
+ throw new TypeError('argument must be a Number');
+ if (typeof(tag) !== 'number')
+ tag = ASN1.Integer;
+
+ var sz = 4;
+
+ while ((((i & 0xff800000) === 0) || ((i & 0xff800000) === 0xff800000 >> 0)) &&
+ (sz > 1)) {
+ sz--;
+ i <<= 8;
+ }
+
+ if (sz > 4)
+ throw new InvalidAsn1Error('BER ints cannot be > 0xffffffff');
+
+ this._ensure(2 + sz);
+ this._buf[this._offset++] = tag;
+ this._buf[this._offset++] = sz;
+
+ while (sz-- > 0) {
+ this._buf[this._offset++] = ((i & 0xff000000) >>> 24);
+ i <<= 8;
+ }
+
+};
+
+
+Writer.prototype.writeNull = function() {
+ this.writeByte(ASN1.Null);
+ this.writeByte(0x00);
+};
+
+
+Writer.prototype.writeEnumeration = function(i, tag) {
+ if (typeof(i) !== 'number')
+ throw new TypeError('argument must be a Number');
+ if (typeof(tag) !== 'number')
+ tag = ASN1.Enumeration;
+
+ return this.writeInt(i, tag);
+};
+
+
+Writer.prototype.writeBoolean = function(b, tag) {
+ if (typeof(b) !== 'boolean')
+ throw new TypeError('argument must be a Boolean');
+ if (typeof(tag) !== 'number')
+ tag = ASN1.Boolean;
+
+ this._ensure(3);
+ this._buf[this._offset++] = tag;
+ this._buf[this._offset++] = 0x01;
+ this._buf[this._offset++] = b ? 0xff : 0x00;
+};
+
+
+Writer.prototype.writeString = function(s, tag) {
+ if (typeof(s) !== 'string')
+ throw new TypeError('argument must be a string (was: ' + typeof(s) + ')');
+ if (typeof(tag) !== 'number')
+ tag = ASN1.OctetString;
+
+ var len = Buffer.byteLength(s);
+ this.writeByte(tag);
+ this.writeLength(len);
+ if (len) {
+ this._ensure(len);
+ this._buf.write(s, this._offset);
+ this._offset += len;
+ }
+};
+
+
+Writer.prototype.writeBuffer = function(buf, tag) {
+ if (!Buffer.isBuffer(buf))
+ throw new TypeError('argument must be a buffer');
+
+ // If no tag is specified we will assume `buf` already contains tag and length
+ if (typeof(tag) === 'number') {
+ this.writeByte(tag);
+ this.writeLength(buf.length);
+ }
+
+ this._ensure(buf.length);
+ buf.copy(this._buf, this._offset, 0, buf.length);
+ this._offset += buf.length;
+};
+
+
+Writer.prototype.writeStringArray = function(strings, tag) {
+ if (! (strings instanceof Array))
+ throw new TypeError('argument must be an Array[String]');
+
+ var self = this;
+ strings.forEach(function(s) {
+ self.writeString(s, tag);
+ });
+};
+
+// This is really to solve DER cases, but whatever for now
+Writer.prototype.writeOID = function(s, tag) {
+ if (typeof(s) !== 'string')
+ throw new TypeError('argument must be a string');
+ if (typeof(tag) !== 'number')
+ tag = ASN1.OID;
+
+ if (!/^([0-9]+\.){3,}[0-9]+$/.test(s))
+ throw new Error('argument is not a valid OID string');
+
+ function encodeOctet(bytes, octet) {
+ if (octet < 128) {
+ bytes.push(octet);
+ } else if (octet < 16384) {
+ bytes.push((octet >>> 7) | 0x80);
+ bytes.push(octet & 0x7F);
+ } else if (octet < 2097152) {
+ bytes.push((octet >>> 14) | 0x80);
+ bytes.push(((octet >>> 7) | 0x80) & 0xFF);
+ bytes.push(octet & 0x7F);
+ } else if (octet < 268435456) {
+ bytes.push((octet >>> 21) | 0x80);
+ bytes.push(((octet >>> 14) | 0x80) & 0xFF);
+ bytes.push(((octet >>> 7) | 0x80) & 0xFF);
+ bytes.push(octet & 0x7F);
+ } else {
+ bytes.push(((octet >>> 28) | 0x80) & 0xFF);
+ bytes.push(((octet >>> 21) | 0x80) & 0xFF);
+ bytes.push(((octet >>> 14) | 0x80) & 0xFF);
+ bytes.push(((octet >>> 7) | 0x80) & 0xFF);
+ bytes.push(octet & 0x7F);
+ }
+ }
+
+ var tmp = s.split('.');
+ var bytes = [];
+ bytes.push(parseInt(tmp[0], 10) * 40 + parseInt(tmp[1], 10));
+ tmp.slice(2).forEach(function(b) {
+ encodeOctet(bytes, parseInt(b, 10));
+ });
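+	// For illustration: "1.3.6.1.4.1.9999" (hypothetical OID) becomes the bytes
+	// 43 (= 1*40 + 3), 6, 1, 4, 1, 0xce 0x0f (9999 in base-128 with continuation
+	// bits), which are then wrapped with the OID tag and a length below.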
+
+ var self = this;
+ this._ensure(2 + bytes.length);
+ this.writeByte(tag);
+ this.writeLength(bytes.length);
+ bytes.forEach(function(b) {
+ self.writeByte(b);
+ });
+};
+
+
+Writer.prototype.writeLength = function(len) {
+ if (typeof(len) !== 'number')
+ throw new TypeError('argument must be a Number');
+
+ this._ensure(4);
+
+ if (len <= 0x7f) {
+ this._buf[this._offset++] = len;
+ } else if (len <= 0xff) {
+ this._buf[this._offset++] = 0x81;
+ this._buf[this._offset++] = len;
+ } else if (len <= 0xffff) {
+ this._buf[this._offset++] = 0x82;
+ this._buf[this._offset++] = len >> 8;
+ this._buf[this._offset++] = len;
+ } else if (len <= 0xffffff) {
+ this._buf[this._offset++] = 0x83;
+ this._buf[this._offset++] = len >> 16;
+ this._buf[this._offset++] = len >> 8;
+ this._buf[this._offset++] = len;
+ } else {
+ throw new InvalidAsn1Error('Length too long (> 4 bytes)');
+ }
+};
+
+Writer.prototype.startSequence = function(tag) {
+ if (typeof(tag) !== 'number')
+ tag = ASN1.Sequence | ASN1.Constructor;
+
+ this.writeByte(tag);
+ this._seq.push(this._offset);
+ this._ensure(3);
+ this._offset += 3;
+};
+
+
+Writer.prototype.endSequence = function() {
+ var seq = this._seq.pop();
+ var start = seq + 3;
+ var len = this._offset - start;
+
+ if (len <= 0x7f) {
+ this._shift(start, len, -2);
+ this._buf[seq] = len;
+ } else if (len <= 0xff) {
+ this._shift(start, len, -1);
+ this._buf[seq] = 0x81;
+ this._buf[seq + 1] = len;
+ } else if (len <= 0xffff) {
+ this._buf[seq] = 0x82;
+ this._buf[seq + 1] = len >> 8;
+ this._buf[seq + 2] = len;
+ } else if (len <= 0xffffff) {
+ this._shift(start, len, 1);
+ this._buf[seq] = 0x83;
+ this._buf[seq + 1] = len >> 16;
+ this._buf[seq + 2] = len >> 8;
+ this._buf[seq + 3] = len;
+ } else {
+ throw new InvalidAsn1Error('Sequence too long');
+ }
+};
+
+
+Writer.prototype._shift = function(start, len, shift) {
+ assert.ok(start !== undefined);
+ assert.ok(len !== undefined);
+ assert.ok(shift);
+
+ this._buf.copy(this._buf, start + shift, start, start + len);
+ this._offset += shift;
+};
+
+Writer.prototype._ensure = function(len) {
+ assert.ok(len);
+
+ if (this._size - this._offset < len) {
+ var sz = this._size * this._options.growthFactor;
+ if (sz - this._offset < len)
+ sz += len;
+
+ var buf = new Buffer(sz);
+
+ this._buf.copy(buf, 0, 0, this._offset);
+ this._buf = buf;
+ this._size = sz;
+ }
+};
+
+
+
+///--- Exported API
+
+module.exports = Writer;
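+
+// For illustration (hypothetical): encoding SEQUENCE { INTEGER 5 }:
+//   var w = new Writer();
+//   w.startSequence();
+//   w.writeInt(5);
+//   w.endSequence();
+//   w.buffer;    // -> <Buffer 30 03 02 01 05>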
diff --git a/collectors/node.d.plugin/node_modules/net-snmp.js b/collectors/node.d.plugin/node_modules/net-snmp.js
new file mode 100644
index 000000000..484597dcb
--- /dev/null
+++ b/collectors/node.d.plugin/node_modules/net-snmp.js
@@ -0,0 +1,1465 @@
+
+// Copyright 2013 Stephen Vickers <stephen.vickers.sv@gmail.com>
+// SPDX-License-Identifier: MIT
+
+var ber = require ("asn1-ber").Ber;
+var dgram = require ("dgram");
+var events = require ("events");
+var util = require ("util");
+
+/*****************************************************************************
+ ** Constants
+ **/
+
+function _expandConstantObject (object) {
+ var keys = [];
+ for (var key in object)
+ keys.push (key);
+ for (var i = 0; i < keys.length; i++)
+ object[object[keys[i]]] = parseInt (keys[i]);
+}
+
+var ErrorStatus = {
+ 0: "NoError",
+ 1: "TooBig",
+ 2: "NoSuchName",
+ 3: "BadValue",
+ 4: "ReadOnly",
+ 5: "GeneralError",
+ 6: "NoAccess",
+ 7: "WrongType",
+ 8: "WrongLength",
+ 9: "WrongEncoding",
+ 10: "WrongValue",
+ 11: "NoCreation",
+ 12: "InconsistentValue",
+ 13: "ResourceUnavailable",
+ 14: "CommitFailed",
+ 15: "UndoFailed",
+ 16: "AuthorizationError",
+ 17: "NotWritable",
+ 18: "InconsistentName"
+};
+
+_expandConstantObject (ErrorStatus);
+
+var ObjectType = {
+ 1: "Boolean",
+ 2: "Integer",
+ 4: "OctetString",
+ 5: "Null",
+ 6: "OID",
+ 64: "IpAddress",
+ 65: "Counter",
+ 66: "Gauge",
+ 67: "TimeTicks",
+ 68: "Opaque",
+ 70: "Counter64",
+ 128: "NoSuchObject",
+ 129: "NoSuchInstance",
+ 130: "EndOfMibView"
+};
+
+_expandConstantObject (ObjectType);
+
+ObjectType.Integer32 = ObjectType.Integer;
+ObjectType.Counter32 = ObjectType.Counter;
+ObjectType.Gauge32 = ObjectType.Gauge;
+ObjectType.Unsigned32 = ObjectType.Gauge32;
+
+var PduType = {
+ 160: "GetRequest",
+ 161: "GetNextRequest",
+ 162: "GetResponse",
+ 163: "SetRequest",
+ 164: "Trap",
+ 165: "GetBulkRequest",
+ 166: "InformRequest",
+ 167: "TrapV2",
+ 168: "Report"
+};
+
+_expandConstantObject (PduType);
+
+var TrapType = {
+ 0: "ColdStart",
+ 1: "WarmStart",
+ 2: "LinkDown",
+ 3: "LinkUp",
+ 4: "AuthenticationFailure",
+ 5: "EgpNeighborLoss",
+ 6: "EnterpriseSpecific"
+};
+
+_expandConstantObject (TrapType);
+
+var Version1 = 0;
+var Version2c = 1;
+
+/*****************************************************************************
+ ** Exception class definitions
+ **/
+
+function ResponseInvalidError (message) {
+ this.name = "ResponseInvalidError";
+ this.message = message;
+ Error.captureStackTrace(this, ResponseInvalidError);
+}
+util.inherits (ResponseInvalidError, Error);
+
+function RequestInvalidError (message) {
+ this.name = "RequestInvalidError";
+ this.message = message;
+ Error.captureStackTrace(this, RequestInvalidError);
+}
+util.inherits (RequestInvalidError, Error);
+
+function RequestFailedError (message, status) {
+ this.name = "RequestFailedError";
+ this.message = message;
+ this.status = status;
+ Error.captureStackTrace(this, RequestFailedError);
+}
+util.inherits (RequestFailedError, Error);
+
+function RequestTimedOutError (message) {
+ this.name = "RequestTimedOutError";
+ this.message = message;
+ Error.captureStackTrace(this, RequestTimedOutError);
+}
+util.inherits (RequestTimedOutError, Error);
+
+/*****************************************************************************
+ ** OID and varbind helper functions
+ **/
+
+function isVarbindError (varbind) {
+ return !!(varbind.type == ObjectType.NoSuchObject
+ || varbind.type == ObjectType.NoSuchInstance
+ || varbind.type == ObjectType.EndOfMibView);
+}
+
+function varbindError (varbind) {
+ return (ObjectType[varbind.type] || "NotAnError") + ": " + varbind.oid;
+}
+
+function oidFollowsOid (oidString, nextString) {
+ var oid = {str: oidString, len: oidString.length, idx: 0};
+ var next = {str: nextString, len: nextString.length, idx: 0};
+ var dotCharCode = ".".charCodeAt (0);
+
+ function getNumber (item) {
+ var n = 0;
+ if (item.idx >= item.len)
+ return null;
+ while (item.idx < item.len) {
+ var charCode = item.str.charCodeAt (item.idx++);
+ if (charCode == dotCharCode)
+ return n;
+ n = (n ? (n * 10) : n) + (charCode - 48);
+ }
+ return n;
+ }
+
+ while (1) {
+ var oidNumber = getNumber (oid);
+ var nextNumber = getNumber (next);
+
+ if (oidNumber !== null) {
+ if (nextNumber !== null) {
+ if (nextNumber > oidNumber) {
+ return true;
+ } else if (nextNumber < oidNumber) {
+ return false;
+ }
+ } else {
+ return true;
+ }
+ } else {
+ return true;
+ }
+ }
+}
+
+function oidInSubtree (oidString, nextString) {
+ var oid = oidString.split (".");
+ var next = nextString.split (".");
+
+ if (oid.length > next.length)
+ return false;
+
+ for (var i = 0; i < oid.length; i++) {
+ if (next[i] != oid[i])
+ return false;
+ }
+
+ return true;
+}
+
+/**
+ ** Some SNMP agents produce integers on the wire such as 00 ff ff ff ff.
+ ** The ASN.1 BER parser we use throws an error when parsing this, which we
+ ** believe is correct. So, we decided not to bother the "asn1" developer(s)
+ ** with this, instead opting to work around it here.
+ **
+ ** If an integer is 5 bytes in length we check if the first byte is 0, and if so
+ ** simply drop it and parse it like it was a 4 byte integer, otherwise throw
+ ** an error since the integer is too large.
+ **/
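+
+// For illustration: the unsigned Counter32 value 4294967295 may arrive as the
+// five bytes 00 ff ff ff ff; readUint() below drops the leading 00 and
+// accumulates the remaining four bytes, while a five byte integer whose first
+// byte is not 00 is rejected as too long.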
+
+function readInt (buffer) {
+ return readUint (buffer, true);
+}
+
+function readUint (buffer, isSigned) {
+ buffer.readByte ();
+ var length = buffer.readByte ();
+ var value = 0;
+ var signedBitSet = false;
+
+ if (length > 5) {
+ throw new RangeError ("Integer too long '" + length + "'");
+ } else if (length == 5) {
+ if (buffer.readByte () !== 0)
+ throw new RangeError ("Integer too long '" + length + "'");
+ length = 4;
+ }
+
+ for (var i = 0; i < length; i++) {
+ value *= 256;
+ value += buffer.readByte ();
+
+ if (isSigned && i <= 0) {
+ if ((value & 0x80) == 0x80)
+ signedBitSet = true;
+ }
+ }
+
+ if (signedBitSet)
+ value -= (1 << (i * 8));
+
+ return value;
+}
+
+function readUint64 (buffer) {
+ var value = buffer.readString (ObjectType.Counter64, true);
+
+ return value;
+}
+
+function readVarbinds (buffer, varbinds) {
+ buffer.readSequence ();
+
+ while (1) {
+ buffer.readSequence ();
+ var oid = buffer.readOID ();
+ var type = buffer.peek ();
+
+ if (type == null)
+ break;
+
+ var value;
+
+ if (type == ObjectType.Boolean) {
+ value = buffer.readBoolean ();
+ } else if (type == ObjectType.Integer) {
+ value = readInt (buffer);
+ } else if (type == ObjectType.OctetString) {
+ value = buffer.readString (null, true);
+ } else if (type == ObjectType.Null) {
+ buffer.readByte ();
+ buffer.readByte ();
+ value = null;
+ } else if (type == ObjectType.OID) {
+ value = buffer.readOID ();
+ } else if (type == ObjectType.IpAddress) {
+ var bytes = buffer.readString (ObjectType.IpAddress, true);
+ if (bytes.length != 4)
+ throw new ResponseInvalidError ("Length '" + bytes.length
+ + "' of IP address '" + bytes.toString ("hex")
+ + "' is not 4");
+ value = bytes[0] + "." + bytes[1] + "." + bytes[2] + "." + bytes[3];
+ } else if (type == ObjectType.Counter) {
+ value = readUint (buffer);
+ } else if (type == ObjectType.Gauge) {
+ value = readUint (buffer);
+ } else if (type == ObjectType.TimeTicks) {
+ value = readUint (buffer);
+ } else if (type == ObjectType.Opaque) {
+ value = buffer.readString (ObjectType.Opaque, true);
+ } else if (type == ObjectType.Counter64) {
+ value = readUint64 (buffer);
+ } else if (type == ObjectType.NoSuchObject) {
+ buffer.readByte ();
+ buffer.readByte ();
+ value = null;
+ } else if (type == ObjectType.NoSuchInstance) {
+ buffer.readByte ();
+ buffer.readByte ();
+ value = null;
+ } else if (type == ObjectType.EndOfMibView) {
+ buffer.readByte ();
+ buffer.readByte ();
+ value = null;
+ } else {
+ throw new ResponseInvalidError ("Unknown type '" + type
+ + "' in response");
+ }
+
+ varbinds.push ({
+ oid: oid,
+ type: type,
+ value: value
+ });
+ }
+}
+
+function writeUint (buffer, type, value) {
+ var b = new Buffer (4);
+ b.writeUInt32BE (value, 0);
+ buffer.writeBuffer (b, type);
+}
+
+function writeUint64 (buffer, value) {
+ buffer.writeBuffer (value, ObjectType.Counter64);
+}
+
+function writeVarbinds (buffer, varbinds) {
+ buffer.startSequence ();
+ for (var i = 0; i < varbinds.length; i++) {
+ buffer.startSequence ();
+ buffer.writeOID (varbinds[i].oid);
+
+ if (varbinds[i].type && varbinds[i].hasOwnProperty("value")) {
+ var type = varbinds[i].type;
+ var value = varbinds[i].value;
+
+ if (type == ObjectType.Boolean) {
+ buffer.writeBoolean (value ? true : false);
+ } else if (type == ObjectType.Integer) { // also Integer32
+ buffer.writeInt (value);
+ } else if (type == ObjectType.OctetString) {
+ if (typeof value == "string")
+ buffer.writeString (value);
+ else
+ buffer.writeBuffer (value, ObjectType.OctetString);
+ } else if (type == ObjectType.Null) {
+ buffer.writeNull ();
+ } else if (type == ObjectType.OID) {
+ buffer.writeOID (value);
+ } else if (type == ObjectType.IpAddress) {
+ var bytes = value.split (".");
+ if (bytes.length != 4)
+ throw new RequestInvalidError ("Invalid IP address '"
+ + value + "'");
+ buffer.writeBuffer (new Buffer (bytes), 64);
+ } else if (type == ObjectType.Counter) { // also Counter32
+ writeUint (buffer, ObjectType.Counter, value);
+ } else if (type == ObjectType.Gauge) { // also Gauge32 & Unsigned32
+ writeUint (buffer, ObjectType.Gauge, value);
+ } else if (type == ObjectType.TimeTicks) {
+ writeUint (buffer, ObjectType.TimeTicks, value);
+ } else if (type == ObjectType.Opaque) {
+ buffer.writeBuffer (value, ObjectType.Opaque);
+ } else if (type == ObjectType.Counter64) {
+ writeUint64 (buffer, value);
+ } else {
+ throw new RequestInvalidError ("Unknown type '" + type
+ + "' in request");
+ }
+ } else {
+ buffer.writeNull ();
+ }
+
+ buffer.endSequence ();
+ }
+ buffer.endSequence ();
+}
+
+/*****************************************************************************
+ ** PDU class definitions
+ **/
+
+var SimplePdu = function (id, varbinds, options) {
+ this.id = id;
+ this.varbinds = varbinds;
+ this.options = options || {};
+};
+
+SimplePdu.prototype.toBuffer = function (buffer) {
+ buffer.startSequence (this.type);
+
+ buffer.writeInt (this.id);
+ buffer.writeInt ((this.type == PduType.GetBulkRequest)
+ ? (this.options.nonRepeaters || 0)
+ : 0);
+ buffer.writeInt ((this.type == PduType.GetBulkRequest)
+ ? (this.options.maxRepetitions || 0)
+ : 0);
+
+ writeVarbinds (buffer, this.varbinds);
+
+ buffer.endSequence ();
+};
+
+var GetBulkRequestPdu = function () {
+ this.type = PduType.GetBulkRequest;
+ GetBulkRequestPdu.super_.apply (this, arguments);
+};
+
+util.inherits (GetBulkRequestPdu, SimplePdu);
+
+var GetNextRequestPdu = function () {
+ this.type = PduType.GetNextRequest;
+ GetNextRequestPdu.super_.apply (this, arguments);
+};
+
+util.inherits (GetNextRequestPdu, SimplePdu);
+
+var GetResponsePdu = function (buffer) {
+ this.type = PduType.GetResponse;
+
+ buffer.readSequence (this.type);
+
+ this.id = buffer.readInt ();
+
+ this.errorStatus = buffer.readInt ();
+ this.errorIndex = buffer.readInt ();
+
+ this.varbinds = [];
+
+ readVarbinds (buffer, this.varbinds);
+};
+
+var GetRequestPdu = function () {
+ this.type = PduType.GetRequest;
+ GetRequestPdu.super_.apply (this, arguments);
+};
+
+util.inherits (GetRequestPdu, SimplePdu);
+
+var InformRequestPdu = function () {
+ this.type = PduType.InformRequest;
+ InformRequestPdu.super_.apply (this, arguments);
+};
+
+util.inherits (InformRequestPdu, SimplePdu);
+
+var SetRequestPdu = function () {
+ this.type = PduType.SetRequest;
+ SetRequestPdu.super_.apply (this, arguments);
+};
+
+util.inherits (SetRequestPdu, SimplePdu);
+
+var TrapPdu = function (typeOrOid, varbinds, options) {
+ this.type = PduType.Trap;
+
+ this.agentAddr = options.agentAddr || "127.0.0.1";
+ this.upTime = options.upTime;
+
+ if (typeof typeOrOid == "string") {
+ this.generic = TrapType.EnterpriseSpecific;
+ this.specific = parseInt (typeOrOid.match (/\.(\d+)$/)[1]);
+ this.enterprise = typeOrOid.replace (/\.(\d+)$/, "");
+ } else {
+ this.generic = typeOrOid;
+ this.specific = 0;
+ this.enterprise = "1.3.6.1.4.1";
+ }
+
+ this.varbinds = varbinds;
+};
+
+TrapPdu.prototype.toBuffer = function (buffer) {
+ buffer.startSequence (this.type);
+
+ buffer.writeOID (this.enterprise);
+ buffer.writeBuffer (new Buffer (this.agentAddr.split (".")),
+ ObjectType.IpAddress);
+ buffer.writeInt (this.generic);
+ buffer.writeInt (this.specific);
+ writeUint (buffer, ObjectType.TimeTicks,
+ this.upTime || Math.floor (process.uptime () * 100));
+
+ writeVarbinds (buffer, this.varbinds);
+
+ buffer.endSequence ();
+};
+
+var TrapV2Pdu = function () {
+ this.type = PduType.TrapV2;
+ TrapV2Pdu.super_.apply (this, arguments);
+};
+
+util.inherits (TrapV2Pdu, SimplePdu);
+
+/*****************************************************************************
+ ** Message class definitions
+ **/
+
+var RequestMessage = function (version, community, pdu) {
+ this.version = version;
+ this.community = community;
+ this.pdu = pdu;
+};
+
+RequestMessage.prototype.toBuffer = function () {
+ if (this.buffer)
+ return this.buffer;
+
+ var writer = new ber.Writer ();
+
+ writer.startSequence ();
+
+ writer.writeInt (this.version);
+ writer.writeString (this.community);
+
+ this.pdu.toBuffer (writer);
+
+ writer.endSequence ();
+
+ this.buffer = writer.buffer;
+
+ return this.buffer;
+};
+
+var ResponseMessage = function (buffer) {
+ var reader = new ber.Reader (buffer);
+
+ reader.readSequence ();
+
+ this.version = reader.readInt ();
+ this.community = reader.readString ();
+
+ var type = reader.peek ();
+
+ if (type == PduType.GetResponse) {
+ this.pdu = new GetResponsePdu (reader);
+ } else {
+ throw new ResponseInvalidError ("Unknown PDU type '" + type
+ + "' in response");
+ }
+};
+
+/*****************************************************************************
+ ** Session class definition
+ **/
+
+var Session = function (target, community, options) {
+ this.target = target || "127.0.0.1";
+ this.community = community || "public";
+
+ this.version = (options && options.version)
+ ? options.version
+ : Version1;
+
+ this.transport = (options && options.transport)
+ ? options.transport
+ : "udp4";
+ this.port = (options && options.port )
+ ? options.port
+ : 161;
+ this.trapPort = (options && options.trapPort )
+ ? options.trapPort
+ : 162;
+
+ this.retries = (options && (options.retries || options.retries == 0))
+ ? options.retries
+ : 1;
+ this.timeout = (options && options.timeout)
+ ? options.timeout
+ : 5000;
+
+ this.sourceAddress = (options && options.sourceAddress )
+ ? options.sourceAddress
+ : undefined;
+ this.sourcePort = (options && options.sourcePort )
+ ? parseInt(options.sourcePort)
+ : undefined;
+
+ this.idBitsSize = (options && options.idBitsSize)
+ ? parseInt(options.idBitsSize)
+ : 32;
+
+ this.reqs = {};
+ this.reqCount = 0;
+
+ this.dgram = dgram.createSocket (this.transport);
+ this.dgram.unref();
+
+ var me = this;
+ this.dgram.on ("message", me.onMsg.bind (me));
+ this.dgram.on ("close", me.onClose.bind (me));
+ this.dgram.on ("error", me.onError.bind (me));
+
+ if (this.sourceAddress || this.sourcePort)
+ this.dgram.bind (this.sourcePort, this.sourceAddress);
+};
+
+util.inherits (Session, events.EventEmitter);
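+
+// Typical usage, assuming the createSession() factory exported later in this
+// file (as in upstream net-snmp):
+//
+//   var snmp = require ("net-snmp");
+//   var session = snmp.createSession ("192.0.2.1", "public",
+//           {version: snmp.Version2c, timeout: 2000, retries: 1});
+//   session.get (["1.3.6.1.2.1.1.3.0"], function (error, varbinds) {
+//       if (error) console.error (error.toString ());
+//       else console.log (varbinds[0].oid + " = " + varbinds[0].value);
+//       session.close ();
+//   });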
+
+Session.prototype.close = function () {
+ this.dgram.close ();
+ return this;
+};
+
+Session.prototype.cancelRequests = function (error) {
+ var id;
+ for (id in this.reqs) {
+ var req = this.reqs[id];
+ this.unregisterRequest (req.id);
+ req.responseCb (error);
+ }
+};
+
+function _generateId (bitSize) {
+ if (bitSize === 16) {
+ return Math.floor(Math.random() * 10000) % 65535;
+ }
+ return Math.floor(Math.random() * 100000000) % 4294967295;
+}
+
+Session.prototype.get = function (oids, responseCb) {
+ function feedCb (req, message) {
+ var pdu = message.pdu;
+ var varbinds = [];
+
+ if (req.message.pdu.varbinds.length != pdu.varbinds.length) {
+ req.responseCb (new ResponseInvalidError ("Requested OIDs do not "
+ + "match response OIDs"));
+ } else {
+ for (var i = 0; i < req.message.pdu.varbinds.length; i++) {
+ if (req.message.pdu.varbinds[i].oid != pdu.varbinds[i].oid) {
+ req.responseCb (new ResponseInvalidError ("OID '"
+ + req.message.pdu.varbinds[i].oid
+							+ "' in request at position '" + i + "' does not "
+ + "match OID '" + pdu.varbinds[i].oid + "' in response "
+ + "at position '" + i + "'"));
+ return;
+ } else {
+ varbinds.push (pdu.varbinds[i]);
+ }
+ }
+
+ req.responseCb (null, varbinds);
+ }
+ }
+
+ var pduVarbinds = [];
+
+ for (var i = 0; i < oids.length; i++) {
+ var varbind = {
+ oid: oids[i]
+ };
+ pduVarbinds.push (varbind);
+ }
+
+ this.simpleGet (GetRequestPdu, feedCb, pduVarbinds, responseCb);
+
+ return this;
+};
+
+Session.prototype.getBulk = function () {
+ var oids, nonRepeaters, maxRepetitions, responseCb;
+
+ if (arguments.length >= 4) {
+ oids = arguments[0];
+ nonRepeaters = arguments[1];
+ maxRepetitions = arguments[2];
+ responseCb = arguments[3];
+ } else if (arguments.length >= 3) {
+ oids = arguments[0];
+ nonRepeaters = arguments[1];
+ maxRepetitions = 10;
+ responseCb = arguments[2];
+ } else {
+ oids = arguments[0];
+ nonRepeaters = 0;
+ maxRepetitions = 10;
+ responseCb = arguments[1];
+ }
+
+ function feedCb (req, message) {
+ var pdu = message.pdu;
+ var varbinds = [];
+ var i = 0;
+
+ // first walk through and grab non-repeaters
+ if (pdu.varbinds.length < nonRepeaters) {
+ req.responseCb (new ResponseInvalidError ("Varbind count in "
+ + "response '" + pdu.varbinds.length + "' is less than "
+ + "non-repeaters '" + nonRepeaters + "' in request"));
+ } else {
+ for ( ; i < nonRepeaters; i++) {
+ if (isVarbindError (pdu.varbinds[i])) {
+ varbinds.push (pdu.varbinds[i]);
+ } else if (! oidFollowsOid (req.message.pdu.varbinds[i].oid,
+ pdu.varbinds[i].oid)) {
+ req.responseCb (new ResponseInvalidError ("OID '"
+ + req.message.pdu.varbinds[i].oid + "' in request at "
+							+ "position '" + i + "' does not precede "
+ + "OID '" + pdu.varbinds[i].oid + "' in response "
+ + "at position '" + i + "'"));
+ return;
+ } else {
+ varbinds.push (pdu.varbinds[i]);
+ }
+ }
+ }
+
+ var repeaters = req.message.pdu.varbinds.length - nonRepeaters;
+
+ // secondly walk through and grab repeaters
+ if (pdu.varbinds.length % (repeaters)) {
+ req.responseCb (new ResponseInvalidError ("Varbind count in "
+ + "response '" + pdu.varbinds.length + "' is not a "
+ + "multiple of repeaters '" + repeaters
+ + "' plus non-repeaters '" + nonRepeaters + "' in request"));
+ } else {
+ while (i < pdu.varbinds.length) {
+ for (var j = 0; j < repeaters; j++, i++) {
+ var reqIndex = nonRepeaters + j;
+ var respIndex = i;
+
+ if (isVarbindError (pdu.varbinds[respIndex])) {
+ if (! varbinds[reqIndex])
+ varbinds[reqIndex] = [];
+ varbinds[reqIndex].push (pdu.varbinds[respIndex]);
+ } else if (! oidFollowsOid (
+ req.message.pdu.varbinds[reqIndex].oid,
+ pdu.varbinds[respIndex].oid)) {
+ req.responseCb (new ResponseInvalidError ("OID '"
+ + req.message.pdu.varbinds[reqIndex].oid
+								+ "' in request at position '" + (reqIndex)
+ + "' does not precede OID '"
+ + pdu.varbinds[respIndex].oid
+ + "' in response at position '" + (respIndex) + "'"));
+ return;
+ } else {
+ if (! varbinds[reqIndex])
+ varbinds[reqIndex] = [];
+ varbinds[reqIndex].push (pdu.varbinds[respIndex]);
+ }
+ }
+ }
+ }
+
+ req.responseCb (null, varbinds);
+ }
+
+ var pduVarbinds = [];
+
+ for (var i = 0; i < oids.length; i++) {
+ var varbind = {
+ oid: oids[i]
+ };
+ pduVarbinds.push (varbind);
+ }
+
+ var options = {
+ nonRepeaters: nonRepeaters,
+ maxRepetitions: maxRepetitions
+ };
+
+ this.simpleGet (GetBulkRequestPdu, feedCb, pduVarbinds, responseCb,
+ options);
+
+ return this;
+};
+
+Session.prototype.getNext = function (oids, responseCb) {
+ function feedCb (req, message) {
+ var pdu = message.pdu;
+ var varbinds = [];
+
+ if (req.message.pdu.varbinds.length != pdu.varbinds.length) {
+ req.responseCb (new ResponseInvalidError ("Requested OIDs do not "
+ + "match response OIDs"));
+ } else {
+ for (var i = 0; i < req.message.pdu.varbinds.length; i++) {
+ if (isVarbindError (pdu.varbinds[i])) {
+ varbinds.push (pdu.varbinds[i]);
+ } else if (! oidFollowsOid (req.message.pdu.varbinds[i].oid,
+ pdu.varbinds[i].oid)) {
+ req.responseCb (new ResponseInvalidError ("OID '"
+ + req.message.pdu.varbinds[i].oid + "' in request at "
+							+ "position '" + i + "' does not precede "
+ + "OID '" + pdu.varbinds[i].oid + "' in response "
+ + "at position '" + i + "'"));
+ return;
+ } else {
+ varbinds.push (pdu.varbinds[i]);
+ }
+ }
+
+ req.responseCb (null, varbinds);
+ }
+ }
+
+ var pduVarbinds = [];
+
+ for (var i = 0; i < oids.length; i++) {
+ var varbind = {
+ oid: oids[i]
+ };
+ pduVarbinds.push (varbind);
+ }
+
+ this.simpleGet (GetNextRequestPdu, feedCb, pduVarbinds, responseCb);
+
+ return this;
+};
+
+Session.prototype.inform = function () {
+ var typeOrOid = arguments[0];
+ var varbinds, options = {}, responseCb;
+
+ /**
+ ** Support the following signatures:
+ **
+ ** typeOrOid, varbinds, options, callback
+ ** typeOrOid, varbinds, callback
+ ** typeOrOid, options, callback
+ ** typeOrOid, callback
+ **/
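+	/**
+	 ** Illustrative call (a sketch only, assuming snmp = require("net-snmp")
+	 ** and an already created session; the trap type is an example value):
+	 **
+	 **   session.inform (snmp.TrapType.LinkDown, function (error, varbinds) {
+	 **       if (error)
+	 **           console.error (error);
+	 **   });
+	 **/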
+ if (arguments.length >= 4) {
+ varbinds = arguments[1];
+ options = arguments[2];
+ responseCb = arguments[3];
+ } else if (arguments.length >= 3) {
+ if (arguments[1].constructor != Array) {
+ varbinds = [];
+ options = arguments[1];
+ responseCb = arguments[2];
+ } else {
+ varbinds = arguments[1];
+ responseCb = arguments[2];
+ }
+ } else {
+ varbinds = [];
+ responseCb = arguments[1];
+ }
+
+ function feedCb (req, message) {
+ var pdu = message.pdu;
+ var varbinds = [];
+
+ if (req.message.pdu.varbinds.length != pdu.varbinds.length) {
+ req.responseCb (new ResponseInvalidError ("Inform OIDs do not "
+ + "match response OIDs"));
+ } else {
+ for (var i = 0; i < req.message.pdu.varbinds.length; i++) {
+ if (req.message.pdu.varbinds[i].oid != pdu.varbinds[i].oid) {
+ req.responseCb (new ResponseInvalidError ("OID '"
+ + req.message.pdu.varbinds[i].oid
+							+ "' in inform at position '" + i + "' does not "
+ + "match OID '" + pdu.varbinds[i].oid + "' in response "
+ + "at position '" + i + "'"));
+ return;
+ } else {
+ varbinds.push (pdu.varbinds[i]);
+ }
+ }
+
+ req.responseCb (null, varbinds);
+ }
+ }
+
+ if (typeof typeOrOid != "string")
+ typeOrOid = "1.3.6.1.6.3.1.1.5." + (typeOrOid + 1);
+
+ var pduVarbinds = [
+ {
+ oid: "1.3.6.1.2.1.1.3.0",
+ type: ObjectType.TimeTicks,
+ value: options.upTime || Math.floor (process.uptime () * 100)
+ },
+ {
+ oid: "1.3.6.1.6.3.1.1.4.1.0",
+ type: ObjectType.OID,
+ value: typeOrOid
+ }
+ ];
+
+ for (var i = 0; i < varbinds.length; i++) {
+ var varbind = {
+ oid: varbinds[i].oid,
+ type: varbinds[i].type,
+ value: varbinds[i].value
+ };
+ pduVarbinds.push (varbind);
+ }
+
+ options.port = this.trapPort;
+
+ this.simpleGet (InformRequestPdu, feedCb, pduVarbinds, responseCb, options);
+
+ return this;
+};
+
+Session.prototype.onClose = function () {
+ this.cancelRequests (new Error ("Socket forcibly closed"));
+ this.emit ("close");
+};
+
+Session.prototype.onError = function (error) {
+	this.emit ("error", error);
+};
+
+Session.prototype.onMsg = function (buffer, remote) {
+ try {
+ var message = new ResponseMessage (buffer);
+
+ var req = this.unregisterRequest (message.pdu.id);
+ if (! req)
+ return;
+
+ try {
+ if (message.version != req.message.version) {
+ req.responseCb (new ResponseInvalidError ("Version in request '"
+ + req.message.version + "' does not match version in "
+						+ "response '" + message.version + "'"));
+ } else if (message.community != req.message.community) {
+ req.responseCb (new ResponseInvalidError ("Community '"
+ + req.message.community + "' in request does not match "
+ + "community '" + message.community + "' in response"));
+ } else if (message.pdu.type == PduType.GetResponse) {
+ req.onResponse (req, message);
+ } else {
+ req.responseCb (new ResponseInvalidError ("Unknown PDU type '"
+ + message.pdu.type + "' in response"));
+ }
+ } catch (error) {
+ req.responseCb (error);
+ }
+ } catch (error) {
+ this.emit("error", error);
+ }
+};
+
+Session.prototype.onSimpleGetResponse = function (req, message) {
+ var pdu = message.pdu;
+
+ if (pdu.errorStatus > 0) {
+ var statusString = ErrorStatus[pdu.errorStatus]
+ || ErrorStatus.GeneralError;
+ var statusCode = ErrorStatus[statusString]
+ || ErrorStatus[ErrorStatus.GeneralError];
+
+ if (pdu.errorIndex <= 0 || pdu.errorIndex > pdu.varbinds.length) {
+ req.responseCb (new RequestFailedError (statusString, statusCode));
+ } else {
+ var oid = pdu.varbinds[pdu.errorIndex - 1].oid;
+ var error = new RequestFailedError (statusString + ": " + oid,
+ statusCode);
+ req.responseCb (error);
+ }
+ } else {
+ req.feedCb (req, message);
+ }
+};
+
+Session.prototype.registerRequest = function (req) {
+ if (! this.reqs[req.id]) {
+ this.reqs[req.id] = req;
+ if (this.reqCount <= 0)
+ this.dgram.ref();
+ this.reqCount++;
+ }
+ var me = this;
+ req.timer = setTimeout (function () {
+ if (req.retries-- > 0) {
+ me.send (req);
+ } else {
+ me.unregisterRequest (req.id);
+ req.responseCb (new RequestTimedOutError (
+ "Request timed out"));
+ }
+ }, req.timeout);
+};
+
+Session.prototype.send = function (req, noWait) {
+ try {
+ var me = this;
+
+ var buffer = req.message.toBuffer ();
+
+ this.dgram.send (buffer, 0, buffer.length, req.port, this.target,
+ function (error, bytes) {
+ if (error) {
+ req.responseCb (error);
+ } else {
+ if (noWait) {
+ req.responseCb (null);
+ } else {
+ me.registerRequest (req);
+ }
+ }
+ });
+ } catch (error) {
+ req.responseCb (error);
+ }
+
+ return this;
+};
+
+Session.prototype.set = function (varbinds, responseCb) {
+ function feedCb (req, message) {
+ var pdu = message.pdu;
+ var varbinds = [];
+
+ if (req.message.pdu.varbinds.length != pdu.varbinds.length) {
+ req.responseCb (new ResponseInvalidError ("Requested OIDs do not "
+ + "match response OIDs"));
+ } else {
+ for (var i = 0; i < req.message.pdu.varbinds.length; i++) {
+ if (req.message.pdu.varbinds[i].oid != pdu.varbinds[i].oid) {
+ req.responseCb (new ResponseInvalidError ("OID '"
+ + req.message.pdu.varbinds[i].oid
+							+ "' in request at position '" + i + "' does not "
+ + "match OID '" + pdu.varbinds[i].oid + "' in response "
+ + "at position '" + i + "'"));
+ return;
+ } else {
+ varbinds.push (pdu.varbinds[i]);
+ }
+ }
+
+ req.responseCb (null, varbinds);
+ }
+ }
+
+ var pduVarbinds = [];
+
+ for (var i = 0; i < varbinds.length; i++) {
+ var varbind = {
+ oid: varbinds[i].oid,
+ type: varbinds[i].type,
+ value: varbinds[i].value
+ };
+ pduVarbinds.push (varbind);
+ }
+
+ this.simpleGet (SetRequestPdu, feedCb, pduVarbinds, responseCb);
+
+ return this;
+};
+
+Session.prototype.simpleGet = function (pduClass, feedCb, varbinds,
+ responseCb, options) {
+ var req = {};
+
+ try {
+ var id = _generateId (this.idBitsSize);
+ var pdu = new pduClass (id, varbinds, options);
+ var message = new RequestMessage (this.version, this.community, pdu);
+
+ req = {
+ id: id,
+ message: message,
+ responseCb: responseCb,
+ retries: this.retries,
+ timeout: this.timeout,
+ onResponse: this.onSimpleGetResponse,
+ feedCb: feedCb,
+ port: (options && options.port) ? options.port : this.port
+ };
+
+ this.send (req);
+ } catch (error) {
+ if (req.responseCb)
+ req.responseCb (error);
+ }
+};
+
+function subtreeCb (req, varbinds) {
+ var done = 0;
+
+ for (var i = varbinds.length; i > 0; i--) {
+ if (! oidInSubtree (req.baseOid, varbinds[i - 1].oid)) {
+ done = 1;
+ varbinds.pop ();
+ }
+ }
+
+ if (varbinds.length > 0)
+ req.feedCb (varbinds);
+
+ if (done)
+ return true;
+}
+
+Session.prototype.subtree = function () {
+ var me = this;
+ var oid = arguments[0];
+ var maxRepetitions, feedCb, doneCb;
+
+ if (arguments.length < 4) {
+ maxRepetitions = 20;
+ feedCb = arguments[1];
+ doneCb = arguments[2];
+ } else {
+ maxRepetitions = arguments[1];
+ feedCb = arguments[2];
+ doneCb = arguments[3];
+ }
+
+ var req = {
+ feedCb: feedCb,
+ doneCb: doneCb,
+ maxRepetitions: maxRepetitions,
+ baseOid: oid
+ };
+
+ this.walk (oid, maxRepetitions, subtreeCb.bind (me, req), doneCb);
+
+ return this;
+};
+
+function tableColumnsResponseCb (req, error) {
+ if (error) {
+ req.responseCb (error);
+ } else if (req.error) {
+ req.responseCb (req.error);
+ } else {
+ if (req.columns.length > 0) {
+ var column = req.columns.pop ();
+ var me = this;
+ this.subtree (req.rowOid + column, req.maxRepetitions,
+ tableColumnsFeedCb.bind (me, req),
+ tableColumnsResponseCb.bind (me, req));
+ } else {
+ req.responseCb (null, req.table);
+ }
+ }
+}
+
+function tableColumnsFeedCb (req, varbinds) {
+ for (var i = 0; i < varbinds.length; i++) {
+ if (isVarbindError (varbinds[i])) {
+			req.error = new RequestFailedError (varbindError (varbinds[i]));
+ return true;
+ }
+
+ var oid = varbinds[i].oid.replace (req.rowOid, "");
+ if (oid && oid != varbinds[i].oid) {
+ var match = oid.match (/^(\d+)\.(.+)$/);
+ if (match && match[1] > 0) {
+ if (! req.table[match[2]])
+ req.table[match[2]] = {};
+ req.table[match[2]][match[1]] = varbinds[i].value;
+ }
+ }
+ }
+}
+
+Session.prototype.tableColumns = function () {
+ var me = this;
+
+ var oid = arguments[0];
+ var columns = arguments[1];
+ var maxRepetitions, responseCb;
+
+ if (arguments.length < 4) {
+ responseCb = arguments[2];
+ maxRepetitions = 20;
+ } else {
+ maxRepetitions = arguments[2];
+ responseCb = arguments[3];
+ }
+
+ var req = {
+ responseCb: responseCb,
+ maxRepetitions: maxRepetitions,
+ baseOid: oid,
+ rowOid: oid + ".1.",
+ columns: columns.slice(0),
+ table: {}
+ };
+
+ if (req.columns.length > 0) {
+ var column = req.columns.pop ();
+ this.subtree (req.rowOid + column, maxRepetitions,
+ tableColumnsFeedCb.bind (me, req),
+ tableColumnsResponseCb.bind (me, req));
+ }
+
+ return this;
+};
+
+function tableResponseCb (req, error) {
+ if (error)
+ req.responseCb (error);
+ else if (req.error)
+ req.responseCb (req.error);
+ else
+ req.responseCb (null, req.table);
+}
+
+function tableFeedCb (req, varbinds) {
+ for (var i = 0; i < varbinds.length; i++) {
+ if (isVarbindError (varbinds[i])) {
+			req.error = new RequestFailedError (varbindError (varbinds[i]));
+ return true;
+ }
+
+ var oid = varbinds[i].oid.replace (req.rowOid, "");
+ if (oid && oid != varbinds[i].oid) {
+ var match = oid.match (/^(\d+)\.(.+)$/);
+ if (match && match[1] > 0) {
+ if (! req.table[match[2]])
+ req.table[match[2]] = {};
+ req.table[match[2]][match[1]] = varbinds[i].value;
+ }
+ }
+ }
+}
+
+Session.prototype.table = function () {
+ var me = this;
+
+ var oid = arguments[0];
+ var maxRepetitions, responseCb;
+
+ if (arguments.length < 3) {
+ responseCb = arguments[1];
+ maxRepetitions = 20;
+ } else {
+ maxRepetitions = arguments[1];
+ responseCb = arguments[2];
+ }
+
+ var req = {
+ responseCb: responseCb,
+ maxRepetitions: maxRepetitions,
+ baseOid: oid,
+ rowOid: oid + ".1.",
+ table: {}
+ };
+
+ this.subtree (oid, maxRepetitions, tableFeedCb.bind (me, req),
+ tableResponseCb.bind (me, req));
+
+ return this;
+};
+
+Session.prototype.trap = function () {
+ var req = {};
+
+ try {
+ var typeOrOid = arguments[0];
+ var varbinds, options = {}, responseCb;
+
+ /**
+ ** Support the following signatures:
+ **
+ ** typeOrOid, varbinds, options, callback
+ ** typeOrOid, varbinds, agentAddr, callback
+ ** typeOrOid, varbinds, callback
+ ** typeOrOid, agentAddr, callback
+ ** typeOrOid, options, callback
+ ** typeOrOid, callback
+ **/
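+		/**
+		 ** Illustrative call (a sketch only, assuming snmp = require("net-snmp")
+		 ** and an already created session; values are examples):
+		 **
+		 **   session.trap (snmp.TrapType.ColdStart, function (error) {
+		 **       if (error)
+		 **           console.error (error);
+		 **   });
+		 **/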
+ if (arguments.length >= 4) {
+ varbinds = arguments[1];
+ if (typeof arguments[2] == "string") {
+ options.agentAddr = arguments[2];
+ } else if (arguments[2].constructor != Array) {
+ options = arguments[2];
+ }
+ responseCb = arguments[3];
+ } else if (arguments.length >= 3) {
+ if (typeof arguments[1] == "string") {
+ varbinds = [];
+ options.agentAddr = arguments[1];
+ } else if (arguments[1].constructor != Array) {
+ varbinds = [];
+ options = arguments[1];
+ } else {
+ varbinds = arguments[1];
+				options.agentAddr = null;
+ }
+ responseCb = arguments[2];
+ } else {
+ varbinds = [];
+ responseCb = arguments[1];
+ }
+
+ var pdu, pduVarbinds = [];
+
+ for (var i = 0; i < varbinds.length; i++) {
+ var varbind = {
+ oid: varbinds[i].oid,
+ type: varbinds[i].type,
+ value: varbinds[i].value
+ };
+ pduVarbinds.push (varbind);
+ }
+
+ var id = _generateId (this.idBitsSize);
+
+ if (this.version == Version2c) {
+ if (typeof typeOrOid != "string")
+ typeOrOid = "1.3.6.1.6.3.1.1.5." + (typeOrOid + 1);
+
+ pduVarbinds.unshift (
+ {
+ oid: "1.3.6.1.2.1.1.3.0",
+ type: ObjectType.TimeTicks,
+ value: options.upTime || Math.floor (process.uptime () * 100)
+ },
+ {
+ oid: "1.3.6.1.6.3.1.1.4.1.0",
+ type: ObjectType.OID,
+ value: typeOrOid
+ }
+ );
+
+ pdu = new TrapV2Pdu (id, pduVarbinds, options);
+ } else {
+ pdu = new TrapPdu (typeOrOid, pduVarbinds, options);
+ }
+
+ var message = new RequestMessage (this.version, this.community, pdu);
+
+ req = {
+ id: id,
+ message: message,
+ responseCb: responseCb,
+ port: this.trapPort
+ };
+
+ this.send (req, true);
+ } catch (error) {
+ if (req.responseCb)
+ req.responseCb (error);
+ }
+
+ return this;
+};
+
+Session.prototype.unregisterRequest = function (id) {
+ var req = this.reqs[id];
+ if (req) {
+ delete this.reqs[id];
+ clearTimeout (req.timer);
+ delete req.timer;
+ this.reqCount--;
+ if (this.reqCount <= 0)
+ this.dgram.unref();
+ return req;
+ } else {
+ return null;
+ }
+};
+
+function walkCb (req, error, varbinds) {
+ var done = 0;
+ var oid;
+
+ if (error) {
+ if (error instanceof RequestFailedError) {
+ if (error.status != ErrorStatus.NoSuchName) {
+ req.doneCb (error);
+ return;
+ } else {
+ // signal the version 1 walk code below that it should stop
+ done = 1;
+ }
+ } else {
+ req.doneCb (error);
+ return;
+ }
+ }
+
+ if (this.version == Version2c) {
+ for (var i = varbinds[0].length; i > 0; i--) {
+ if (varbinds[0][i - 1].type == ObjectType.EndOfMibView) {
+ varbinds[0].pop ();
+ done = 1;
+ }
+ }
+ if (req.feedCb (varbinds[0]))
+ done = 1;
+ if (! done)
+ oid = varbinds[0][varbinds[0].length - 1].oid;
+ } else {
+ if (! done) {
+ if (req.feedCb (varbinds)) {
+ done = 1;
+ } else {
+ oid = varbinds[0].oid;
+ }
+ }
+ }
+
+ if (done)
+ req.doneCb (null);
+ else
+ this.walk (oid, req.maxRepetitions, req.feedCb, req.doneCb,
+ req.baseOid);
+}
+
+Session.prototype.walk = function () {
+ var me = this;
+ var oid = arguments[0];
+ var maxRepetitions, feedCb, doneCb, baseOid;
+
+ if (arguments.length < 4) {
+ maxRepetitions = 20;
+ feedCb = arguments[1];
+ doneCb = arguments[2];
+ } else {
+ maxRepetitions = arguments[1];
+ feedCb = arguments[2];
+ doneCb = arguments[3];
+ }
+
+ var req = {
+ maxRepetitions: maxRepetitions,
+ feedCb: feedCb,
+ doneCb: doneCb
+ };
+
+ if (this.version == Version2c)
+ this.getBulk ([oid], 0, maxRepetitions,
+ walkCb.bind (me, req));
+ else
+ this.getNext ([oid], walkCb.bind (me, req));
+
+ return this;
+};
+
+/*****************************************************************************
+ ** Exports
+ **/
+
+exports.Session = Session;
+
+exports.createSession = function (target, community, options) {
+ return new Session (target, community, options);
+};
+
+exports.isVarbindError = isVarbindError;
+exports.varbindError = varbindError;
+
+exports.Version1 = Version1;
+exports.Version2c = Version2c;
+
+exports.ErrorStatus = ErrorStatus;
+exports.TrapType = TrapType;
+exports.ObjectType = ObjectType;
+
+exports.ResponseInvalidError = ResponseInvalidError;
+exports.RequestInvalidError = RequestInvalidError;
+exports.RequestFailedError = RequestFailedError;
+exports.RequestTimedOutError = RequestTimedOutError;
+
+/**
+ ** We've added this for testing.
+ **/
+exports.ObjectParser = {
+ readInt: readInt,
+ readUint: readUint
+};
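
A minimal usage sketch of the Session API exported above. The target address, community string and OID below are placeholders for illustration, not values taken from this module:

```js
// query sysName.0 (1.3.6.1.2.1.1.5.0) from a hypothetical SNMP agent
var snmp = require ("net-snmp");

var session = snmp.createSession ("192.0.2.1", "public", { version: snmp.Version2c });

session.get (["1.3.6.1.2.1.1.5.0"], function (error, varbinds) {
    if (error) {
        console.error (error.toString ());
    } else if (snmp.isVarbindError (varbinds[0])) {
        console.error (snmp.varbindError (varbinds[0]));
    } else {
        console.log (varbinds[0].oid + " = " + varbinds[0].value);
    }
    session.close ();
});
```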
diff --git a/collectors/node.d.plugin/node_modules/netdata.js b/collectors/node.d.plugin/node_modules/netdata.js
new file mode 100644
index 000000000..603922c6e
--- /dev/null
+++ b/collectors/node.d.plugin/node_modules/netdata.js
@@ -0,0 +1,654 @@
+'use strict';
+
+// netdata
+// real-time performance and health monitoring, done right!
+// (C) 2016 Costa Tsaousis <costa@tsaousis.gr>
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+var url = require('url');
+var http = require('http');
+var util = require('util');
+
+/*
+var netdata = require('netdata');
+
+var example_chart = {
+ id: 'id', // the unique id of the chart
+ name: 'name', // the name of the chart
+ title: 'title', // the title of the chart
+ units: 'units', // the units of the chart dimensions
+ family: 'family', // the family of the chart
+ context: 'context', // the context of the chart
+ type: netdata.chartTypes.line, // the type of the chart
+ priority: 0, // the priority relative to others in the same family
+ update_every: 1, // the expected update frequency of the chart
+ dimensions: {
+ 'dim1': {
+ id: 'dim1', // the unique id of the dimension
+ name: 'name', // the name of the dimension
+ algorithm: netdata.chartAlgorithms.absolute, // the id of the netdata algorithm
+ multiplier: 1, // the multiplier
+ divisor: 1, // the divisor
+ hidden: false, // is hidden (boolean)
+ },
+ 'dim2': {
+ id: 'dim2', // the unique id of the dimension
+ name: 'name', // the name of the dimension
+ algorithm: 'absolute', // the id of the netdata algorithm
+ multiplier: 1, // the multiplier
+ divisor: 1, // the divisor
+ hidden: false, // is hidden (boolean)
+ }
+ // add as many dimensions as needed
+ }
+};
+*/
+
+var netdata = {
+ options: {
+ filename: __filename,
+ DEBUG: false,
+ update_every: 1
+ },
+
+ chartAlgorithms: {
+ incremental: 'incremental',
+ absolute: 'absolute',
+ percentage_of_absolute_row: 'percentage-of-absolute-row',
+ percentage_of_incremental_row: 'percentage-of-incremental-row'
+ },
+
+ chartTypes: {
+ line: 'line',
+ area: 'area',
+ stacked: 'stacked'
+ },
+
+ services: new Array(),
+ modules_configuring: 0,
+ charts: {},
+
+ processors: {
+ http: {
+ name: 'http',
+
+ process: function(service, callback) {
+ var __DEBUG = netdata.options.DEBUG;
+
+ if(__DEBUG === true)
+ netdata.debug(service.module.name + ': ' + service.name + ': making ' + this.name + ' request: ' + netdata.stringify(service.request));
+
+ var req = http.request(service.request, function(response) {
+ if(__DEBUG === true) netdata.debug(service.module.name + ': ' + service.name + ': got server response...');
+
+ var end = false;
+ var data = '';
+ response.setEncoding('utf8');
+
+ if(response.statusCode !== 200) {
+ if(end === false) {
+ service.error('Got HTTP code ' + response.statusCode + ', failed to get data.');
+ end = true;
+ return callback(null);
+ }
+ }
+
+ response.on('data', function(chunk) {
+ if(end === false) data += chunk;
+ });
+
+ response.on('error', function() {
+ if(end === false) {
+ service.error(': Read error, failed to get data.');
+ end = true;
+ return callback(null);
+ }
+ });
+
+ response.on('end', function() {
+ if(end === false) {
+ if(__DEBUG === true) netdata.debug(service.module.name + ': ' + service.name + ': read completed.');
+ end = true;
+ return callback(data);
+ }
+ });
+ });
+
+ req.on('error', function(e) {
+ if(__DEBUG === true) netdata.debug('Failed to make request: ' + netdata.stringify(service.request) + ', message: ' + e.message);
+ service.error('Failed to make request, message: ' + e.message);
+ return callback(null);
+ });
+
+ // write data to request body
+ if(typeof service.postData !== 'undefined' && service.request.method === 'POST') {
+ if(__DEBUG === true) netdata.debug(service.module.name + ': ' + service.name + ': posting data: ' + service.postData);
+ req.write(service.postData);
+ }
+
+ req.end();
+ }
+ }
+ },
+
+ stringify: function(obj) {
+ return util.inspect(obj, {depth: 10});
+ },
+
+ zeropad2: function(s) {
+ return ("00" + s).slice(-2);
+ },
+
+ logdate: function(d) {
+ if(typeof d === 'undefined') d = new Date();
+ return d.getFullYear().toString() + '-' + this.zeropad2(d.getMonth() + 1) + '-' + this.zeropad2(d.getDate())
+ + ' ' + this.zeropad2(d.getHours()) + ':' + this.zeropad2(d.getMinutes()) + ':' + this.zeropad2(d.getSeconds());
+ },
+
+ // show debug info, if debug is enabled
+ debug: function(msg) {
+ if(this.options.DEBUG === true) {
+ console.error(this.logdate() + ': ' + netdata.options.filename + ': DEBUG: ' + ((typeof(msg) === 'object')?netdata.stringify(msg):msg).toString());
+ }
+ },
+
+ // log an error
+ error: function(msg) {
+ console.error(this.logdate() + ': ' + netdata.options.filename + ': ERROR: ' + ((typeof(msg) === 'object')?netdata.stringify(msg):msg).toString());
+ },
+
+ // send data to netdata
+ send: function(msg) {
+ console.log(msg.toString());
+ },
+
+ service: function(service) {
+ if(typeof service === 'undefined')
+ service = {};
+
+ var now = Date.now();
+
+ service._current_chart = null; // the current chart we work on
+ service._queue = ''; // data to be sent to netdata
+
+ service.error_reported = false; // error log flood control
+
+ service.added = false; // added to netdata.services
+ service.enabled = true;
+ service.updates = 0;
+ service.running = false;
+ service.started = 0;
+ service.ended = 0;
+
+ if(typeof service.module === 'undefined') {
+ service.module = { name: 'not-defined-module' };
+ service.error('Attempted to create service without a module.');
+ service.enabled = false;
+ }
+
+ if(typeof service.name === 'undefined') {
+ service.name = 'unnamed@' + service.module.name + '/' + now;
+ }
+
+ if(typeof service.processor === 'undefined')
+ service.processor = netdata.processors.http;
+
+ if(typeof service.update_every === 'undefined')
+ service.update_every = service.module.update_every;
+
+ if(typeof service.update_every === 'undefined')
+ service.update_every = netdata.options.update_every;
+
+ if(service.update_every < netdata.options.update_every)
+ service.update_every = netdata.options.update_every;
+
+ // align the runs
+ service.next_run = now - (now % (service.update_every * 1000)) + (service.update_every * 1000);
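+        // e.g. with update_every = 1 (second) and now = 1000500 ms: 1000500 - 500 + 1000 = 1001000 ms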
+
+ service.commit = function() {
+ if(this.added !== true) {
+ this.added = true;
+
+ var now = Date.now();
+ this.next_run = now - (now % (service.update_every * 1000)) + (service.update_every * 1000);
+
+ netdata.services.push(this);
+ if(netdata.options.DEBUG === true) netdata.debug(this.module.name + ': ' + this.name + ': service committed.');
+ }
+ };
+
+ service.execute = function(responseProcessor) {
+ var __DEBUG = netdata.options.DEBUG;
+
+ if(service.enabled === false)
+ return responseProcessor(null);
+
+ this.module.active++;
+ this.running = true;
+ this.started = Date.now();
+ this.updates++;
+
+ if(__DEBUG === true)
+ netdata.debug(this.module.name + ': ' + this.name + ': making ' + this.processor.name + ' request: ' + netdata.stringify(this));
+
+ this.processor.process(this, function(response) {
+ service.ended = Date.now();
+ service.duration = service.ended - service.started;
+
+ if(typeof response === 'undefined')
+ response = null;
+
+ if(response !== null)
+ service.errorClear();
+
+ if(__DEBUG === true)
+ netdata.debug(service.module.name + ': ' + service.name + ': processing ' + service.processor.name + ' response (received in ' + (service.ended - service.started).toString() + ' ms)');
+
+ try {
+ responseProcessor(service, response);
+ }
+ catch(e) {
+ netdata.error(e);
+                    service.error("responseProcessor failed to process response data.");
+ }
+
+ service.running = false;
+ service.module.active--;
+ if(service.module.active < 0) {
+ service.module.active = 0;
+ if(__DEBUG === true)
+ netdata.debug(service.module.name + ': active module counter below zero.');
+ }
+
+ if(service.module.active === 0) {
+ // check if we run under configure
+ if(service.module.configure_callback !== null) {
+ if(__DEBUG === true)
+ netdata.debug(service.module.name + ': configuration finish callback called from processResponse().');
+
+ var configure_callback = service.module.configure_callback;
+ service.module.configure_callback = null;
+ configure_callback();
+ }
+ }
+ });
+ };
+
+ service.update = function() {
+ if(netdata.options.DEBUG === true)
+ netdata.debug(this.module.name + ': ' + this.name + ': starting data collection...');
+
+ this.module.update(this, function() {
+ if(netdata.options.DEBUG === true)
+ netdata.debug(service.module.name + ': ' + service.name + ': data collection ended in ' + service.duration.toString() + ' ms.');
+ });
+ };
+
+ service.error = function(message) {
+ if(this.error_reported === false) {
+ netdata.error(this.module.name + ': ' + this.name + ': ' + message);
+ this.error_reported = true;
+ }
+ else if(netdata.options.DEBUG === true)
+ netdata.debug(this.module.name + ': ' + this.name + ': ' + message);
+ };
+
+ service.errorClear = function() {
+ this.error_reported = false;
+ };
+
+ service.queue = function(txt) {
+ this._queue += txt + '\n';
+ };
+
+ service._send_chart_to_netdata = function(chart) {
+ // internal function to send a chart to netdata
+ this.queue('CHART "' + chart.id + '" "' + chart.name + '" "' + chart.title + '" "' + chart.units + '" "' + chart.family + '" "' + chart.context + '" "' + chart.type + '" ' + chart.priority.toString() + ' ' + chart.update_every.toString());
+
+ if(typeof(chart.dimensions) !== 'undefined') {
+ var dims = Object.keys(chart.dimensions);
+ var len = dims.length;
+ while(len--) {
+ var d = chart.dimensions[dims[len]];
+
+ this.queue('DIMENSION "' + d.id + '" "' + d.name + '" "' + d.algorithm + '" ' + d.multiplier.toString() + ' ' + d.divisor.toString() + ' ' + ((d.hidden === true) ? 'hidden' : '').toString());
+ d._created = true;
+ d._updated = false;
+ }
+ }
+
+ chart._created = true;
+ chart._updated = false;
+ };
+
+ // begin data collection for a chart
+ service.begin = function(chart) {
+ if(this._current_chart !== null && this._current_chart !== chart) {
+ this.error('Called begin() for chart ' + chart.id + ' while chart ' + this._current_chart.id + ' is still open. Closing it.');
+ this.end();
+ }
+
+ if(typeof(chart.id) === 'undefined' || netdata.charts[chart.id] !== chart) {
+ this.error('Called begin() for chart ' + chart.id + ' that is not mine. Where did you find it? Ignoring it.');
+ return false;
+ }
+
+ if(netdata.options.DEBUG === true) netdata.debug('setting current chart to ' + chart.id);
+ this._current_chart = chart;
+ this._current_chart._began = true;
+
+ if(this._current_chart._dimensions_count !== 0) {
+ if(this._current_chart._created === false || this._current_chart._updated === true)
+ this._send_chart_to_netdata(this._current_chart);
+
+ var now = this.ended;
+ this.queue('BEGIN ' + this._current_chart.id + ' ' + ((this._current_chart._last_updated > 0)?((now - this._current_chart._last_updated) * 1000):'').toString());
+ }
+ // else this.error('Called begin() for chart ' + chart.id + ' which is empty.');
+
+ this._current_chart._last_updated = now;
+ this._current_chart._began = true;
+ this._current_chart._counter++;
+
+ return true;
+ };
+
+ // set a collected value for a chart
+ // we do most things on the first value we attempt to set
+ service.set = function(dimension, value) {
+ if(this._current_chart === null) {
+ this.error('Called set(' + dimension + ', ' + value + ') without an open chart.');
+ return false;
+ }
+
+ if(typeof(this._current_chart.dimensions[dimension]) === 'undefined') {
+ this.error('Called set(' + dimension + ', ' + value + ') but dimension "' + dimension + '" does not exist in chart "' + this._current_chart.id + '".');
+ return false;
+ }
+
+ if(typeof value === 'undefined' || value === null)
+ return false;
+
+ if(this._current_chart._dimensions_count !== 0)
+ this.queue('SET ' + dimension + ' = ' + value.toString());
+
+ return true;
+ };
+
+ // end data collection for the current chart - after calling begin()
+ service.end = function() {
+ if(this._current_chart !== null && this._current_chart._began === false) {
+ this.error('Called end() without an open chart.');
+ return false;
+ }
+
+ if(this._current_chart._dimensions_count !== 0) {
+ this.queue('END');
+ netdata.send(this._queue);
+ }
+
+ this._queue = '';
+ this._current_chart._began = false;
+ if(netdata.options.DEBUG === true) netdata.debug('sent chart ' + this._current_chart.id);
+ this._current_chart = null;
+ return true;
+ };
+
+ // discard the collected values for the current chart - after calling begin()
+ service.flush = function() {
+ if(this._current_chart === null || this._current_chart._began === false) {
+ this.error('Called flush() without an open chart.');
+ return false;
+ }
+
+ this._queue = '';
+ this._current_chart._began = false;
+ this._current_chart = null;
+ return true;
+ };
+
+ // create a netdata chart
+ service.chart = function(id, chart) {
+ var __DEBUG = netdata.options.DEBUG;
+
+ if(typeof(netdata.charts[id]) === 'undefined') {
+ netdata.charts[id] = {
+ _created: false,
+ _updated: true,
+ _began: false,
+ _counter: 0,
+ _last_updated: 0,
+ _dimensions_count: 0,
+ id: id,
+ name: id,
+ title: 'untitled chart',
+ units: 'a unit',
+ family: '',
+ context: '',
+ type: netdata.chartTypes.line,
+ priority: 50000,
+ update_every: netdata.options.update_every,
+ dimensions: {}
+ };
+ }
+
+ var c = netdata.charts[id];
+
+ if(typeof(chart.name) !== 'undefined' && chart.name !== c.name) {
+ if(__DEBUG === true) netdata.debug('chart ' + id + ' updated its name');
+ c.name = chart.name;
+ c._updated = true;
+ }
+
+ if(typeof(chart.title) !== 'undefined' && chart.title !== c.title) {
+ if(__DEBUG === true) netdata.debug('chart ' + id + ' updated its title');
+ c.title = chart.title;
+ c._updated = true;
+ }
+
+ if(typeof(chart.units) !== 'undefined' && chart.units !== c.units) {
+ if(__DEBUG === true) netdata.debug('chart ' + id + ' updated its units');
+ c.units = chart.units;
+ c._updated = true;
+ }
+
+ if(typeof(chart.family) !== 'undefined' && chart.family !== c.family) {
+ if(__DEBUG === true) netdata.debug('chart ' + id + ' updated its family');
+ c.family = chart.family;
+ c._updated = true;
+ }
+
+ if(typeof(chart.context) !== 'undefined' && chart.context !== c.context) {
+ if(__DEBUG === true) netdata.debug('chart ' + id + ' updated its context');
+ c.context = chart.context;
+ c._updated = true;
+ }
+
+ if(typeof(chart.type) !== 'undefined' && chart.type !== c.type) {
+ if(__DEBUG === true) netdata.debug('chart ' + id + ' updated its type');
+ c.type = chart.type;
+ c._updated = true;
+ }
+
+ if(typeof(chart.priority) !== 'undefined' && chart.priority !== c.priority) {
+ if(__DEBUG === true) netdata.debug('chart ' + id + ' updated its priority');
+ c.priority = chart.priority;
+ c._updated = true;
+ }
+
+ if(typeof(chart.update_every) !== 'undefined' && chart.update_every !== c.update_every) {
+ if(__DEBUG === true) netdata.debug('chart ' + id + ' updated its update_every from ' + c.update_every + ' to ' + chart.update_every);
+ c.update_every = chart.update_every;
+ c._updated = true;
+ }
+
+ if(typeof(chart.dimensions) !== 'undefined') {
+ var dims = Object.keys(chart.dimensions);
+ var len = dims.length;
+ while(len--) {
+ var x = dims[len];
+
+ if(typeof(c.dimensions[x]) === 'undefined') {
+ c._dimensions_count++;
+
+ c.dimensions[x] = {
+ _created: false,
+ _updated: false,
+ id: x, // the unique id of the dimension
+ name: x, // the name of the dimension
+ algorithm: netdata.chartAlgorithms.absolute, // the id of the netdata algorithm
+ multiplier: 1, // the multiplier
+ divisor: 1, // the divisor
+ hidden: false // is hidden (boolean)
+ };
+
+ if(__DEBUG === true) netdata.debug('chart ' + id + ' created dimension ' + x);
+ c._updated = true;
+ }
+
+ var dim = chart.dimensions[x];
+ var d = c.dimensions[x];
+
+ if(typeof(dim.name) !== 'undefined' && d.name !== dim.name) {
+ if(__DEBUG === true) netdata.debug('chart ' + id + ', dimension ' + x + ' updated its name');
+ d.name = dim.name;
+ d._updated = true;
+ }
+
+ if(typeof(dim.algorithm) !== 'undefined' && d.algorithm !== dim.algorithm) {
+ if(__DEBUG === true) netdata.debug('chart ' + id + ', dimension ' + x + ' updated its algorithm from ' + d.algorithm + ' to ' + dim.algorithm);
+ d.algorithm = dim.algorithm;
+ d._updated = true;
+ }
+
+ if(typeof(dim.multiplier) !== 'undefined' && d.multiplier !== dim.multiplier) {
+ if(__DEBUG === true) netdata.debug('chart ' + id + ', dimension ' + x + ' updated its multiplier');
+ d.multiplier = dim.multiplier;
+ d._updated = true;
+ }
+
+ if(typeof(dim.divisor) !== 'undefined' && d.divisor !== dim.divisor) {
+ if(__DEBUG === true) netdata.debug('chart ' + id + ', dimension ' + x + ' updated its divisor');
+ d.divisor = dim.divisor;
+ d._updated = true;
+ }
+
+ if(typeof(dim.hidden) !== 'undefined' && d.hidden !== dim.hidden) {
+ if(__DEBUG === true) netdata.debug('chart ' + id + ', dimension ' + x + ' updated its hidden status');
+ d.hidden = dim.hidden;
+ d._updated = true;
+ }
+
+ if(d._updated) c._updated = true;
+ }
+ }
+
+ //if(netdata.options.DEBUG === true) netdata.debug(netdata.charts);
+ return netdata.charts[id];
+ };
+
+ return service;
+ },
+
+ runAllServices: function() {
+ if(netdata.options.DEBUG === true) netdata.debug('runAllServices()');
+
+ var now = Date.now();
+ var len = netdata.services.length;
+ while(len--) {
+ var service = netdata.services[len];
+
+ if(service.enabled === false || service.running === true) continue;
+ if(now <= service.next_run) continue;
+
+ service.update();
+
+ now = Date.now();
+ service.next_run = now - (now % (service.update_every * 1000)) + (service.update_every * 1000);
+ }
+
+ // 1/10th of update_every in pause
+ setTimeout(netdata.runAllServices, netdata.options.update_every * 100);
+ },
+
+ start: function() {
+ if(netdata.options.DEBUG === true) this.debug('started, services: ' + netdata.stringify(this.services));
+
+ if(this.services.length === 0) {
+ this.disableNodePlugin();
+
+ // eslint suggested way to exit
+ var exit = process.exit;
+ exit(1);
+ }
+ else this.runAllServices();
+ },
+
+ // disable the whole node.js plugin
+ disableNodePlugin: function() {
+ this.send('DISABLE');
+
+ // eslint suggested way to exit
+ var exit = process.exit;
+ exit(1);
+ },
+
+ requestFromParams: function(protocol, hostname, port, path, method) {
+ return {
+ protocol: protocol,
+ hostname: hostname,
+ port: port,
+ path: path,
+ //family: 4,
+ method: method,
+ headers: {
+ 'Content-Type': 'application/x-www-form-urlencoded',
+ 'Connection': 'keep-alive'
+ },
+ agent: new http.Agent({
+ keepAlive: true,
+ keepAliveMsecs: netdata.options.update_every * 1000,
+ maxSockets: 2, // it must be 2 to work
+ maxFreeSockets: 1
+ })
+ };
+ },
+
+ requestFromURL: function(a_url) {
+ var u = url.parse(a_url);
+ return netdata.requestFromParams(u.protocol, u.hostname, u.port, u.path, 'GET');
+ },
+
+ configure: function(module, config, callback) {
+ if(netdata.options.DEBUG === true) this.debug(module.name + ': configuring (update_every: ' + this.options.update_every + ')...');
+
+ module.active = 0;
+ module.update_every = this.options.update_every;
+
+ if(typeof config.update_every !== 'undefined')
+ module.update_every = config.update_every;
+
+ module.enable_autodetect = (config.enable_autodetect)?true:false;
+
+ if(typeof(callback) === 'function')
+ module.configure_callback = callback;
+ else
+ module.configure_callback = null;
+
+ var added = module.configure(config);
+
+ if(netdata.options.DEBUG === true) this.debug(module.name + ': configured, reporting ' + added + ' eligible services.');
+
+ if(module.configure_callback !== null && added === 0) {
+ if(netdata.options.DEBUG === true) this.debug(module.name + ': configuration finish callback called from configure().');
+ var configure_callback = module.configure_callback;
+ module.configure_callback = null;
+ configure_callback();
+ }
+
+ return added;
+ }
+};
+
+if(netdata.options.DEBUG === true) netdata.debug('loaded netdata from:', __filename);
+module.exports = netdata;
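
A sketch of how a node.d module typically consumes the helper above. The module name, URL path, chart id and the way the response body is parsed are all illustrative assumptions, not an actual collector:

```js
// hypothetical node.d module built on the netdata helper
var netdata = require('netdata');

var example = {
    name: 'example',
    enable_autodetect: false,
    update_every: 1,
    base_priority: 60000,
    charts: {},

    processResponse: function(service, data) {
        if (data === null) return;

        // add the service to netdata.services on the first successful response
        if (service.added !== true)
            service.commit();

        // create (or update) the chart and send one value
        var chart = service.chart('example_' + service.name + '.value', {
            title: 'An example value',
            units: 'units',
            family: service.name,
            context: 'example.value',
            type: netdata.chartTypes.line,
            priority: example.base_priority,
            update_every: service.update_every,
            dimensions: {
                'value': { id: 'value', algorithm: netdata.chartAlgorithms.absolute }
            }
        });

        service.begin(chart);
        service.set('value', parseInt(data, 10)); // assumes the body is a plain number
        service.end();
    },

    update: function(service, callback) {
        service.execute(function(serv, response) {
            example.processResponse(serv, response);
            callback();
        });
    },

    configure: function(config) {
        var added = 0;
        if (typeof config.servers !== 'undefined') {
            var len = config.servers.length;
            while (len--) {
                var srv = config.servers[len];
                netdata.service({
                    name: srv.name,
                    request: netdata.requestFromURL('http://' + srv.hostname + '/value'),
                    update_every: srv.update_every,
                    module: this
                }).execute(this.processResponse);
                added++;
            }
        }
        return added;
    }
};

module.exports = example;
```

The real collectors in this directory, such as sma_webbox.node.js below, follow the same configure / update / processResponse shape.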
diff --git a/collectors/node.d.plugin/node_modules/pixl-xml.js b/collectors/node.d.plugin/node_modules/pixl-xml.js
new file mode 100644
index 000000000..48de89e77
--- /dev/null
+++ b/collectors/node.d.plugin/node_modules/pixl-xml.js
@@ -0,0 +1,607 @@
+// SPDX-License-Identifier: MIT
+/*
+ JavaScript XML Library
+ Plus a bunch of object utility functions
+
+ Usage:
+ var XML = require('pixl-xml');
+ var myxmlstring = '<?xml version="1.0"?><Document>' +
+ '<Simple>Hello</Simple>' +
+ '<Node Key="Value">Content</Node>' +
+ '</Document>';
+
+ var tree = XML.parse( myxmlstring, { preserveAttributes: true });
+ console.log( tree );
+
+ tree.Simple = "Hello2";
+ tree.Node._Attribs.Key = "Value2";
+ tree.Node._Data = "Content2";
+ tree.New = "I added this";
+
+ console.log( XML.stringify( tree, 'Document' ) );
+
+ Copyright (c) 2004 - 2015 Joseph Huckaby
+ Released under the MIT License
+ This version is for Node.JS, converted in 2012.
+*/
+
+var fs = require('fs');
+
+var indent_string = "\t";
+var xml_header = '<?xml version="1.0"?>';
+var sort_args = null;
+var re_valid_tag_name = /^\w[\w\-\:]*$/;
+
+var XML = exports.XML = function XML(args) {
+ // class constructor for XML parser class
+ // pass in args hash or text to parse
+ if (!args) args = '';
+ if (isa_hash(args)) {
+ for (var key in args) this[key] = args[key];
+ }
+ else this.text = args || '';
+
+ // stringify buffers
+ if (this.text instanceof Buffer) {
+ this.text = this.text.toString();
+ }
+
+ if (!this.text.match(/^\s*</)) {
+ // try as file path
+ var file = this.text;
+ this.text = fs.readFileSync(file, { encoding: 'utf8' });
+ if (!this.text) throw new Error("File not found: " + file);
+ }
+
+ this.tree = {};
+ this.errors = [];
+ this.piNodeList = [];
+ this.dtdNodeList = [];
+ this.documentNodeName = '';
+
+ if (this.lowerCase) {
+ this.attribsKey = this.attribsKey.toLowerCase();
+ this.dataKey = this.dataKey.toLowerCase();
+ }
+
+ this.patTag.lastIndex = 0;
+ if (this.text) this.parse();
+}
+
+XML.prototype.preserveAttributes = false;
+XML.prototype.lowerCase = false;
+
+XML.prototype.patTag = /([^<]*?)<([^>]+)>/g;
+XML.prototype.patSpecialTag = /^\s*([\!\?])/;
+XML.prototype.patPITag = /^\s*\?/;
+XML.prototype.patCommentTag = /^\s*\!--/;
+XML.prototype.patDTDTag = /^\s*\!DOCTYPE/;
+XML.prototype.patCDATATag = /^\s*\!\s*\[\s*CDATA/;
+XML.prototype.patStandardTag = /^\s*(\/?)([\w\-\:\.]+)\s*(.*)$/;
+XML.prototype.patSelfClosing = /\/\s*$/;
+XML.prototype.patAttrib = new RegExp("([\\w\\-\\:\\.]+)\\s*=\\s*([\\\"\\'])([^\\2]*?)\\2", "g");
+XML.prototype.patPINode = /^\s*\?\s*([\w\-\:]+)\s*(.*)$/;
+XML.prototype.patEndComment = /--$/;
+XML.prototype.patNextClose = /([^>]*?)>/g;
+XML.prototype.patExternalDTDNode = new RegExp("^\\s*\\!DOCTYPE\\s+([\\w\\-\\:]+)\\s+(SYSTEM|PUBLIC)\\s+\\\"([^\\\"]+)\\\"");
+XML.prototype.patInlineDTDNode = /^\s*\!DOCTYPE\s+([\w\-\:]+)\s+\[/;
+XML.prototype.patEndDTD = /\]$/;
+XML.prototype.patDTDNode = /^\s*\!DOCTYPE\s+([\w\-\:]+)\s+\[(.*)\]/;
+XML.prototype.patEndCDATA = /\]\]$/;
+XML.prototype.patCDATANode = /^\s*\!\s*\[\s*CDATA\s*\[([^]*)\]\]/;
+
+XML.prototype.attribsKey = '_Attribs';
+XML.prototype.dataKey = '_Data';
+
+XML.prototype.parse = function(branch, name) {
+ // parse text into XML tree, recurse for nested nodes
+ if (!branch) branch = this.tree;
+ if (!name) name = null;
+ var foundClosing = false;
+ var matches = null;
+
+ // match each tag, plus preceding text
+ while ( matches = this.patTag.exec(this.text) ) {
+ var before = matches[1];
+ var tag = matches[2];
+
+ // text leading up to tag = content of parent node
+ if (before.match(/\S/)) {
+ if (typeof(branch[this.dataKey]) != 'undefined') branch[this.dataKey] += ' '; else branch[this.dataKey] = '';
+ branch[this.dataKey] += trim(decode_entities(before));
+ }
+
+ // parse based on tag type
+ if (tag.match(this.patSpecialTag)) {
+ // special tag
+ if (tag.match(this.patPITag)) tag = this.parsePINode(tag);
+ else if (tag.match(this.patCommentTag)) tag = this.parseCommentNode(tag);
+ else if (tag.match(this.patDTDTag)) tag = this.parseDTDNode(tag);
+ else if (tag.match(this.patCDATATag)) {
+ tag = this.parseCDATANode(tag);
+ if (typeof(branch[this.dataKey]) != 'undefined') branch[this.dataKey] += ' '; else branch[this.dataKey] = '';
+ branch[this.dataKey] += trim(decode_entities(tag));
+ } // cdata
+ else {
+ this.throwParseError( "Malformed special tag", tag );
+ break;
+ } // error
+
+ if (tag == null) break;
+ continue;
+ } // special tag
+ else {
+ // Tag is standard, so parse name and attributes (if any)
+ var matches = tag.match(this.patStandardTag);
+ if (!matches) {
+ this.throwParseError( "Malformed tag", tag );
+ break;
+ }
+
+ var closing = matches[1];
+ var nodeName = this.lowerCase ? matches[2].toLowerCase() : matches[2];
+ var attribsRaw = matches[3];
+
+ // If this is a closing tag, make sure it matches its opening tag
+ if (closing) {
+ if (nodeName == (name || '')) {
+ foundClosing = 1;
+ break;
+ }
+ else {
+ this.throwParseError( "Mismatched closing tag (expected </" + name + ">)", tag );
+ break;
+ }
+ } // closing tag
+ else {
+ // Not a closing tag, so parse attributes into hash. If tag
+ // is self-closing, no recursive parsing is needed.
+ var selfClosing = !!attribsRaw.match(this.patSelfClosing);
+ var leaf = {};
+ var attribs = leaf;
+
+ // preserve attributes means they go into a sub-hash named "_Attribs"
+ // the XML composer honors this for restoring the tree back into XML
+ if (this.preserveAttributes) {
+ leaf[this.attribsKey] = {};
+ attribs = leaf[this.attribsKey];
+ }
+
+ // parse attributes
+ this.patAttrib.lastIndex = 0;
+ while ( matches = this.patAttrib.exec(attribsRaw) ) {
+ var key = this.lowerCase ? matches[1].toLowerCase() : matches[1];
+ attribs[ key ] = decode_entities( matches[3] );
+ } // foreach attrib
+
+ // if no attribs found, but we created the _Attribs subhash, clean it up now
+ if (this.preserveAttributes && !num_keys(attribs)) {
+ delete leaf[this.attribsKey];
+ }
+
+ // Recurse for nested nodes
+ if (!selfClosing) {
+ this.parse( leaf, nodeName );
+ if (this.error()) break;
+ }
+
+ // Compress into simple node if text only
+ var num_leaf_keys = num_keys(leaf);
+ if ((typeof(leaf[this.dataKey]) != 'undefined') && (num_leaf_keys == 1)) {
+ leaf = leaf[this.dataKey];
+ }
+ else if (!num_leaf_keys) {
+ leaf = '';
+ }
+
+ // Add leaf to parent branch
+ if (typeof(branch[nodeName]) != 'undefined') {
+ if (isa_array(branch[nodeName])) {
+ branch[nodeName].push( leaf );
+ }
+ else {
+ var temp = branch[nodeName];
+ branch[nodeName] = [ temp, leaf ];
+ }
+ }
+ else {
+ branch[nodeName] = leaf;
+ }
+
+ if (this.error() || (branch == this.tree)) break;
+ } // not closing
+ } // standard tag
+ } // main reg exp
+
+ // Make sure we found the closing tag
+ if (name && !foundClosing) {
+ this.throwParseError( "Missing closing tag (expected </" + name + ">)", name );
+ }
+
+ // If we are the master node, finish parsing and setup our doc node
+ if (branch == this.tree) {
+ if (typeof(this.tree[this.dataKey]) != 'undefined') delete this.tree[this.dataKey];
+
+ if (num_keys(this.tree) > 1) {
+ this.throwParseError( 'Only one top-level node is allowed in document', first_key(this.tree) );
+ return;
+ }
+
+ this.documentNodeName = first_key(this.tree);
+ if (this.documentNodeName) {
+ this.tree = this.tree[this.documentNodeName];
+ }
+ }
+};
+
+XML.prototype.throwParseError = function(key, tag) {
+ // log error and locate current line number in source XML document
+ var parsedSource = this.text.substring(0, this.patTag.lastIndex);
+ var eolMatch = parsedSource.match(/\n/g);
+ var lineNum = (eolMatch ? eolMatch.length : 0) + 1;
+ lineNum -= tag.match(/\n/) ? tag.match(/\n/g).length : 0;
+
+ this.errors.push({
+ type: 'Parse',
+ key: key,
+ text: '<' + tag + '>',
+ line: lineNum
+ });
+
+ // Throw actual error (must wrap parse in try/catch)
+ throw new Error( this.getLastError() );
+};
+
+XML.prototype.error = function() {
+ // return number of errors
+ return this.errors.length;
+};
+
+XML.prototype.getError = function(error) {
+ // get formatted error
+ var text = '';
+ if (!error) return '';
+
+ text = (error.type || 'General') + ' Error';
+ if (error.code) text += ' ' + error.code;
+ text += ': ' + error.key;
+
+ if (error.line) text += ' on line ' + error.line;
+ if (error.text) text += ': ' + error.text;
+
+ return text;
+};
+
+XML.prototype.getLastError = function() {
+ // Get most recently thrown error in plain text format
+ if (!this.error()) return '';
+ return this.getError( this.errors[this.errors.length - 1] );
+};
+
+XML.prototype.parsePINode = function(tag) {
+ // Parse Processor Instruction Node, e.g. <?xml version="1.0"?>
+ if (!tag.match(this.patPINode)) {
+ this.throwParseError( "Malformed processor instruction", tag );
+ return null;
+ }
+
+ this.piNodeList.push( tag );
+ return tag;
+};
+
+XML.prototype.parseCommentNode = function(tag) {
+ // Parse Comment Node, e.g. <!-- hello -->
+ var matches = null;
+ this.patNextClose.lastIndex = this.patTag.lastIndex;
+
+ while (!tag.match(this.patEndComment)) {
+ if (matches = this.patNextClose.exec(this.text)) {
+ tag += '>' + matches[1];
+ }
+ else {
+ this.throwParseError( "Unclosed comment tag", tag );
+ return null;
+ }
+ }
+
+ this.patTag.lastIndex = this.patNextClose.lastIndex;
+ return tag;
+};
+
+XML.prototype.parseDTDNode = function(tag) {
+ // Parse Document Type Descriptor Node, e.g. <!DOCTYPE ... >
+ var matches = null;
+
+ if (tag.match(this.patExternalDTDNode)) {
+ // tag is external, and thus self-closing
+ this.dtdNodeList.push( tag );
+ }
+ else if (tag.match(this.patInlineDTDNode)) {
+ // Tag is inline, so check for nested nodes.
+ this.patNextClose.lastIndex = this.patTag.lastIndex;
+
+ while (!tag.match(this.patEndDTD)) {
+ if (matches = this.patNextClose.exec(this.text)) {
+ tag += '>' + matches[1];
+ }
+ else {
+ this.throwParseError( "Unclosed DTD tag", tag );
+ return null;
+ }
+ }
+
+ this.patTag.lastIndex = this.patNextClose.lastIndex;
+
+ // Make sure complete tag is well-formed, and push onto DTD stack.
+ if (tag.match(this.patDTDNode)) {
+ this.dtdNodeList.push( tag );
+ }
+ else {
+ this.throwParseError( "Malformed DTD tag", tag );
+ return null;
+ }
+ }
+ else {
+ this.throwParseError( "Malformed DTD tag", tag );
+ return null;
+ }
+
+ return tag;
+};
+
+XML.prototype.parseCDATANode = function(tag) {
+ // Parse CDATA Node, e.g. <![CDATA[Brooks & Shields]]>
+ var matches = null;
+ this.patNextClose.lastIndex = this.patTag.lastIndex;
+
+ while (!tag.match(this.patEndCDATA)) {
+ if (matches = this.patNextClose.exec(this.text)) {
+ tag += '>' + matches[1];
+ }
+ else {
+ this.throwParseError( "Unclosed CDATA tag", tag );
+ return null;
+ }
+ }
+
+ this.patTag.lastIndex = this.patNextClose.lastIndex;
+
+ if (matches = tag.match(this.patCDATANode)) {
+ return matches[1];
+ }
+ else {
+ this.throwParseError( "Malformed CDATA tag", tag );
+ return null;
+ }
+};
+
+XML.prototype.getTree = function() {
+ // get reference to parsed XML tree
+ return this.tree;
+};
+
+XML.prototype.compose = function() {
+ // compose tree back into XML
+ var raw = compose_xml( this.tree, this.documentNodeName );
+ var body = raw.substring( raw.indexOf("\n") + 1, raw.length );
+ var xml = '';
+
+ if (this.piNodeList.length) {
+ for (var idx = 0, len = this.piNodeList.length; idx < len; idx++) {
+ xml += '<' + this.piNodeList[idx] + '>' + "\n";
+ }
+ }
+ else {
+ xml += xml_header + "\n";
+ }
+
+ if (this.dtdNodeList.length) {
+ for (var idx = 0, len = this.dtdNodeList.length; idx < len; idx++) {
+ xml += '<' + this.dtdNodeList[idx] + '>' + "\n";
+ }
+ }
+
+ xml += body;
+ return xml;
+};
+
+//
+// Static Utility Functions:
+//
+
+var parse_xml = exports.parse = function parse_xml(text, opts) {
+ // turn text into XML tree quickly
+ if (!opts) opts = {};
+ opts.text = text;
+ var parser = new XML(opts);
+ return parser.error() ? parser.getLastError() : parser.getTree();
+};
+
+var trim = exports.trim = function trim(text) {
+ // strip whitespace from beginning and end of string
+ if (text == null) return '';
+
+ if (text && text.replace) {
+ text = text.replace(/^\s+/, "");
+ text = text.replace(/\s+$/, "");
+ }
+
+ return text;
+};
+
+var encode_entities = exports.encodeEntities = function encode_entities(text) {
+	// Simple entitize function for composing XML
+ if (text == null) return '';
+
+ if (text && text.replace) {
+ text = text.replace(/\&/g, "&amp;"); // MUST BE FIRST
+ text = text.replace(/</g, "&lt;");
+ text = text.replace(/>/g, "&gt;");
+ }
+
+ return text;
+};
+
+var encode_attrib_entities = exports.encodeAttribEntities = function encode_attrib_entities(text) {
+	// Simple entitize function for composing XML attributes
+ if (text == null) return '';
+
+ if (text && text.replace) {
+ text = text.replace(/\&/g, "&amp;"); // MUST BE FIRST
+ text = text.replace(/</g, "&lt;");
+ text = text.replace(/>/g, "&gt;");
+ text = text.replace(/\"/g, "&quot;");
+ text = text.replace(/\'/g, "&apos;");
+ }
+
+ return text;
+};
+
+var decode_entities = exports.decodeEntities = function decode_entities(text) {
+ // Decode XML entities into raw ASCII
+ if (text == null) return '';
+
+ if (text && text.replace && text.match(/\&/)) {
+ text = text.replace(/\&lt\;/g, "<");
+ text = text.replace(/\&gt\;/g, ">");
+ text = text.replace(/\&quot\;/g, '"');
+ text = text.replace(/\&apos\;/g, "'");
+ text = text.replace(/\&amp\;/g, "&"); // MUST BE LAST
+ }
+
+ return text;
+};
+
+var compose_xml = exports.stringify = function compose_xml(node, name, indent) {
+ // Compose node into XML including attributes
+ // Recurse for child nodes
+ var xml = "";
+
+ // If this is the root node, set the indent to 0
+ // and setup the XML header (PI node)
+ if (!indent) {
+ indent = 0;
+ xml = xml_header + "\n";
+
+ if (!name) {
+ // no name provided, assume content is wrapped in it
+ name = first_key(node);
+ node = node[name];
+ }
+ }
+
+ // Setup the indent text
+ var indent_text = "";
+ for (var k = 0; k < indent; k++) indent_text += indent_string;
+
+ if ((typeof(node) == 'object') && (node != null)) {
+ // node is object -- now see if it is an array or hash
+ if (!node.length) { // what about zero-length array?
+ // node is hash
+ xml += indent_text + "<" + name;
+
+ var num_keys = 0;
+ var has_attribs = 0;
+ for (var key in node) num_keys++; // there must be a better way...
+
+ if (node["_Attribs"]) {
+ has_attribs = 1;
+ var sorted_keys = hash_keys_to_array(node["_Attribs"]).sort();
+ for (var idx = 0, len = sorted_keys.length; idx < len; idx++) {
+ var key = sorted_keys[idx];
+ xml += " " + key + "=\"" + encode_attrib_entities(node["_Attribs"][key]) + "\"";
+ }
+ } // has attribs
+
+ if (num_keys > has_attribs) {
+ // has child elements
+ xml += ">";
+
+ if (node["_Data"]) {
+ // simple text child node
+ xml += encode_entities(node["_Data"]) + "</" + name + ">\n";
+ } // just text
+ else {
+ xml += "\n";
+
+ var sorted_keys = hash_keys_to_array(node).sort();
+ for (var idx = 0, len = sorted_keys.length; idx < len; idx++) {
+ var key = sorted_keys[idx];
+ if ((key != "_Attribs") && key.match(re_valid_tag_name)) {
+ // recurse for node, with incremented indent value
+ xml += compose_xml( node[key], key, indent + 1 );
+ } // not _Attribs key
+ } // foreach key
+
+ xml += indent_text + "</" + name + ">\n";
+ } // real children
+ }
+ else {
+ // no child elements, so self-close
+ xml += "/>\n";
+ }
+ } // standard node
+ else {
+ // node is array
+ for (var idx = 0; idx < node.length; idx++) {
+ // recurse for node in array with same indent
+ xml += compose_xml( node[idx], name, indent );
+ }
+ } // array of nodes
+ } // complex node
+ else {
+ // node is simple string
+ xml += indent_text + "<" + name + ">" + encode_entities(node) + "</" + name + ">\n";
+ } // simple text node
+
+ return xml;
+};
+
+var always_array = exports.alwaysArray = function always_array(obj, key) {
+ // if object is not array, return array containing object
+ // if key is passed, work like XMLalwaysarray() instead
+ if (key) {
+ if ((typeof(obj[key]) != 'object') || (typeof(obj[key].length) == 'undefined')) {
+ var temp = obj[key];
+ delete obj[key];
+ obj[key] = new Array();
+ obj[key][0] = temp;
+ }
+ return null;
+ }
+ else {
+ if ((typeof(obj) != 'object') || (typeof(obj.length) == 'undefined')) { return [ obj ]; }
+ else return obj;
+ }
+};
+
+var hash_keys_to_array = exports.hashKeysToArray = function hash_keys_to_array(hash) {
+ // convert hash keys to array (discard values)
+ var array = [];
+ for (var key in hash) array.push(key);
+ return array;
+};
+
+var isa_hash = exports.isaHash = function isa_hash(arg) {
+ // determine if arg is a hash
+ return( !!arg && (typeof(arg) == 'object') && (typeof(arg.length) == 'undefined') );
+};
+
+var isa_array = exports.isaArray = function isa_array(arg) {
+ // determine if arg is an array or is array-like
+ if (typeof(arg) == 'array') return true;
+ return( !!arg && (typeof(arg) == 'object') && (typeof(arg.length) != 'undefined') );
+};
+
+var first_key = exports.firstKey = function first_key(hash) {
+ // return first key from hash (unordered)
+ for (var key in hash) return key;
+ return null; // no keys in hash
+};
+
+var num_keys = exports.numKeys = function num_keys(hash) {
+ // count the number of keys in a hash
+ var count = 0;
+ for (var a in hash) count++;
+ return count;
+};
diff --git a/collectors/node.d.plugin/sma_webbox/Makefile.inc b/collectors/node.d.plugin/sma_webbox/Makefile.inc
new file mode 100644
index 000000000..38f2fe97a
--- /dev/null
+++ b/collectors/node.d.plugin/sma_webbox/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_node_DATA += sma_webbox/sma_webbox.node.js
+# dist_nodeconfig_DATA += sma_webbox/sma_webbox.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += sma_webbox/README.md sma_webbox/Makefile.inc
+
diff --git a/collectors/node.d.plugin/sma_webbox/README.md b/collectors/node.d.plugin/sma_webbox/README.md
new file mode 100644
index 000000000..1512c7008
--- /dev/null
+++ b/collectors/node.d.plugin/sma_webbox/README.md
@@ -0,0 +1,25 @@
+# SMA Sunny Webbox
+
+[SMA Sunny Webbox](http://files.sma.de/dl/4253/WEBBOX-DUS131916W.pdf)
+
+Example netdata configuration for `node.d/sma_webbox.conf`.
+
+The module supports any number of servers, like this:
+
+```json
+{
+ "enable_autodetect": false,
+ "update_every": 5,
+ "servers": [
+ {
+ "name": "plant1",
+ "hostname": "10.0.1.1",
+ "update_every": 10
+ },
+ {
+ "name": "plant2",
+ "hostname": "10.0.2.1",
+ "update_every": 15
+ }
+ ]
+}
+```
diff --git a/collectors/node.d.plugin/sma_webbox/sma_webbox.node.js b/collectors/node.d.plugin/sma_webbox/sma_webbox.node.js
new file mode 100644
index 000000000..b9a168adc
--- /dev/null
+++ b/collectors/node.d.plugin/sma_webbox/sma_webbox.node.js
@@ -0,0 +1,238 @@
+'use strict';
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+// This program will connect to one or more SMA Sunny Webboxes
+// to get the Solar Power Generated (current, today, total).
+
+// example configuration in /etc/netdata/node.d/sma_webbox.conf
+/*
+{
+ "enable_autodetect": false,
+ "update_every": 5,
+ "servers": [
+ {
+ "name": "plant1",
+ "hostname": "10.0.1.1",
+ "update_every": 10
+ },
+ {
+ "name": "plant2",
+ "hostname": "10.0.2.1",
+ "update_every": 15
+ }
+ ]
+}
+*/
+
+require('url');
+require('http');
+var netdata = require('netdata');
+
+if(netdata.options.DEBUG === true) netdata.debug('loaded ' + __filename + ' plugin');
+
+var webbox = {
+ name: __filename,
+ enable_autodetect: true,
+ update_every: 1,
+ base_priority: 60000,
+ charts: {},
+
+ processResponse: function(service, data) {
+ if(data !== null) {
+ var r = JSON.parse(data);
+
+ var d = {
+ 'GriPwr': {
+ unit: null,
+ value: null
+ },
+ 'GriEgyTdy': {
+ unit: null,
+ value: null
+ },
+ 'GriEgyTot': {
+ unit: null,
+ value: null
+ }
+ };
+
+ // parse the webbox response
+ // and put it in our d object
+ var found = 0;
+ var len = r.result.overview.length;
+ while(len--) {
+ var e = r.result.overview[len];
+ if(typeof(d[e.meta]) !== 'undefined') {
+ found++;
+ d[e.meta].value = e.value;
+ d[e.meta].unit = e.unit;
+ }
+ }
+
+ // add the service
+ if(found > 0 && service.added !== true)
+ service.commit();
+
+ // Grid Current Power Chart
+ if(d['GriPwr'].value !== null) {
+ var id = 'smawebbox_' + service.name + '.current';
+ var chart = webbox.charts[id];
+
+ if(typeof chart === 'undefined') {
+ chart = {
+ id: id, // the unique id of the chart
+ name: '', // the unique name of the chart
+ title: service.name + ' Current Grid Power', // the title of the chart
+ units: d['GriPwr'].unit, // the units of the chart dimensions
+ family: 'now', // the family of the chart
+ context: 'smawebbox.grid_power', // the context of the chart
+ type: netdata.chartTypes.area, // the type of the chart
+ priority: webbox.base_priority + 1, // the priority relative to others in the same family
+ update_every: service.update_every, // the expected update frequency of the chart
+ dimensions: {
+ 'GriPwr': {
+ id: 'GriPwr', // the unique id of the dimension
+ name: 'power', // the name of the dimension
+ algorithm: netdata.chartAlgorithms.absolute,// the id of the netdata algorithm
+ multiplier: 1, // the multiplier
+ divisor: 1, // the divisor
+ hidden: false // is hidden (boolean)
+ }
+ }
+ };
+
+ chart = service.chart(id, chart);
+ webbox.charts[id] = chart;
+ }
+
+ service.begin(chart);
+ service.set('GriPwr', Math.round(d['GriPwr'].value));
+ service.end();
+ }
+
+ if(d['GriEgyTdy'].value !== null) {
+ var id = 'smawebbox_' + service.name + '.today';
+ var chart = webbox.charts[id];
+
+ if(typeof chart === 'undefined') {
+ chart = {
+ id: id, // the unique id of the chart
+ name: '', // the unique name of the chart
+ title: service.name + ' Today Grid Power', // the title of the chart
+ units: d['GriEgyTdy'].unit, // the units of the chart dimensions
+ family: 'today', // the family of the chart
+ context: 'smawebbox.grid_power_today', // the context of the chart
+ type: netdata.chartTypes.area, // the type of the chart
+ priority: webbox.base_priority + 2, // the priority relative to others in the same family
+ update_every: service.update_every, // the expected update frequency of the chart
+ dimensions: {
+ 'GriEgyTdy': {
+ id: 'GriEgyTdy', // the unique id of the dimension
+ name: 'power', // the name of the dimension
+ algorithm: netdata.chartAlgorithms.absolute,// the id of the netdata algorithm
+ multiplier: 1, // the multiplier
+ divisor: 1000, // the divisor
+ hidden: false // is hidden (boolean)
+ }
+ }
+ };
+
+ chart = service.chart(id, chart);
+ webbox.charts[id] = chart;
+ }
+
+ service.begin(chart);
+ service.set('GriEgyTdy', Math.round(d['GriEgyTdy'].value * 1000));
+ service.end();
+ }
+
+ if(d['GriEgyTot'].value !== null) {
+ var id = 'smawebbox_' + service.name + '.total';
+ var chart = webbox.charts[id];
+
+ if(typeof chart === 'undefined') {
+ chart = {
+ id: id, // the unique id of the chart
+ name: '', // the unique name of the chart
+ title: service.name + ' Total Grid Power', // the title of the chart
+ units: d['GriEgyTot'].unit, // the units of the chart dimensions
+ family: 'total', // the family of the chart
+ context: 'smawebbox.grid_power_total', // the context of the chart
+ type: netdata.chartTypes.area, // the type of the chart
+ priority: webbox.base_priority + 3, // the priority relative to others in the same family
+ update_every: service.update_every, // the expected update frequency of the chart
+ dimensions: {
+ 'GriEgyTot': {
+ id: 'GriEgyTot', // the unique id of the dimension
+ name: 'power', // the name of the dimension
+ algorithm: netdata.chartAlgorithms.absolute,// the id of the netdata algorithm
+ multiplier: 1, // the multiplier
+ divisor: 1000, // the divisor
+ hidden: false // is hidden (boolean)
+ }
+ }
+ };
+
+ chart = service.chart(id, chart);
+ webbox.charts[id] = chart;
+ }
+
+ service.begin(chart);
+ service.set('GriEgyTot', Math.round(d['GriEgyTot'].value * 1000));
+ service.end();
+ }
+ }
+ },
+
+ // module.serviceExecute()
+ // this function is called only from this module
+ // its purpose is to prepare the request and call
+ // netdata.serviceExecute()
+ serviceExecute: function(name, hostname, update_every) {
+ if(netdata.options.DEBUG === true) netdata.debug(this.name + ': ' + name + ': hostname: ' + hostname + ', update_every: ' + update_every);
+
+ var service = netdata.service({
+ name: name,
+ request: netdata.requestFromURL('http://' + hostname + '/rpc'),
+ update_every: update_every,
+ module: this
+ });
+ service.postData = 'RPC={"proc":"GetPlantOverview","format":"JSON","version":"1.0","id":"1"}';
+ service.request.method = 'POST';
+ service.request.headers['Content-Length'] = service.postData.length;
+
+ service.execute(this.processResponse);
+ },
+
+ configure: function(config) {
+ var added = 0;
+
+ if(typeof(config.servers) !== 'undefined') {
+ var len = config.servers.length;
+ while(len--) {
+ if(typeof config.servers[len].update_every === 'undefined')
+ config.servers[len].update_every = this.update_every;
+
+ if(config.servers[len].update_every < 5)
+ config.servers[len].update_every = 5;
+
+ this.serviceExecute(config.servers[len].name, config.servers[len].hostname, config.servers[len].update_every);
+ added++;
+ }
+ }
+
+ return added;
+ },
+
+ // module.update()
+    // this is called repeatedly to collect data, by calling
+    // service.execute()
+ update: function(service, callback) {
+ service.execute(function(serv, data) {
+ service.module.processResponse(serv, data);
+ callback();
+ });
+ },
+};
+
+module.exports = webbox;
diff --git a/collectors/node.d.plugin/snmp/Makefile.inc b/collectors/node.d.plugin/snmp/Makefile.inc
new file mode 100644
index 000000000..26448a1ce
--- /dev/null
+++ b/collectors/node.d.plugin/snmp/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_node_DATA += snmp/snmp.node.js
+# dist_nodeconfig_DATA += snmp/snmp.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += snmp/README.md snmp/Makefile.inc
+
diff --git a/collectors/node.d.plugin/snmp/README.md b/collectors/node.d.plugin/snmp/README.md
new file mode 100644
index 000000000..a307a3642
--- /dev/null
+++ b/collectors/node.d.plugin/snmp/README.md
@@ -0,0 +1,357 @@
+# SNMP Data Collector
+
+Using this collector, netdata can collect data from any SNMP device.
+
+This collector supports:
+
+- any number of SNMP devices
+- each SNMP device can be used to collect data for any number of charts
+- each chart may have any number of dimensions
+- each SNMP device may have a different update frequency
+- each SNMP device is queried in one or more batches (you can set `max_request_size` per SNMP server to control the batch size).
+
+## Configuration
+
+You will need to create the file `/etc/netdata/node.d/snmp.conf` with data like the following.
+
+In this example:
+
+ - the SNMP device is `10.11.12.8`.
+ - the SNMP community is `public`.
+ - we will update the values every 10 seconds (`update_every: 10` under the server `10.11.12.8`).
+ - we define 2 charts `snmp_switch.bandwidth_port1` and `snmp_switch.bandwidth_port2`, each having 2 dimensions: `in` and `out`.
+
+```json
+{
+ "enable_autodetect": false,
+ "update_every": 5,
+ "max_request_size": 100,
+ "servers": [
+ {
+ "hostname": "10.11.12.8",
+ "community": "public",
+ "update_every": 10,
+ "max_request_size": 50,
+ "options": { "timeout": 10000 },
+ "charts": {
+ "snmp_switch.bandwidth_port1": {
+ "title": "Switch Bandwidth for port 1",
+ "units": "kilobits/s",
+ "type": "area",
+ "priority": 1,
+ "family": "ports",
+ "dimensions": {
+ "in": {
+ "oid": "1.3.6.1.2.1.2.2.1.10.1",
+ "algorithm": "incremental",
+ "multiplier": 8,
+ "divisor": 1024,
+ "offset": 0
+ },
+ "out": {
+ "oid": "1.3.6.1.2.1.2.2.1.16.1",
+ "algorithm": "incremental",
+ "multiplier": -8,
+ "divisor": 1024,
+ "offset": 0
+ }
+ }
+ },
+ "snmp_switch.bandwidth_port2": {
+ "title": "Switch Bandwidth for port 2",
+ "units": "kilobits/s",
+ "type": "area",
+ "priority": 1,
+ "family": "ports",
+ "dimensions": {
+ "in": {
+ "oid": "1.3.6.1.2.1.2.2.1.10.2",
+ "algorithm": "incremental",
+ "multiplier": 8,
+ "divisor": 1024,
+ "offset": 0
+ },
+ "out": {
+ "oid": "1.3.6.1.2.1.2.2.1.16.2",
+ "algorithm": "incremental",
+ "multiplier": -8,
+ "divisor": 1024,
+ "offset": 0
+ }
+ }
+ }
+ }
+ }
+ ]
+}
+```
+
+`update_every` is the update frequency for each server, in seconds.
+
+`max_request_size` limits the maximum number of OIDs that will be requested in a single call. The default is 50. Lower this number if you get `TooBig` errors in the netdata error.log.
+
+`family` sets the dashboard submenu under which each chart will appear.
+
+If you need to define many charts using incremental OIDs, you can use something like the following.
+
+This is like the previous example, but the `multiply_range` option multiplies the chart definition from `1` to `24` inclusive, producing 24 charts in total, one for each of the 24 ports of the switch `10.11.12.8`.
+
+Each of the 24 generated charts will have its number (1-24) appended to:
+
+1. its chart unique id, i.e. `snmp_switch.bandwidth_port1` to `snmp_switch.bandwidth_port24`
+2. its `title`, i.e. `Switch Bandwidth for port 1` to `Switch Bandwidth for port 24`
+3. its `oid` (for all dimensions), i.e. dimension `in` will be `1.3.6.1.2.1.2.2.1.10.1` to `1.3.6.1.2.1.2.2.1.10.24`
+4. its priority (which is incremented for each chart, so the charts appear on the dashboard in this order)
+
+```json
+{
+ "enable_autodetect": false,
+ "update_every": 10,
+ "servers": [
+ {
+ "hostname": "10.11.12.8",
+ "community": "public",
+ "update_every": 10,
+ "options": { "timeout": 20000 },
+ "charts": {
+ "snmp_switch.bandwidth_port": {
+ "title": "Switch Bandwidth for port ",
+ "units": "kilobits/s",
+ "type": "area",
+ "priority": 1,
+ "family": "ports",
+ "multiply_range": [ 1, 24 ],
+ "dimensions": {
+ "in": {
+ "oid": "1.3.6.1.2.1.2.2.1.10.",
+ "algorithm": "incremental",
+ "multiplier": 8,
+ "divisor": 1024,
+ "offset": 0
+ },
+ "out": {
+ "oid": "1.3.6.1.2.1.2.2.1.16.",
+ "algorithm": "incremental",
+ "multiplier": -8,
+ "divisor": 1024,
+ "offset": 0
+ }
+ }
+ }
+ }
+ }
+ ]
+}
+```
+
+The `options` given for each server are the following (a combined example spelling out the defaults follows the list):
+
+ - `timeout`, the time to wait for the SNMP device to respond. The default is 5000 ms.
+ - `version`, the SNMP version to use. `0` is Version 1, `1` is Version 2c. The default is Version 1 (`0`).
+ - `transport`, the default is `udp4`.
+ - `port`, the port of the SNMP device to connect to. The default is `161`.
+ - `retries`, the number of attempts to make to fetch the data. The default is `1`.
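+
+For illustration only, here is an `options` object that spells out all of the defaults listed above (you normally include only the keys you want to change):
+
+```json
+"options": { "timeout": 5000, "version": 0, "transport": "udp4", "port": 161, "retries": 1 }
+```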
+
+## Retrieving names from SNMP
+
+You can append a value retrieved from SNMP to a chart title by adding `titleoid` to the chart.
+
+You can set a dimension name to a value retrieved from SNMP by adding `oidname` to the dimension.
+
+Both of the above participate in `multiply_range`.
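+
+As a minimal sketch (the OIDs below are placeholders taken from the examples on this page and from IF-MIB `ifName`; replace them with whatever your device exposes), a chart using both options could look like this:
+
+```json
+"snmp_switch.bandwidth_port": {
+  "title": "Switch Bandwidth for port ",
+  "titleoid": ".1.3.6.1.2.1.31.1.1.1.18.",
+  "units": "kilobits/s",
+  "type": "area",
+  "priority": 100,
+  "family": "ports",
+  "multiply_range": [ 1, 24 ],
+  "dimensions": {
+    "in": {
+      "oid": ".1.3.6.1.2.1.2.2.1.10.",
+      "oidname": ".1.3.6.1.2.1.31.1.1.1.1.",
+      "algorithm": "incremental",
+      "multiplier": 8,
+      "divisor": 1024,
+      "offset": 0
+    }
+  }
+}
+```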
+
+
+## Testing the configuration
+
+To test it, you can run:
+
+```sh
+/usr/libexec/netdata/plugins.d/node.d.plugin 1 snmp
+```
+
+The above runs the plugin on your console, so you can see both what netdata sees and any errors. You can get very detailed output by appending `debug` to the command line.
+
+If it works, restart netdata to activate the snmp collector and refresh the dashboard (if your SNMP device responds with a delay, you may need to refresh the dashboard again after a few seconds).
+
+## Data collection speed
+
+Keep in mind that many SNMP switches and routers are very slow. They may not be able to report values once per second. If you run `node.d.plugin` in `debug` mode, it will report the time it took for the SNMP device to respond. My switch, for example, needs 7-8 seconds to respond for the traffic on 24 ports (48 OIDs, in/out).
+
+Also, if you use many SNMP clients on the same SNMP device at the same time, values may be skipped. This is a problem of the SNMP device, not this collector.
+
+## Finding OIDs
+
+Use `snmpwalk`, like this:
+
+```sh
+snmpwalk -t 20 -v 1 -O fn -c public 10.11.12.8
+```
+
+- `-t 20` is the timeout in seconds
+- `-v 1` is the SNMP version
+- `-O fn` will display full OIDs in numeric format (you may also want to run it without this option to see human-readable OID output)
+- `-c public` is the SNMP community
+- `10.11.12.8` is the SNMP device
+
+Keep in mind that `snmpwalk` outputs the OIDs with a dot in front of them. You should remove this dot when adding OIDs to the configuration file of this collector; for example, `.1.3.6.1.2.1.2.2.1.10.1` becomes `1.3.6.1.2.1.2.2.1.10.1`.
+
+## Example: Linksys SRW2024P
+
+This is what I use for my Linksys SRW2024P. It creates:
+
+1. A chart for power consumption (it is a PoE switch)
+2. Two charts for packets received (total packets received and packets received with errors)
+3. One chart for packets output
+4. 24 charts, one for each port of the switch. It also appends the port names, as defined at the switch, to the chart titles.
+
+This switch also reports various other metrics, such as SNMP statistics, per-port packets, etc. Unfortunately, it does not report CPU or backplane utilization.
+
+This switch has a very slow SNMP processor. It needs about 8 seconds to respond, so I have set the refresh frequency (`update_every`) to 15 seconds.
+
+```json
+{
+ "enable_autodetect": false,
+ "update_every": 5,
+ "servers": [
+ {
+ "hostname": "10.11.12.8",
+ "community": "public",
+ "update_every": 15,
+ "options": { "timeout": 20000, "version": 1 },
+ "charts": {
+ "snmp_switch.power": {
+ "title": "Switch Power Supply",
+ "units": "watts",
+ "type": "line",
+ "priority": 10,
+ "family": "power",
+ "dimensions": {
+ "supply": {
+ "oid": ".1.3.6.1.2.1.105.1.3.1.1.2.1",
+ "algorithm": "absolute",
+ "multiplier": 1,
+ "divisor": 1,
+ "offset": 0
+ },
+ "used": {
+ "oid": ".1.3.6.1.2.1.105.1.3.1.1.4.1",
+ "algorithm": "absolute",
+ "multiplier": 1,
+ "divisor": 1,
+ "offset": 0
+ }
+ }
+ }
+ , "snmp_switch.input": {
+ "title": "Switch Packets Input",
+ "units": "packets/s",
+ "type": "area",
+ "priority": 20,
+ "family": "IP",
+ "dimensions": {
+ "receives": {
+ "oid": ".1.3.6.1.2.1.4.3.0",
+ "algorithm": "incremental",
+ "multiplier": 1,
+ "divisor": 1,
+ "offset": 0
+ }
+ , "discards": {
+ "oid": ".1.3.6.1.2.1.4.8.0",
+ "algorithm": "incremental",
+ "multiplier": 1,
+ "divisor": 1,
+ "offset": 0
+ }
+ }
+ }
+ , "snmp_switch.input_errors": {
+ "title": "Switch Received Packets with Errors",
+ "units": "packets/s",
+ "type": "line",
+ "priority": 30,
+ "family": "IP",
+ "dimensions": {
+ "bad_header": {
+ "oid": ".1.3.6.1.2.1.4.4.0",
+ "algorithm": "incremental",
+ "multiplier": 1,
+ "divisor": 1,
+ "offset": 0
+ }
+ , "bad_address": {
+ "oid": ".1.3.6.1.2.1.4.5.0",
+ "algorithm": "incremental",
+ "multiplier": 1,
+ "divisor": 1,
+ "offset": 0
+ }
+ , "unknown_protocol": {
+ "oid": ".1.3.6.1.2.1.4.7.0",
+ "algorithm": "incremental",
+ "multiplier": 1,
+ "divisor": 1,
+ "offset": 0
+ }
+ }
+ }
+ , "snmp_switch.output": {
+ "title": "Switch Output Packets",
+ "units": "packets/s",
+ "type": "line",
+ "priority": 40,
+ "family": "IP",
+ "dimensions": {
+ "requests": {
+ "oid": ".1.3.6.1.2.1.4.10.0",
+ "algorithm": "incremental",
+ "multiplier": 1,
+ "divisor": 1,
+ "offset": 0
+ }
+ , "discards": {
+ "oid": ".1.3.6.1.2.1.4.11.0",
+ "algorithm": "incremental",
+ "multiplier": -1,
+ "divisor": 1,
+ "offset": 0
+ }
+ , "no_route": {
+ "oid": ".1.3.6.1.2.1.4.12.0",
+ "algorithm": "incremental",
+ "multiplier": -1,
+ "divisor": 1,
+ "offset": 0
+ }
+ }
+ }
+ , "snmp_switch.bandwidth_port": {
+ "title": "Switch Bandwidth for port ",
+ "titleoid": ".1.3.6.1.2.1.31.1.1.1.18.",
+ "units": "kilobits/s",
+ "type": "area",
+ "priority": 100,
+ "family": "ports",
+ "multiply_range": [ 1, 24 ],
+ "dimensions": {
+ "in": {
+ "oid": ".1.3.6.1.2.1.2.2.1.10.",
+ "algorithm": "incremental",
+ "multiplier": 8,
+ "divisor": 1024,
+ "offset": 0
+ }
+ , "out": {
+ "oid": ".1.3.6.1.2.1.2.2.1.16.",
+ "algorithm": "incremental",
+ "multiplier": -8,
+ "divisor": 1024,
+ "offset": 0
+ }
+ }
+ }
+ }
+ }
+ ]
+}
+```
diff --git a/collectors/node.d.plugin/snmp/snmp.node.js b/collectors/node.d.plugin/snmp/snmp.node.js
new file mode 100644
index 000000000..a051d3d3a
--- /dev/null
+++ b/collectors/node.d.plugin/snmp/snmp.node.js
@@ -0,0 +1,516 @@
+'use strict';
+// SPDX-License-Identifier: GPL-3.0-or-later
+// netdata snmp module
+// This program will connect to one or more SNMP Agents
+//
+
+// example configuration in /etc/netdata/node.d/snmp.conf
+/*
+{
+ "enable_autodetect": false,
+ "update_every": 5,
+ "max_request_size": 50,
+ "servers": [
+ {
+ "hostname": "10.11.12.8",
+ "community": "public",
+ "update_every": 10,
+ "max_request_size": 50,
+ "options": { "timeout": 10000 },
+ "charts": {
+ "snmp_switch.bandwidth_port1": {
+ "title": "Switch Bandwidth for port 1",
+ "units": "kilobits/s",
+ "type": "area",
+ "priority": 1,
+ "dimensions": {
+ "in": {
+ "oid": ".1.3.6.1.2.1.2.2.1.10.1",
+ "algorithm": "incremental",
+ "multiplier": 8,
+ "divisor": 1024,
+ "offset": 0
+ },
+ "out": {
+ "oid": ".1.3.6.1.2.1.2.2.1.16.1",
+ "algorithm": "incremental",
+ "multiplier": -8,
+ "divisor": 1024,
+ "offset": 0
+ }
+ }
+ },
+ "snmp_switch.bandwidth_port2": {
+ "title": "Switch Bandwidth for port 2",
+ "units": "kilobits/s",
+ "type": "area",
+ "priority": 1,
+ "dimensions": {
+ "in": {
+ "oid": ".1.3.6.1.2.1.2.2.1.10.2",
+ "algorithm": "incremental",
+ "multiplier": 8,
+ "divisor": 1024,
+ "offset": 0
+ },
+ "out": {
+ "oid": ".1.3.6.1.2.1.2.2.1.16.2",
+ "algorithm": "incremental",
+ "multiplier": -8,
+ "divisor": 1024,
+ "offset": 0
+ }
+ }
+ }
+ }
+ }
+ ]
+}
+*/
+
+// You can also give ranges of charts like the following.
+// This will append 1-24 to id, title, oid (on each dimension)
+// so that 24 charts will be created.
+/*
+{
+ "enable_autodetect": false,
+ "update_every": 10,
+ "max_request_size": 50,
+ "servers": [
+ {
+ "hostname": "10.11.12.8",
+ "community": "public",
+ "update_every": 10,
+ "max_request_size": 50,
+ "options": { "timeout": 20000 },
+ "charts": {
+ "snmp_switch.bandwidth_port": {
+ "title": "Switch Bandwidth for port ",
+ "units": "kilobits/s",
+ "type": "area",
+ "priority": 1,
+ "multiply_range": [ 1, 24 ],
+ "dimensions": {
+ "in": {
+ "oid": ".1.3.6.1.2.1.2.2.1.10.",
+ "algorithm": "incremental",
+ "multiplier": 8,
+ "divisor": 1024,
+ "offset": 0
+ },
+ "out": {
+ "oid": ".1.3.6.1.2.1.2.2.1.16.",
+ "algorithm": "incremental",
+ "multiplier": -8,
+ "divisor": 1024,
+ "offset": 0
+ }
+ }
+ }
+ }
+ }
+ ]
+}
+*/
+
+var net_snmp = require('net-snmp');
+var extend = require('extend');
+var netdata = require('netdata');
+
+if(netdata.options.DEBUG === true) netdata.debug('loaded ' + __filename + ' plugin');
+
+netdata.processors.snmp = {
+ name: 'snmp',
+
+ fixoid: function(oid) {
+ if(typeof oid !== 'string')
+ return oid;
+
+ if(oid.charAt(0) === '.')
+ return oid.substring(1, oid.length);
+
+ return oid;
+ },
+
+ prepare: function(service) {
+ var __DEBUG = netdata.options.DEBUG;
+
+ if(typeof service.snmp_oids === 'undefined' || service.snmp_oids === null || service.snmp_oids.length === 0) {
+ // this is the first time we see this service
+
+ if(__DEBUG === true)
+ netdata.debug(service.module.name + ': ' + service.name + ': preparing ' + this.name + ' OIDs');
+
+ // build an index of all OIDs
+ service.snmp_oids_index = {};
+ var chart_keys = Object.keys(service.request.charts);
+ var chart_keys_len = chart_keys.length;
+ while(chart_keys_len--) {
+ var c = chart_keys[chart_keys_len];
+ var chart = service.request.charts[c];
+
+ // for each chart
+
+ if(__DEBUG === true)
+ netdata.debug(service.module.name + ': ' + service.name + ': indexing ' + this.name + ' chart: ' + c);
+
+ if(typeof chart.titleoid !== 'undefined') {
+ service.snmp_oids_index[this.fixoid(chart.titleoid)] = {
+ type: 'title',
+ link: chart
+ };
+ }
+
+ var dim_keys = Object.keys(chart.dimensions);
+ var dim_keys_len = dim_keys.length;
+ while(dim_keys_len--) {
+ var d = dim_keys[dim_keys_len];
+ var dim = chart.dimensions[d];
+
+ // for each dimension in the chart
+
+ var oid = this.fixoid(dim.oid);
+ var oidname = this.fixoid(dim.oidname);
+
+ if(__DEBUG === true)
+ netdata.debug(service.module.name + ': ' + service.name + ': indexing ' + this.name + ' chart: ' + c + ', dimension: ' + d + ', OID: ' + oid + ", OID name: " + oidname);
+
+ // link it to the point we need to set the value to
+ service.snmp_oids_index[oid] = {
+ type: 'value',
+ link: dim
+ };
+
+ if(typeof oidname !== 'undefined')
+ service.snmp_oids_index[oidname] = {
+ type: 'name',
+ link: dim
+ };
+
+ // and set the value to null
+ dim.value = null;
+ }
+ }
+
+ if(__DEBUG === true)
+ netdata.debug(service.module.name + ': ' + service.name + ': indexed ' + this.name + ' OIDs: ' + netdata.stringify(service.snmp_oids_index));
+
+ // now create the array of OIDs needed by net-snmp
+ service.snmp_oids = Object.keys(service.snmp_oids_index);
+
+ if(__DEBUG === true)
+ netdata.debug(service.module.name + ': ' + service.name + ': final list of ' + this.name + ' OIDs: ' + netdata.stringify(service.snmp_oids));
+
+ service.snmp_oids_cleaned = 0;
+ }
+ else if(service.snmp_oids_cleaned === 0) {
+ service.snmp_oids_cleaned = 1;
+
+ // the second time, keep only values
+
+ service.snmp_oids = new Array();
+ var oid_keys = Object.keys(service.snmp_oids_index);
+ var oid_keys_len = oid_keys.length;
+ while(oid_keys_len--) {
+ if (service.snmp_oids_index[oid_keys[oid_keys_len]].type === 'value')
+ service.snmp_oids.push(oid_keys[oid_keys_len]);
+ }
+ }
+ },
+
+ getdata: function(service, index, ok, failed, callback) {
+ var __DEBUG = netdata.options.DEBUG;
+ var that = this;
+
+ if(index >= service.snmp_oids.length) {
+ callback((ok > 0)?{ ok: ok, failed: failed }:null);
+ return;
+ }
+
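+        // request the OIDs in batches of at most max_request_size entries per SNMP GET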
+ var slice;
+ if(service.snmp_oids.length <= service.request.max_request_size) {
+ slice = service.snmp_oids;
+ index = service.snmp_oids.length;
+ }
+ else if(service.snmp_oids.length - index <= service.request.max_request_size) {
+ slice = service.snmp_oids.slice(index, service.snmp_oids.length);
+ index = service.snmp_oids.length;
+ }
+ else {
+ slice = service.snmp_oids.slice(index, index + service.request.max_request_size);
+ index += service.request.max_request_size;
+ }
+
+ if(__DEBUG === true)
+ netdata.debug(service.module.name + ': ' + service.name + ': making ' + slice.length + ' entries request, max is: ' + service.request.max_request_size);
+
+ service.snmp_session.get(slice, function(error, varbinds) {
+ if(error) {
+ service.error('Received error = ' + netdata.stringify(error) + ' varbinds = ' + netdata.stringify(varbinds));
+
+ // make all values null
+ var len = slice.length;
+ while(len--)
+ service.snmp_oids_index[slice[len]].value = null;
+ }
+ else {
+ if(__DEBUG === true)
+ netdata.debug(service.module.name + ': ' + service.name + ': got valid ' + service.module.name + ' response: ' + netdata.stringify(varbinds));
+
+ var varbinds_len = varbinds.length;
+ for(var i = 0; i < varbinds_len ; i++) {
+ var value = null;
+
+ if(net_snmp.isVarbindError(varbinds[i])) {
+ if(__DEBUG === true)
+ netdata.debug(service.module.name + ': ' + service.name + ': failed ' + service.module.name + ' get for OIDs ' + varbinds[i].oid);
+
+                            service.error('OID ' + varbinds[i].oid + ' gave error: ' + net_snmp.varbindError(varbinds[i]));
+ value = null;
+ failed++;
+ }
+ else {
+                            // test for Counter64
+ // varbinds[i].type = net_snmp.ObjectType.Counter64;
+ // varbinds[i].value = new Buffer([0x34, 0x49, 0x2e, 0xdc, 0xd1]);
+
+ switch(varbinds[i].type) {
+ case net_snmp.ObjectType.OctetString:
+ if (service.snmp_oids_index[varbinds[i].oid].type !== 'title' && service.snmp_oids_index[varbinds[i].oid].type !== 'name') {
+ // parse floating point values, exposed as strings
+ value = parseFloat(varbinds[i].value) * 1000;
+ if (__DEBUG === true) netdata.debug(service.module.name + ': ' + service.name + ': found ' + service.module.name + ' value of OIDs ' + varbinds[i].oid + ", ObjectType " + net_snmp.ObjectType[varbinds[i].type] + " (" + netdata.stringify(varbinds[i].type) + "), typeof(" + typeof(varbinds[i].value) + "), in JSON: " + netdata.stringify(varbinds[i].value) + ", value = " + value.toString() + " (parsed as float in string)");
+ }
+ else {
+ // just use the string
+ value = varbinds[i].value;
+ if (__DEBUG === true) netdata.debug(service.module.name + ': ' + service.name + ': found ' + service.module.name + ' value of OIDs ' + varbinds[i].oid + ", ObjectType " + net_snmp.ObjectType[varbinds[i].type] + " (" + netdata.stringify(varbinds[i].type) + "), typeof(" + typeof(varbinds[i].value) + "), in JSON: " + netdata.stringify(varbinds[i].value) + ", value = " + value.toString() + " (parsed as string)");
+ }
+ break;
+
+ case net_snmp.ObjectType.Counter64:
+ // copy the buffer
+ value = '0x' + varbinds[i].value.toString('hex');
+ if(__DEBUG === true) netdata.debug(service.module.name + ': ' + service.name + ': found ' + service.module.name + ' value of OIDs ' + varbinds[i].oid + ", ObjectType " + net_snmp.ObjectType[varbinds[i].type] + " (" + netdata.stringify(varbinds[i].type) + "), typeof(" + typeof(varbinds[i].value) + "), in JSON: " + netdata.stringify(varbinds[i].value) + ", value = " + value.toString() + " (parsed as buffer)");
+ break;
+
+ case net_snmp.ObjectType.Integer:
+ case net_snmp.ObjectType.Counter:
+ case net_snmp.ObjectType.Gauge:
+ default:
+ value = varbinds[i].value;
+ if(__DEBUG === true) netdata.debug(service.module.name + ': ' + service.name + ': found ' + service.module.name + ' value of OIDs ' + varbinds[i].oid + ", ObjectType " + net_snmp.ObjectType[varbinds[i].type] + " (" + netdata.stringify(varbinds[i].type) + "), typeof(" + typeof(varbinds[i].value) + "), in JSON: " + netdata.stringify(varbinds[i].value) + ", value = " + value.toString() + " (parsed as number)");
+ break;
+ }
+
+ ok++;
+ }
+
+ if(value !== null) {
+ switch(service.snmp_oids_index[varbinds[i].oid].type) {
+ case 'title': service.snmp_oids_index[varbinds[i].oid].link.title += ' ' + value; break;
+ case 'name' : service.snmp_oids_index[varbinds[i].oid].link.name = value.toString().replace(/\W/g, '_'); break;
+ case 'value': service.snmp_oids_index[varbinds[i].oid].link.value = value; break;
+ }
+ }
+ }
+
+ if(__DEBUG === true)
+ netdata.debug(service.module.name + ': ' + service.name + ': finished ' + service.module.name + ' with ' + ok + ' successful and ' + failed + ' failed values');
+ }
+ that.getdata(service, index, ok, failed, callback);
+ });
+ },
+
+ process: function(service, callback) {
+ var __DEBUG = netdata.options.DEBUG;
+
+ this.prepare(service);
+
+ if(service.snmp_oids.length === 0) {
+ // no OIDs found for this service
+
+ if(__DEBUG === true)
+ service.error('no OIDs to process.');
+
+ callback(null);
+ return;
+ }
+
+ if(typeof service.snmp_session === 'undefined' || service.snmp_session === null) {
+ // no SNMP session has been created for this service
+ // the SNMP session is just the initialization of NET-SNMP
+
+ if(__DEBUG === true)
+ netdata.debug(service.module.name + ': ' + service.name + ': opening ' + this.name + ' session on ' + service.request.hostname + ' community ' + service.request.community + ' options ' + netdata.stringify(service.request.options));
+
+ // create the SNMP session
+ service.snmp_session = net_snmp.createSession (service.request.hostname, service.request.community, service.request.options);
+
+ if(__DEBUG === true)
+ netdata.debug(service.module.name + ': ' + service.name + ': got ' + this.name + ' session: ' + netdata.stringify(service.snmp_session));
+
+ // if we later need traps, this is how to do it:
+ //service.snmp_session.trap(net_snmp.TrapType.LinkDown, function(error) {
+ // if(error) console.error('trap error: ' + netdata.stringify(error));
+ //});
+ }
+
+ // do it, get the SNMP values for the sessions we need
+ this.getdata(service, 0, 0, 0, callback);
+ }
+};
+
+var snmp = {
+ name: __filename,
+ enable_autodetect: true,
+ update_every: 1,
+ base_priority: 50000,
+
+ charts: {},
+
+ processResponse: function(service, data) {
+ if(data !== null) {
+ if(service.added !== true)
+ service.commit();
+
+ var chart_keys = Object.keys(service.request.charts);
+ var chart_keys_len = chart_keys.length;
+ for(var i = 0; i < chart_keys_len; i++) {
+ var c = chart_keys[i];
+
+ var chart = snmp.charts[c];
+ if(typeof chart === 'undefined') {
+ chart = service.chart(c, service.request.charts[c]);
+ snmp.charts[c] = chart;
+ }
+
+ service.begin(chart);
+
+ var dimensions = service.request.charts[c].dimensions;
+ var dim_keys = Object.keys(dimensions);
+ var dim_keys_len = dim_keys.length;
+ for(var j = 0; j < dim_keys_len ; j++) {
+ var d = dim_keys[j];
+
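+                    // send the collected value, applying the optional per-dimension offset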
+ if (dimensions[d].value !== null) {
+ if(typeof dimensions[d].offset === 'number')
+ service.set(d, dimensions[d].value + dimensions[d].offset);
+ else
+ service.set(d, dimensions[d].value);
+ }
+ }
+
+ service.end();
+ }
+ }
+ },
+
+ // module.serviceExecute()
+ // this function is called only from this module
+ // its purpose is to prepare the request and call
+ // netdata.serviceExecute()
+ serviceExecute: function(conf) {
+ var __DEBUG = netdata.options.DEBUG;
+
+ if(__DEBUG === true)
+ netdata.debug(this.name + ': snmp hostname: ' + conf.hostname + ', update_every: ' + conf.update_every);
+
+ var service = netdata.service({
+ name: conf.hostname,
+ request: conf,
+ update_every: conf.update_every,
+ module: this,
+ processor: netdata.processors.snmp
+ });
+
+ // multiply the charts, if required
+ var chart_keys = Object.keys(service.request.charts);
+ var chart_keys_len = chart_keys.length;
+ for( var i = 0; i < chart_keys_len ; i++ ) {
+ var c = chart_keys[i];
+ var service_request_chart = service.request.charts[c];
+
+ if(__DEBUG === true)
+ netdata.debug(this.name + ': snmp hostname: ' + conf.hostname + ', examining chart: ' + c);
+
+ if(typeof service_request_chart.update_every === 'undefined')
+ service_request_chart.update_every = service.update_every;
+
+ if(typeof service_request_chart.multiply_range !== 'undefined') {
+ var from = service_request_chart.multiply_range[0];
+ var to = service_request_chart.multiply_range[1];
+ var prio = service_request_chart.priority || 1;
+
+ if(prio < snmp.base_priority) prio += snmp.base_priority;
+
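+                // clone the chart template for every number in the range, appending the number to its id, title, titleoid and all dimension OIDs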
+ while(from <= to) {
+ var id = c + from.toString();
+ var chart = extend(true, {}, service_request_chart);
+ chart.title += from.toString();
+
+ if(typeof chart.titleoid !== 'undefined')
+ chart.titleoid += from.toString();
+
+ chart.priority = prio++;
+
+ var dim_keys = Object.keys(chart.dimensions);
+ var dim_keys_len = dim_keys.length;
+ for(var j = 0; j < dim_keys_len ; j++) {
+ var d = dim_keys[j];
+
+ chart.dimensions[d].oid += from.toString();
+
+ if(typeof chart.dimensions[d].oidname !== 'undefined')
+ chart.dimensions[d].oidname += from.toString();
+ }
+ service.request.charts[id] = chart;
+ from++;
+ }
+
+ delete service.request.charts[c];
+ }
+ else {
+ if(service.request.charts[c].priority < snmp.base_priority)
+ service.request.charts[c].priority += snmp.base_priority;
+ }
+ }
+
+ service.execute(this.processResponse);
+ },
+
+ configure: function(config) {
+ var added = 0;
+
+ if(typeof config.max_request_size === 'undefined')
+ config.max_request_size = 50;
+
+ if(typeof(config.servers) !== 'undefined') {
+ var len = config.servers.length;
+ while(len--) {
+ if(typeof config.servers[len].update_every === 'undefined')
+ config.servers[len].update_every = this.update_every;
+
+ if(typeof config.servers[len].max_request_size === 'undefined')
+ config.servers[len].max_request_size = config.max_request_size;
+
+ this.serviceExecute(config.servers[len]);
+ added++;
+ }
+ }
+
+ return added;
+ },
+
+ // module.update()
+    // this is called repeatedly to collect data, by calling
+ // service.execute()
+ update: function(service, callback) {
+ service.execute(function(serv, data) {
+ service.module.processResponse(serv, data);
+ callback();
+ });
+ }
+};
+
+module.exports = snmp;
diff --git a/collectors/node.d.plugin/stiebeleltron/Makefile.inc b/collectors/node.d.plugin/stiebeleltron/Makefile.inc
new file mode 100644
index 000000000..0c6e1e213
--- /dev/null
+++ b/collectors/node.d.plugin/stiebeleltron/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_node_DATA += stiebeleltron/stiebeleltron.node.js
+# dist_nodeconfig_DATA += stiebeleltron/stiebeleltron.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += stiebeleltron/README.md stiebeleltron/Makefile.inc
+
diff --git a/collectors/node.d.plugin/stiebeleltron/README.md b/collectors/node.d.plugin/stiebeleltron/README.md
new file mode 100644
index 000000000..002a31571
--- /dev/null
+++ b/collectors/node.d.plugin/stiebeleltron/README.md
@@ -0,0 +1,505 @@
+# Stiebel Eltron
+
+This module collects metrics from a configured Stiebel Eltron heat pump and hot water installation via the ISG web interface.
+
+**Requirements**
+ * Configuration file `stiebeleltron.conf` in the node.d netdata config dir (default: `/etc/netdata/node.d/stiebeleltron.conf`)
+ * Stiebel Eltron ISG web with network access (http), without password login
+
+The charts are configurable, however, the provided default configuration collects the following:
+
+1. **General**
+ * Outside temperature in C
+ * Condenser temperature in C
+ * Heating circuit pressure in bar
+ * Flow rate in l/min
+ * Output of water and heat pumps in %
+
+2. **Heating**
+ * Heat circuit 1 temperature in C (set/actual)
+ * Heat circuit 2 temperature in C (set/actual)
+ * Flow temperature in C (set/actual)
+ * Buffer temperature in C (set/actual)
+ * Pre-flow temperature in C
+
+3. **Hot Water**
+ * Hot water temperature in C (set/actual)
+
+4. **Room Temperature**
+ * Heat circuit 1 room temperature in C (set/actual)
+ * Heat circuit 2 room temperature in C (set/actual)
+
+5. **Electric Reheating**
+ * Dual Mode Reheating temperature in C (hot water/heating)
+
+6. **Process Data**
+ * Remaining compressor rest time in s
+
+7. **Runtime**
+ * Compressor runtime hours (hot water/heating)
+ * Reheating runtime hours (reheating 1/reheating 2)
+
+8. **Energy**
+ * Compressor today in kWh (hot water/heating)
+ * Compressor Total in kWh (hot water/heating)
+
+
+### Configuration
+
+If no configuration is given, the module will be disabled. Each `update_every` is optional; the default is `10`.
+
+---
+
+[Stiebel Eltron Heat pump system with ISG](https://www.stiebel-eltron.com/en/home/products-solutions/renewables/controller_energymanagement/internet_servicegateway/isg_web.html)
+
+Original author: BrainDoctor (github)
+
+The module supports any metrics that are parseable with RegEx. There is no API that gives direct access to the values (AFAIK), so the "workaround" is to parse the HTML output of the ISG.
+
+### Testing
+This plugin has been tested within the following environment:
+ * ISG version: 8.5.6
+ * MFG version: 12
+ * Controller version: 9
+ * July (summer time, not much activity)
+ * Interface language: English
+ * login- and password-less ISG web access (without HTTPS it's useless anyway)
+ * Heatpump model: WPL 25 I-2
+ * Hot water boiler model: 820 WT 1
+
+So, if the interface language is set to English, copy the following configuration into `/etc/netdata/node.d/stiebeleltron.conf` and change the `url`s.
+
+In my case, the ISG is relatively slow to respond (at least 1s, sometimes up to 4s), so collecting metrics every 10s is more than enough for me.
+
+### How to update the config
+
+* The dimensions support a variable number of digits; the default is `1`. Most of the values printed by the ISG use 1 digit, some use 2.
+* The dimensions also support the `multiplier` and `divisor` attributes; however, the divisor gets overridden by `digits`, if specified. The default is `1`.
+* The test string for the regex is always the whole HTML output from the URL. For each parameter you need a regular expression that extracts the value from the HTML source in its first capture group.
+ Recommended: [regexr.com](https://regexr.com/) for testing and matching, [freeformatter.com](https://www.freeformatter.com/json-escape.html) for escaping the newly created regex for the JSON config.
+
+The charts are generated using the configuration below, so if your installation is in another language or exposes other metrics, just adapt the structure or the regexes.
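+
+For example, this is the Flow Rate dimension from the template below; it overrides `digits`, so the collected value is multiplied by 10^2 and the chart divisor is set accordingly:
+
+```json
+{
+  "name": "Flow Rate",
+  "id": "flowrate",
+  "digits": 2,
+  "regex": "FLOW RATE<\\\/td>\\s*<td.*>(-?[0-9]+,[0-9]+).*<\\\/td>"
+}
+```
+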
+### Configuration template
+```json
+{
+ "enable_autodetect": false,
+ "update_every": 10,
+ "pages": [
+ {
+ "name": "System",
+ "id": "system",
+ "url": "http://machine.ip.or.dns/?s=1,0",
+ "update_every": 10,
+ "categories": [
+ {
+ "id": "eletricreheating",
+ "name": "electric reheating",
+ "charts": [
+ {
+ "title": "Dual Mode Reheating Temperature",
+ "id": "reheatingtemp",
+ "unit": "Celsius",
+ "type": "line",
+ "prio": 1,
+ "dimensions": [
+ {
+ "name": "Heating",
+ "id": "dualmodeheatingtemp",
+ "regex": "DUAL MODE TEMP HEATING<\\\/td>\\s*<td.*>(-?[0-9]+,[0-9]+).*<\\\/td>"
+ },
+ {
+ "name": "Hot Water",
+ "id" : "dualmodehotwatertemp",
+ "regex": "DUAL MODE TEMP DHW<\\\/td>\\s*<td.*>(-?[0-9]+,[0-9]+).*<\\\/td>"
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "id": "roomtemp",
+ "name": "room temperature",
+ "charts": [
+ {
+ "title": "Heat Circuit 1",
+ "id": "hc1",
+ "unit": "Celsius",
+ "type": "line",
+ "prio": 1,
+ "dimensions": [
+ {
+ "name": "Actual",
+ "id": "actual",
+ "regex": "<tr class=\"even\">\\s*<td.*>ACTUAL TEMPERATURE HC 1<\\\/td>\\s*<td.*>(-?[0-9]+,[0-9]+).*<\\\/td>\\s*<\\\/tr>"
+ },
+ {
+ "name": "Set",
+ "id" : "set",
+ "regex": "<tr class=\"odd\">\\s*<td.*>SET TEMPERATURE HC 1<\\\/td>\\s*<td.*>(-?[0-9]+,[0-9]+).*<\\\/td>\\s*<\\\/tr>"
+ }
+ ]
+ },
+ {
+ "title": "Heat Circuit 2",
+ "id": "hc2",
+ "unit": "Celsius",
+ "type": "line",
+ "prio": 2,
+ "dimensions": [
+ {
+ "name": "Actual",
+ "id": "actual",
+ "regex": "<tr class=\"even\">\\s*<td.*>ACTUAL TEMPERATURE HC 2<\\\/td>\\s*<td.*>(-?[0-9]+,[0-9]+).*<\\\/td>\\s*<\\\/tr>"
+ },
+ {
+ "name": "Set",
+ "id" : "set",
+ "regex": "<tr class=\"odd\">\\s*<td.*>SET TEMPERATURE HC 2<\\\/td>\\s*<td.*>(-?[0-9]+,[0-9]+).*<\\\/td>\\s*<\\\/tr>"
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "id": "heating",
+ "name": "heating",
+ "charts": [
+ {
+ "title": "Heat Circuit 1",
+ "id": "hc1",
+ "unit": "Celsius",
+ "type": "line",
+ "prio": 1,
+ "dimensions": [
+ {
+ "name": "Actual",
+ "id": "actual",
+ "regex": "<tr class=\"odd\">\\s*<td.*>ACTUAL TEMPERATURE HC 1<\\\/td>\\s*<td.*>(-?[0-9]+,[0-9]+).*<\\\/td>\\s*<\\\/tr>"
+ },
+ {
+ "name": "Set",
+ "id" : "set",
+ "regex": "<tr class=\"even\">\\s*<td.*>SET TEMPERATURE HC 1<\\\/td>\\s*<td.*>(-?[0-9]+,[0-9]+).*<\\\/td>\\s*<\\\/tr>"
+ }
+ ]
+ },
+ {
+ "title": "Heat Circuit 2",
+ "id": "hc2",
+ "unit": "Celsius",
+ "type": "line",
+ "prio": 2,
+ "dimensions": [
+ {
+ "name": "Actual",
+ "id": "actual",
+ "regex": "<tr class=\"odd\">\\s*<td.*>ACTUAL TEMPERATURE HC 2<\\\/td>\\s*<td.*>(-?[0-9]+,[0-9]+).*<\\\/td>\\s*<\\\/tr>"
+ },
+ {
+ "name": "Set",
+ "id" : "set",
+ "regex": "<tr class=\"even\">\\s*<td.*>SET TEMPERATURE HC 2<\\\/td>\\s*<td.*>(-?[0-9]+,[0-9]+).*<\\\/td>\\s*<\\\/tr>"
+ }
+ ]
+ },
+ {
+ "title": "Flow Temperature",
+ "id": "flowtemp",
+ "unit": "Celsius",
+ "type": "line",
+ "prio": 3,
+ "dimensions": [
+ {
+ "name": "Heating",
+ "id": "heating",
+ "regex": "ACTUAL FLOW TEMPERATURE WP<\\\/td>\\s*<td.*>(-?[0-9]+,[0-9]+).*<\\\/td>"
+ },
+ {
+ "name": "Reheating",
+ "id" : "reheating",
+ "regex": "ACTUAL FLOW TEMPERATURE NHZ<\\\/td>\\s*<td.*>(-?[0-9]+,[0-9]+).*<\\\/td>"
+ }
+ ]
+ },
+ {
+ "title": "Buffer Temperature",
+ "id": "buffertemp",
+ "unit": "Celsius",
+ "type": "line",
+ "prio": 4,
+ "dimensions": [
+ {
+ "name": "Actual",
+ "id": "actual",
+ "regex": "ACTUAL BUFFER TEMPERATURE<\\\/td>\\s*<td.*>(-?[0-9]+,[0-9]+).*<\\\/td>"
+ },
+ {
+ "name": "Set",
+ "id" : "set",
+ "regex": "SET BUFFER TEMPERATURE<\\\/td>\\s*<td.*>(-?[0-9]+,[0-9]+).*<\\\/td>"
+ }
+ ]
+ },
+ {
+ "title": "Fixed Temperature",
+ "id": "fixedtemp",
+ "unit": "Celsius",
+ "type": "line",
+ "prio": 5,
+ "dimensions": [
+ {
+ "name": "Set",
+ "id" : "setfixed",
+ "regex": "SET FIXED TEMPERATURE<\\\/td>\\s*<td.*>(-?[0-9]+,[0-9]+).*<\\\/td>"
+ }
+ ]
+ },
+ {
+ "title": "Pre-flow Temperature",
+ "id": "preflowtemp",
+ "unit": "Celsius",
+ "type": "line",
+ "prio": 6,
+ "dimensions": [
+ {
+ "name": "Actual",
+ "id": "actualreturn",
+ "regex": "ACTUAL RETURN TEMPERATURE<\\\/td>\\s*<td.*>(-?[0-9]+,[0-9]+).*<\\\/td>"
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "id": "hotwater",
+ "name": "hot water",
+ "charts": [
+ {
+ "title": "Hot Water Temperature",
+ "id": "hotwatertemp",
+ "unit": "Celsius",
+ "type": "line",
+ "prio": 1,
+ "dimensions": [
+ {
+ "name": "Actual",
+ "id": "actual",
+ "regex": "ACTUAL TEMPERATURE<\\\/td>\\s*<td.*>(-?[0-9]+,[0-9]+).*<\\\/td>"
+ },
+ {
+ "name": "Set",
+ "id" : "set",
+ "regex": "SET TEMPERATURE<\\\/td>\\s*<td.*>(-?[0-9]+,[0-9]+).*<\\\/td>"
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "id": "general",
+ "name": "general",
+ "charts": [
+ {
+ "title": "Outside Temperature",
+ "id": "outside",
+ "unit": "Celsius",
+ "type": "line",
+ "prio": 1,
+ "dimensions": [
+ {
+ "name": "Outside temperature",
+ "id": "outsidetemp",
+ "regex": "OUTSIDE TEMPERATURE<\\\/td>\\s*<td.*>(-?[0-9]+,[0-9]+).*<\\\/td>\\s*<\\\/tr>"
+ }
+ ]
+ },
+ {
+ "title": "Condenser Temperature",
+ "id": "condenser",
+ "unit": "Celsius",
+ "type": "line",
+ "prio": 2,
+ "dimensions": [
+ {
+ "name": "Condenser",
+ "id": "condenser",
+ "regex": "CONDENSER TEMP\\.<\\\/td>\\s*<td.*>(-?[0-9]+,[0-9]+).*<\\\/td>"
+ }
+ ]
+ },
+ {
+ "title": "Heating Circuit Pressure",
+ "id": "heatingcircuit",
+ "unit": "bar",
+ "type": "line",
+ "prio": 3,
+ "dimensions": [
+ {
+ "name": "Heating Circuit",
+ "id": "heatingcircuit",
+ "digits": 2,
+ "regex": "PRESSURE HTG CIRC<\\\/td>\\s*<td.*>(-?[0-9]+,[0-9]*).*<\\\/td>"
+ }
+ ]
+ },
+ {
+ "title": "Flow Rate",
+ "id": "flowrate",
+ "unit": "liters/min",
+ "type": "line",
+ "prio": 4,
+ "dimensions": [
+ {
+ "name": "Flow Rate",
+ "id": "flowrate",
+ "digits": 2,
+ "regex": "FLOW RATE<\\\/td>\\s*<td.*>(-?[0-9]+,[0-9]+).*<\\\/td>"
+ }
+ ]
+ },
+ {
+ "title": "Output",
+ "id": "output",
+ "unit": "%",
+ "type": "line",
+ "prio": 5,
+ "dimensions": [
+ {
+ "name": "Heat Pump",
+ "id": "outputheatpump",
+ "regex": "OUTPUT HP<\\\/td>\\s*<td.*>(-?[0-9]+,?[0-9]*).*<\\\/td>"
+ },
+ {
+ "name": "Water Pump",
+ "id": "intpumprate",
+ "regex": "INT PUMP RATE<\\\/td>\\s*<td.*>(-?[0-9]+,?[0-9]*).*<\\\/td>"
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "name": "Heat Pump",
+ "id": "heatpump",
+ "url": "http://machine.ip.or.dns/?s=1,1",
+ "update_every": 10,
+ "categories": [
+ {
+ "id": "runtime",
+ "name": "runtime",
+ "charts": [
+ {
+ "title": "Compressor",
+ "id": "compressor",
+ "unit": "h",
+ "type": "line",
+ "prio": 1,
+ "dimensions": [
+ {
+ "name": "Heating",
+ "id": "heating",
+ "regex": "RNT COMP 1 HEA<\\\/td>\\s*<td.*>(-?[0-9]+,?[0-9]*)"
+ },
+ {
+ "name": "Hot Water",
+ "id" : "hotwater",
+ "regex": "RNT COMP 1 DHW<\\\/td>\\s*<td.*>(-?[0-9]+,?[0-9]*)"
+ }
+ ]
+ },
+ {
+ "title": "Reheating",
+ "id": "reheating",
+ "unit": "h",
+ "type": "line",
+ "prio": 2,
+ "dimensions": [
+ {
+ "name": "Reheating 1",
+ "id": "rh1",
+ "regex": "BH 1<\\\/td>\\s*<td.*>(-?[0-9]+,?[0-9]*)"
+ },
+ {
+ "name": "Reheating 2",
+ "id" : "rh2",
+ "regex": "BH 2<\\\/td>\\s*<td.*>(-?[0-9]+,?[0-9]*)"
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "id": "processdata",
+ "name": "process data",
+ "charts": [
+ {
+ "title": "Remaining Compressor Rest Time",
+ "id": "remaincomp",
+ "unit": "s",
+ "type": "line",
+ "prio": 1,
+ "dimensions": [
+ {
+ "name": "Timer",
+ "id": "timer",
+ "regex": "COMP DLAY CNTR<\\\/td>\\s*<td.*>(-?[0-9]+,?[0-9]*)"
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "id": "energy",
+ "name": "energy",
+ "charts": [
+ {
+ "title": "Compressor Today",
+ "id": "compressorday",
+ "unit": "kWh",
+ "type": "line",
+ "prio": 1,
+ "dimensions": [
+ {
+ "name": "Heating",
+ "id": "heating",
+ "digits": 3,
+ "regex": "COMPRESSOR HEATING DAY<\\\/td>\\s*<td.*>(-?[0-9]+,?[0-9]*)"
+ },
+ {
+ "name": "Hot Water",
+ "id": "hotwater",
+ "digits": 3,
+ "regex": "COMPRESSOR DHW DAY<\\\/td>\\s*<td.*>(-?[0-9]+,?[0-9]*)"
+ }
+ ]
+ },
+ {
+ "title": "Compressor Total",
+ "id": "compressortotal",
+ "unit": "MWh",
+ "type": "line",
+ "prio": 2,
+ "dimensions": [
+ {
+ "name": "Heating",
+ "id": "heating",
+ "digits": 3,
+ "regex": "COMPRESSOR HEATING TOTAL<\\\/td>\\s*<td.*>(-?[0-9]+,?[0-9]*)"
+ },
+ {
+ "name": "Hot Water",
+ "id": "hotwater",
+ "digits": 3,
+ "regex": "COMPRESSOR DHW TOTAL<\\\/td>\\s*<td.*>(-?[0-9]+,?[0-9]*)"
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ ]
+}
+```
diff --git a/collectors/node.d.plugin/stiebeleltron/stiebeleltron.node.js b/collectors/node.d.plugin/stiebeleltron/stiebeleltron.node.js
new file mode 100644
index 000000000..250c26540
--- /dev/null
+++ b/collectors/node.d.plugin/stiebeleltron/stiebeleltron.node.js
@@ -0,0 +1,197 @@
+'use strict';
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+// This program will connect to one Stiebel Eltron ISG for heatpump heating
+// to get the heat pump metrics.
+
+// example configuration in netdata/conf.d/node.d/stiebeleltron.conf.md
+
+require("url");
+require("http");
+var netdata = require("netdata");
+
+netdata.debug("loaded " + __filename + " plugin");
+
+var stiebeleltron = {
+ name: "Stiebel Eltron",
+ enable_autodetect: false,
+ update_every: 10,
+ base_priority: 60000,
+ charts: {},
+ pages: {},
+
+ createBasicDimension: function (id, name, multiplier, divisor) {
+ return {
+ id: id, // the unique id of the dimension
+ name: name, // the name of the dimension
+ algorithm: netdata.chartAlgorithms.absolute,// the id of the netdata algorithm
+ multiplier: multiplier, // the multiplier
+ divisor: divisor, // the divisor
+ hidden: false // is hidden (boolean)
+ };
+ },
+
+ processResponse: function (service, html) {
+ if (html === null) return;
+
+ // add the service
+ service.commit();
+
+ var page = stiebeleltron.pages[service.name];
+ var categories = page.categories;
+ var categoriesCount = categories.length;
+ while (categoriesCount--) {
+ var context = {
+ html: html,
+ service: service,
+ category: categories[categoriesCount],
+ page: page,
+ chartDefinition: null,
+ dimension: null
+ };
+ stiebeleltron.processCategory(context);
+
+ }
+ },
+
+ processCategory: function (context) {
+ var charts = context.category.charts;
+ var chartCount = charts.length;
+ while (chartCount--) {
+ context.chartDefinition = charts[chartCount];
+ stiebeleltron.processChart(context);
+ }
+ },
+
+ processChart: function (context) {
+ var dimensions = context.chartDefinition.dimensions;
+ var dimensionCount = dimensions.length;
+ context.service.begin(stiebeleltron.getChartFromContext(context));
+
+ while (dimensionCount--) {
+ context.dimension = dimensions[dimensionCount];
+ stiebeleltron.processDimension(context);
+ }
+ context.service.end();
+ },
+
+ processDimension: function (context) {
+ var dimension = context.dimension;
+ var match = new RegExp(dimension.regex).exec(context.html);
+ if (match === null) return;
+ var value = match[1].replace(",", ".");
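+        // the ISG prints decimal values with a comma as separator; convert it to a dot so the string can be used as a number below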
+ // most values have a single digit by default, which requires the values to be multiplied. can be overridden.
+ if (stiebeleltron.isDefined(dimension.digits)) {
+ value *= Math.pow(10, dimension.digits);
+ } else {
+ value *= 10;
+ }
+ context.service.set(stiebeleltron.getDimensionId(context), value);
+ },
+
+ getChartFromContext: function (context) {
+ var chartId = this.getChartId(context);
+ var chart = stiebeleltron.charts[chartId];
+ if (stiebeleltron.isDefined(chart)) return chart;
+
+ var chartDefinition = context.chartDefinition;
+ var service = context.service;
+ var dimensions = {};
+
+ var dimCount = chartDefinition.dimensions.length;
+ while (dimCount--) {
+ var dim = chartDefinition.dimensions[dimCount];
+ var multiplier = 1;
+ var divisor = 10;
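+            // values are scaled by one decimal digit by default; 'digits' sets the divisor to 10^digits, and an explicit 'divisor' overrides it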
+ if (stiebeleltron.isDefined(dim.digits)) divisor = Math.pow(10, Math.max(0, dim.digits));
+ if (stiebeleltron.isDefined(dim.multiplier)) multiplier = dim.multiplier;
+ if (stiebeleltron.isDefined(dim.divisor)) divisor = dim.divisor;
+ context.dimension = dim;
+ var dimId = this.getDimensionId(context);
+ dimensions[dimId] = this.createBasicDimension(dimId, dim.name, multiplier, divisor);
+ }
+
+ chart = {
+ id: chartId,
+ name: '',
+ title: chartDefinition.title,
+ units: chartDefinition.unit,
+ family: context.category.name,
+ context: 'stiebeleltron.' + context.category.id + '.' + chartDefinition.id,
+ type: chartDefinition.type,
+ priority: stiebeleltron.base_priority + chartDefinition.prio,// the priority relative to others in the same family
+ update_every: service.update_every, // the expected update frequency of the chart
+ dimensions: dimensions
+ };
+ chart = service.chart(chartId, chart);
+ stiebeleltron.charts[chartId] = chart;
+
+ return chart;
+ },
+
+ // module.serviceExecute()
+ // this function is called only from this module
+ // its purpose is to prepare the request and call
+ // netdata.serviceExecute()
+ serviceExecute: function (name, uri, update_every) {
+ netdata.debug(this.name + ': ' + name + ': url: ' + uri + ', update_every: ' + update_every);
+
+ var service = netdata.service({
+ name: name,
+ request: netdata.requestFromURL(uri),
+ update_every: update_every,
+ module: this
+ });
+ service.execute(this.processResponse);
+ },
+
+
+ configure: function (config) {
+ if (stiebeleltron.isUndefined(config.pages)) return 0;
+ var added = 0;
+ var pageCount = config.pages.length;
+ while (pageCount--) {
+ var page = config.pages[pageCount];
+ // some validation
+ if (stiebeleltron.isUndefined(page.categories) || page.categories.length < 1) {
+ netdata.error("Your Stiebel Eltron config is invalid. Disabling plugin.");
+ return 0;
+ }
+ if (stiebeleltron.isUndefined(page.update_every)) page.update_every = this.update_every;
+ this.pages[page.name] = page;
+ this.serviceExecute(page.name, page.url, page.update_every);
+ added++;
+ }
+ return added;
+ },
+
+ // module.update()
+ // this is called repeatedly to collect data, by calling
+    // service.execute()
+ update: function (service, callback) {
+ service.execute(function (serv, data) {
+ service.module.processResponse(serv, data);
+ callback();
+ });
+ },
+
+ getChartId: function (context) {
+ return "stiebeleltron_" + context.page.id +
+ "." + context.category.id +
+ "." + context.chartDefinition.id;
+ },
+
+ getDimensionId: function (context) {
+ return context.dimension.id;
+ },
+
+ isUndefined: function (value) {
+ return typeof value === 'undefined';
+ },
+
+ isDefined: function (value) {
+ return typeof value !== 'undefined';
+ }
+};
+
+module.exports = stiebeleltron;
diff --git a/collectors/plugins.d/Makefile.am b/collectors/plugins.d/Makefile.am
new file mode 100644
index 000000000..59250a997
--- /dev/null
+++ b/collectors/plugins.d/Makefile.am
@@ -0,0 +1,11 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+
+SUBDIRS = \
+ $(NULL)
+
+dist_noinst_DATA = \
+ README.md \
+ $(NULL)
diff --git a/collectors/plugins.d/Makefile.in b/collectors/plugins.d/Makefile.in
new file mode 100644
index 000000000..b2c112811
--- /dev/null
+++ b/collectors/plugins.d/Makefile.in
@@ -0,0 +1,647 @@
+# Makefile.in generated by automake 1.14.1 from Makefile.am.
+# @configure_input@
+
+# Copyright (C) 1994-2013 Free Software Foundation, Inc.
+
+# This Makefile.in is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
+# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE.
+
+@SET_MAKE@
+
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+VPATH = @srcdir@
+am__is_gnu_make = test -n '$(MAKEFILE_LIST)' && test -n '$(MAKELEVEL)'
+am__make_running_with_option = \
+ case $${target_option-} in \
+ ?) ;; \
+ *) echo "am__make_running_with_option: internal error: invalid" \
+ "target option '$${target_option-}' specified" >&2; \
+ exit 1;; \
+ esac; \
+ has_opt=no; \
+ sane_makeflags=$$MAKEFLAGS; \
+ if $(am__is_gnu_make); then \
+ sane_makeflags=$$MFLAGS; \
+ else \
+ case $$MAKEFLAGS in \
+ *\\[\ \ ]*) \
+ bs=\\; \
+ sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \
+ | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \
+ esac; \
+ fi; \
+ skip_next=no; \
+ strip_trailopt () \
+ { \
+ flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \
+ }; \
+ for flg in $$sane_makeflags; do \
+ test $$skip_next = yes && { skip_next=no; continue; }; \
+ case $$flg in \
+ *=*|--*) continue;; \
+ -*I) strip_trailopt 'I'; skip_next=yes;; \
+ -*I?*) strip_trailopt 'I';; \
+ -*O) strip_trailopt 'O'; skip_next=yes;; \
+ -*O?*) strip_trailopt 'O';; \
+ -*l) strip_trailopt 'l'; skip_next=yes;; \
+ -*l?*) strip_trailopt 'l';; \
+ -[dEDm]) skip_next=yes;; \
+ -[JT]) skip_next=yes;; \
+ esac; \
+ case $$flg in \
+ *$$target_option*) has_opt=yes; break;; \
+ esac; \
+ done; \
+ test $$has_opt = yes
+am__make_dryrun = (target_option=n; $(am__make_running_with_option))
+am__make_keepgoing = (target_option=k; $(am__make_running_with_option))
+pkgdatadir = $(datadir)/@PACKAGE@
+pkgincludedir = $(includedir)/@PACKAGE@
+pkglibdir = $(libdir)/@PACKAGE@
+pkglibexecdir = $(libexecdir)/@PACKAGE@
+am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd
+install_sh_DATA = $(install_sh) -c -m 644
+install_sh_PROGRAM = $(install_sh) -c
+install_sh_SCRIPT = $(install_sh) -c
+INSTALL_HEADER = $(INSTALL_DATA)
+transform = $(program_transform_name)
+NORMAL_INSTALL = :
+PRE_INSTALL = :
+POST_INSTALL = :
+NORMAL_UNINSTALL = :
+PRE_UNINSTALL = :
+POST_UNINSTALL = :
+build_triplet = @build@
+host_triplet = @host@
+subdir = collectors/plugins.d
+DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/Makefile.am \
+ $(dist_noinst_DATA)
+ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
+am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \
+ $(top_srcdir)/build/m4/ax_c__generic.m4 \
+ $(top_srcdir)/build/m4/ax_c_lto.m4 \
+ $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \
+ $(top_srcdir)/build/m4/ax_c_mallopt.m4 \
+ $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \
+ $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \
+ $(top_srcdir)/build/m4/ax_pthread.m4 \
+ $(top_srcdir)/build/m4/jemalloc.m4 \
+ $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac
+am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
+ $(ACLOCAL_M4)
+mkinstalldirs = $(install_sh) -d
+CONFIG_HEADER = $(top_builddir)/config.h
+CONFIG_CLEAN_FILES =
+CONFIG_CLEAN_VPATH_FILES =
+AM_V_P = $(am__v_P_@AM_V@)
+am__v_P_ = $(am__v_P_@AM_DEFAULT_V@)
+am__v_P_0 = false
+am__v_P_1 = :
+AM_V_GEN = $(am__v_GEN_@AM_V@)
+am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@)
+am__v_GEN_0 = @echo " GEN " $@;
+am__v_GEN_1 =
+AM_V_at = $(am__v_at_@AM_V@)
+am__v_at_ = $(am__v_at_@AM_DEFAULT_V@)
+am__v_at_0 = @
+am__v_at_1 =
+SOURCES =
+DIST_SOURCES =
+RECURSIVE_TARGETS = all-recursive check-recursive cscopelist-recursive \
+ ctags-recursive dvi-recursive html-recursive info-recursive \
+ install-data-recursive install-dvi-recursive \
+ install-exec-recursive install-html-recursive \
+ install-info-recursive install-pdf-recursive \
+ install-ps-recursive install-recursive installcheck-recursive \
+ installdirs-recursive pdf-recursive ps-recursive \
+ tags-recursive uninstall-recursive
+am__can_run_installinfo = \
+ case $$AM_UPDATE_INFO_DIR in \
+ n|no|NO) false;; \
+ *) (install-info --version) >/dev/null 2>&1;; \
+ esac
+DATA = $(dist_noinst_DATA)
+RECURSIVE_CLEAN_TARGETS = mostlyclean-recursive clean-recursive \
+ distclean-recursive maintainer-clean-recursive
+am__recursive_targets = \
+ $(RECURSIVE_TARGETS) \
+ $(RECURSIVE_CLEAN_TARGETS) \
+ $(am__extra_recursive_targets)
+AM_RECURSIVE_TARGETS = $(am__recursive_targets:-recursive=) TAGS CTAGS \
+ distdir
+am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP)
+# Read a list of newline-separated strings from the standard input,
+# and print each of them once, without duplicates. Input order is
+# *not* preserved.
+am__uniquify_input = $(AWK) '\
+ BEGIN { nonempty = 0; } \
+ { items[$$0] = 1; nonempty = 1; } \
+ END { if (nonempty) { for (i in items) print i; }; } \
+'
+# Make sure the list of sources is unique. This is necessary because,
+# e.g., the same source file might be shared among _SOURCES variables
+# for different programs/libraries.
+am__define_uniq_tagged_files = \
+ list='$(am__tagged_files)'; \
+ unique=`for i in $$list; do \
+ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \
+ done | $(am__uniquify_input)`
+ETAGS = etags
+CTAGS = ctags
+DIST_SUBDIRS = $(SUBDIRS)
+DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
+am__relativize = \
+ dir0=`pwd`; \
+ sed_first='s,^\([^/]*\)/.*$$,\1,'; \
+ sed_rest='s,^[^/]*/*,,'; \
+ sed_last='s,^.*/\([^/]*\)$$,\1,'; \
+ sed_butlast='s,/*[^/]*$$,,'; \
+ while test -n "$$dir1"; do \
+ first=`echo "$$dir1" | sed -e "$$sed_first"`; \
+ if test "$$first" != "."; then \
+ if test "$$first" = ".."; then \
+ dir2=`echo "$$dir0" | sed -e "$$sed_last"`/"$$dir2"; \
+ dir0=`echo "$$dir0" | sed -e "$$sed_butlast"`; \
+ else \
+ first2=`echo "$$dir2" | sed -e "$$sed_first"`; \
+ if test "$$first2" = "$$first"; then \
+ dir2=`echo "$$dir2" | sed -e "$$sed_rest"`; \
+ else \
+ dir2="../$$dir2"; \
+ fi; \
+ dir0="$$dir0"/"$$first"; \
+ fi; \
+ fi; \
+ dir1=`echo "$$dir1" | sed -e "$$sed_rest"`; \
+ done; \
+ reldir="$$dir2"
+ACLOCAL = @ACLOCAL@
+AMTAR = @AMTAR@
+AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@
+AUTOCONF = @AUTOCONF@
+AUTOHEADER = @AUTOHEADER@
+AUTOMAKE = @AUTOMAKE@
+AWK = @AWK@
+CC = @CC@
+CCDEPMODE = @CCDEPMODE@
+CFLAGS = @CFLAGS@
+CPP = @CPP@
+CPPFLAGS = @CPPFLAGS@
+CYGPATH_W = @CYGPATH_W@
+DEFS = @DEFS@
+DEPDIR = @DEPDIR@
+ECHO_C = @ECHO_C@
+ECHO_N = @ECHO_N@
+ECHO_T = @ECHO_T@
+EGREP = @EGREP@
+EXEEXT = @EXEEXT@
+GREP = @GREP@
+INSTALL = @INSTALL@
+INSTALL_DATA = @INSTALL_DATA@
+INSTALL_PROGRAM = @INSTALL_PROGRAM@
+INSTALL_SCRIPT = @INSTALL_SCRIPT@
+INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@
+IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@
+LDFLAGS = @LDFLAGS@
+LIBCAP_CFLAGS = @LIBCAP_CFLAGS@
+LIBCAP_LIBS = @LIBCAP_LIBS@
+LIBMNL_CFLAGS = @LIBMNL_CFLAGS@
+LIBMNL_LIBS = @LIBMNL_LIBS@
+LIBOBJS = @LIBOBJS@
+LIBS = @LIBS@
+LTLIBOBJS = @LTLIBOBJS@
+MAINT = @MAINT@
+MAKEINFO = @MAKEINFO@
+MATH_CFLAGS = @MATH_CFLAGS@
+MATH_LIBS = @MATH_LIBS@
+MKDIR_P = @MKDIR_P@
+NFACCT_CFLAGS = @NFACCT_CFLAGS@
+NFACCT_LIBS = @NFACCT_LIBS@
+OBJEXT = @OBJEXT@
+OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@
+OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@
+OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@
+OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@
+OPTIONAL_MATH_CLFAGS = @OPTIONAL_MATH_CLFAGS@
+OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@
+OPTIONAL_NFACCT_CLFAGS = @OPTIONAL_NFACCT_CLFAGS@
+OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@
+OPTIONAL_UUID_CLFAGS = @OPTIONAL_UUID_CLFAGS@
+OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@
+OPTIONAL_ZLIB_CLFAGS = @OPTIONAL_ZLIB_CLFAGS@
+OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@
+PACKAGE = @PACKAGE@
+PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@
+PACKAGE_NAME = @PACKAGE_NAME@
+PACKAGE_RPM_RELEASE = @PACKAGE_RPM_RELEASE@
+PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@
+PACKAGE_STRING = @PACKAGE_STRING@
+PACKAGE_TARNAME = @PACKAGE_TARNAME@
+PACKAGE_URL = @PACKAGE_URL@
+PACKAGE_VERSION = @PACKAGE_VERSION@
+PATH_SEPARATOR = @PATH_SEPARATOR@
+PKG_CONFIG = @PKG_CONFIG@
+PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@
+PKG_CONFIG_PATH = @PKG_CONFIG_PATH@
+PTHREAD_CC = @PTHREAD_CC@
+PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
+PTHREAD_LIBS = @PTHREAD_LIBS@
+SET_MAKE = @SET_MAKE@
+SHELL = @SHELL@
+SSE_CANDIDATE = @SSE_CANDIDATE@
+STRIP = @STRIP@
+UUID_CFLAGS = @UUID_CFLAGS@
+UUID_LIBS = @UUID_LIBS@
+VERSION = @VERSION@
+ZLIB_CFLAGS = @ZLIB_CFLAGS@
+ZLIB_LIBS = @ZLIB_LIBS@
+abs_builddir = @abs_builddir@
+abs_srcdir = @abs_srcdir@
+abs_top_builddir = @abs_top_builddir@
+abs_top_srcdir = @abs_top_srcdir@
+ac_ct_CC = @ac_ct_CC@
+am__include = @am__include@
+am__leading_dot = @am__leading_dot@
+am__quote = @am__quote@
+am__tar = @am__tar@
+am__untar = @am__untar@
+ax_pthread_config = @ax_pthread_config@
+bindir = @bindir@
+build = @build@
+build_alias = @build_alias@
+build_cpu = @build_cpu@
+build_os = @build_os@
+build_target = @build_target@
+build_vendor = @build_vendor@
+builddir = @builddir@
+cachedir = @cachedir@
+chartsdir = @chartsdir@
+configdir = @configdir@
+datadir = @datadir@
+datarootdir = @datarootdir@
+docdir = @docdir@
+dvidir = @dvidir@
+exec_prefix = @exec_prefix@
+has_jemalloc = @has_jemalloc@
+has_tcmalloc = @has_tcmalloc@
+host = @host@
+host_alias = @host_alias@
+host_cpu = @host_cpu@
+host_os = @host_os@
+host_vendor = @host_vendor@
+htmldir = @htmldir@
+includedir = @includedir@
+infodir = @infodir@
+install_sh = @install_sh@
+libconfigdir = @libconfigdir@
+libdir = @libdir@
+libexecdir = @libexecdir@
+localedir = @localedir@
+localstatedir = @localstatedir@
+logdir = @logdir@
+mandir = @mandir@
+mkdir_p = @mkdir_p@
+nodedir = @nodedir@
+oldincludedir = @oldincludedir@
+pdfdir = @pdfdir@
+pluginsdir = @pluginsdir@
+prefix = @prefix@
+program_transform_name = @program_transform_name@
+psdir = @psdir@
+pythondir = @pythondir@
+registrydir = @registrydir@
+sbindir = @sbindir@
+sharedstatedir = @sharedstatedir@
+srcdir = @srcdir@
+sysconfdir = @sysconfdir@
+target_alias = @target_alias@
+top_build_prefix = @top_build_prefix@
+top_builddir = @top_builddir@
+top_srcdir = @top_srcdir@
+varlibdir = @varlibdir@
+webdir = @webdir@
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+SUBDIRS = \
+ $(NULL)
+
+dist_noinst_DATA = \
+ README.md \
+ $(NULL)
+
+all: all-recursive
+
+.SUFFIXES:
+$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps)
+ @for dep in $?; do \
+ case '$(am__configure_deps)' in \
+ *$$dep*) \
+ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \
+ && { if test -f $@; then exit 0; else break; fi; }; \
+ exit 1;; \
+ esac; \
+ done; \
+ echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu collectors/plugins.d/Makefile'; \
+ $(am__cd) $(top_srcdir) && \
+ $(AUTOMAKE) --gnu collectors/plugins.d/Makefile
+.PRECIOUS: Makefile
+Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
+ @case '$?' in \
+ *config.status*) \
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \
+ *) \
+ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \
+ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \
+ esac;
+
+$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+
+$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(am__aclocal_m4_deps):
+
+# This directory's subdirectories are mostly independent; you can cd
+# into them and run 'make' without going through this Makefile.
+# To change the values of 'make' variables: instead of editing Makefiles,
+# (1) if the variable is set in 'config.status', edit 'config.status'
+# (which will cause the Makefiles to be regenerated when you run 'make');
+# (2) otherwise, pass the desired values on the 'make' command line.
+$(am__recursive_targets):
+ @fail=; \
+ if $(am__make_keepgoing); then \
+ failcom='fail=yes'; \
+ else \
+ failcom='exit 1'; \
+ fi; \
+ dot_seen=no; \
+ target=`echo $@ | sed s/-recursive//`; \
+ case "$@" in \
+ distclean-* | maintainer-clean-*) list='$(DIST_SUBDIRS)' ;; \
+ *) list='$(SUBDIRS)' ;; \
+ esac; \
+ for subdir in $$list; do \
+ echo "Making $$target in $$subdir"; \
+ if test "$$subdir" = "."; then \
+ dot_seen=yes; \
+ local_target="$$target-am"; \
+ else \
+ local_target="$$target"; \
+ fi; \
+ ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \
+ || eval $$failcom; \
+ done; \
+ if test "$$dot_seen" = "no"; then \
+ $(MAKE) $(AM_MAKEFLAGS) "$$target-am" || exit 1; \
+ fi; test -z "$$fail"
+
+ID: $(am__tagged_files)
+ $(am__define_uniq_tagged_files); mkid -fID $$unique
+tags: tags-recursive
+TAGS: tags
+
+tags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files)
+ set x; \
+ here=`pwd`; \
+ if ($(ETAGS) --etags-include --version) >/dev/null 2>&1; then \
+ include_option=--etags-include; \
+ empty_fix=.; \
+ else \
+ include_option=--include; \
+ empty_fix=; \
+ fi; \
+ list='$(SUBDIRS)'; for subdir in $$list; do \
+ if test "$$subdir" = .; then :; else \
+ test ! -f $$subdir/TAGS || \
+ set "$$@" "$$include_option=$$here/$$subdir/TAGS"; \
+ fi; \
+ done; \
+ $(am__define_uniq_tagged_files); \
+ shift; \
+ if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \
+ test -n "$$unique" || unique=$$empty_fix; \
+ if test $$# -gt 0; then \
+ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \
+ "$$@" $$unique; \
+ else \
+ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \
+ $$unique; \
+ fi; \
+ fi
+ctags: ctags-recursive
+
+CTAGS: ctags
+ctags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files)
+ $(am__define_uniq_tagged_files); \
+ test -z "$(CTAGS_ARGS)$$unique" \
+ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \
+ $$unique
+
+GTAGS:
+ here=`$(am__cd) $(top_builddir) && pwd` \
+ && $(am__cd) $(top_srcdir) \
+ && gtags -i $(GTAGS_ARGS) "$$here"
+cscopelist: cscopelist-recursive
+
+cscopelist-am: $(am__tagged_files)
+ list='$(am__tagged_files)'; \
+ case "$(srcdir)" in \
+ [\\/]* | ?:[\\/]*) sdir="$(srcdir)" ;; \
+ *) sdir=$(subdir)/$(srcdir) ;; \
+ esac; \
+ for i in $$list; do \
+ if test -f "$$i"; then \
+ echo "$(subdir)/$$i"; \
+ else \
+ echo "$$sdir/$$i"; \
+ fi; \
+ done >> $(top_builddir)/cscope.files
+
+distclean-tags:
+ -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags
+
+distdir: $(DISTFILES)
+ @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+ list='$(DISTFILES)'; \
+ dist_files=`for file in $$list; do echo $$file; done | \
+ sed -e "s|^$$srcdirstrip/||;t" \
+ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \
+ case $$dist_files in \
+ */*) $(MKDIR_P) `echo "$$dist_files" | \
+ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \
+ sort -u` ;; \
+ esac; \
+ for file in $$dist_files; do \
+ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \
+ if test -d $$d/$$file; then \
+ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \
+ if test -d "$(distdir)/$$file"; then \
+ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+ fi; \
+ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \
+ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \
+ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+ fi; \
+ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \
+ else \
+ test -f "$(distdir)/$$file" \
+ || cp -p $$d/$$file "$(distdir)/$$file" \
+ || exit 1; \
+ fi; \
+ done
+ @list='$(DIST_SUBDIRS)'; for subdir in $$list; do \
+ if test "$$subdir" = .; then :; else \
+ $(am__make_dryrun) \
+ || test -d "$(distdir)/$$subdir" \
+ || $(MKDIR_P) "$(distdir)/$$subdir" \
+ || exit 1; \
+ dir1=$$subdir; dir2="$(distdir)/$$subdir"; \
+ $(am__relativize); \
+ new_distdir=$$reldir; \
+ dir1=$$subdir; dir2="$(top_distdir)"; \
+ $(am__relativize); \
+ new_top_distdir=$$reldir; \
+ echo " (cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) top_distdir="$$new_top_distdir" distdir="$$new_distdir" \\"; \
+ echo " am__remove_distdir=: am__skip_length_check=: am__skip_mode_fix=: distdir)"; \
+ ($(am__cd) $$subdir && \
+ $(MAKE) $(AM_MAKEFLAGS) \
+ top_distdir="$$new_top_distdir" \
+ distdir="$$new_distdir" \
+ am__remove_distdir=: \
+ am__skip_length_check=: \
+ am__skip_mode_fix=: \
+ distdir) \
+ || exit 1; \
+ fi; \
+ done
+check-am: all-am
+check: check-recursive
+all-am: Makefile $(DATA)
+installdirs: installdirs-recursive
+installdirs-am:
+install: install-recursive
+install-exec: install-exec-recursive
+install-data: install-data-recursive
+uninstall: uninstall-recursive
+
+install-am: all-am
+ @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
+
+installcheck: installcheck-recursive
+install-strip:
+ if test -z '$(STRIP)'; then \
+ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+ install; \
+ else \
+ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+ "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \
+ fi
+mostlyclean-generic:
+
+clean-generic:
+
+distclean-generic:
+ -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
+ -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES)
+
+maintainer-clean-generic:
+ @echo "This command is intended for maintainers to use"
+ @echo "it deletes files that may require special tools to rebuild."
+ -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES)
+clean: clean-recursive
+
+clean-am: clean-generic mostlyclean-am
+
+distclean: distclean-recursive
+ -rm -f Makefile
+distclean-am: clean-am distclean-generic distclean-tags
+
+dvi: dvi-recursive
+
+dvi-am:
+
+html: html-recursive
+
+html-am:
+
+info: info-recursive
+
+info-am:
+
+install-data-am:
+
+install-dvi: install-dvi-recursive
+
+install-dvi-am:
+
+install-exec-am:
+
+install-html: install-html-recursive
+
+install-html-am:
+
+install-info: install-info-recursive
+
+install-info-am:
+
+install-man:
+
+install-pdf: install-pdf-recursive
+
+install-pdf-am:
+
+install-ps: install-ps-recursive
+
+install-ps-am:
+
+installcheck-am:
+
+maintainer-clean: maintainer-clean-recursive
+ -rm -f Makefile
+maintainer-clean-am: distclean-am maintainer-clean-generic
+
+mostlyclean: mostlyclean-recursive
+
+mostlyclean-am: mostlyclean-generic
+
+pdf: pdf-recursive
+
+pdf-am:
+
+ps: ps-recursive
+
+ps-am:
+
+uninstall-am:
+
+.MAKE: $(am__recursive_targets) install-am install-strip
+
+.PHONY: $(am__recursive_targets) CTAGS GTAGS TAGS all all-am check \
+ check-am clean clean-generic cscopelist-am ctags ctags-am \
+ distclean distclean-generic distclean-tags distdir dvi dvi-am \
+ html html-am info info-am install install-am install-data \
+ install-data-am install-dvi install-dvi-am install-exec \
+ install-exec-am install-html install-html-am install-info \
+ install-info-am install-man install-pdf install-pdf-am \
+ install-ps install-ps-am install-strip installcheck \
+ installcheck-am installdirs installdirs-am maintainer-clean \
+ maintainer-clean-generic mostlyclean mostlyclean-generic pdf \
+ pdf-am ps ps-am tags tags-am uninstall uninstall-am
+
+
+# Tell versions [3.59,3.63) of GNU make to not export all variables.
+# Otherwise a system limit (for SysV at least) may be exceeded.
+.NOEXPORT:
diff --git a/collectors/plugins.d/README.md b/collectors/plugins.d/README.md
new file mode 100644
index 000000000..d3aa5b5b0
--- /dev/null
+++ b/collectors/plugins.d/README.md
@@ -0,0 +1,472 @@
+# Netdata External Plugins
+
+`plugins.d` is the netdata internal plugin that collects metrics
+from external processes, thus allowing netdata to use **external plugins**.
+
+## Provided External Plugins
+
+plugin|language|O/S|description
+:---:|:---:|:---:|:---
+[apps.plugin](../apps.plugin/)|`C`|linux, freebsd|monitors the whole process tree on Linux and FreeBSD and breaks down system resource usage by **process**, **user** and **user group**.
+[charts.d.plugin](../charts.d.plugin/)|`BASH`|all|a **plugin orchestrator** for data collection modules written in `BASH` v4+.
+[fping.plugin](../fping.plugin/)|`C`|all|measures network latency, jitter and packet loss between the monitored node and any number of remote network end points.
+[freeipmi.plugin](../freeipmi.plugin/)|`C`|linux|collects metrics from enterprise hardware sensors, on Linux servers.
+[node.d.plugin](../node.d.plugin/)|`node.js`|all|a **plugin orchestrator** for data collection modules written in `node.js`.
+[python.d.plugin](../python.d.plugin/)|`python`|all|a **plugin orchestrator** for data collection modules written in `python` v2 or v3 (both are supported).
+
+Plugin orchestrators may also be described as **modular plugins**. They are modular since they accept custom-made modules to be included. Writing modules for these plugins is easier than accessing the native netdata API directly. You will find modules already available for each orchestrator under the directory of the particular modular plugin (e.g. under python.d.plugin for the python orchestrator).
+Each of these modular plugins has its own method for defining modules. Please check the examples and their documentation.
+
+## Motivation
+
+This plugin allows netdata to use **external plugins** for data collection:
+
+1. external data collection plugins may be written in any computer language.
+
+2. external data collection plugins may use O/S capabilities or `setuid` to
+ run with escalated privileges (compared to the netdata daemon).
+ The communication between the external plugin and netdata is unidirectional
+ (from the plugin to netdata), so that netdata cannot manipulate an external
+ plugin running with escalated privileges.
+
+## Operation
+
+Each of the external plugins is expected to run forever.
+Netdata will start it when netdata starts and stop it when netdata exits.
+
+If the external plugin exits or crashes, netdata will log an error.
+If the external plugin exits or crashes without pushing metrics to netdata, netdata will not start it again.
+- Plugins that exit with any value other than zero will be disabled. Plugins that exit with zero will be restarted after some time.
+- Plugins may also be disabled by netdata if they output things that netdata does not understand.
+
+The `stdout` of external plugins is connected to netdata to receive metrics,
+with the API defined below.
+
+The `stderr` of external plugins is connected to netdata `error.log`.
+
+Plugins can create any number of charts with any number of dimensions each. Each chart can have its own characteristics independently of the others generated by the same plugin. For example, one chart may have an update frequency of 1 second, another may have 5 seconds and a third may have 10 seconds.
+
+## Configuration
+
+Netdata supplies the environment variables `NETDATA_USER_CONFIG_DIR` (for user-supplied configuration) and `NETDATA_STOCK_CONFIG_DIR` (for netdata-supplied configuration) to identify the directories where configuration files are stored. It is up to the plugin to read the configuration it needs.
+
+The `[plugins]` section of `netdata.conf` contains a list of all the plugins found on the system where netdata runs, with a boolean setting to enable or disable each of them.
+
+Example:
+
+```
+[plugins]
+ # enable running new plugins = yes
+ # check for new plugins every = 60
+
+ # charts.d = yes
+ # fping = yes
+ # node.d = yes
+ # python.d = yes
+```
+
+The setting `enable running new plugins` changes the default behavior for all external plugins.
+So if set to `no`, only the plugins that are explicitly set to `yes` will be run.
+
+The setting `check for new plugins every` controls how often the directory `/usr/libexec/netdata/plugins.d`
+is rescanned for new plugins. So, new plugins can be added at any time.
+
+For each of the external plugins enabled, another `netdata.conf` section
+is created, in the form of `[plugin:NAME]`, where `NAME` is the name of the external plugin.
+This section allows controlling the update frequency of the plugin and providing
+additional command line arguments to it.
+
+For example, for `apps.plugin` the following section is available:
+
+```
+[plugin:apps]
+ # update every = 1
+ # command options =
+```
+
+- `update every` controls the granularity of the external plugin.
+- `command options` allows giving additional command line options to the plugin.
+
+
+Netdata provides the environment variable `NETDATA_UPDATE_EVERY` to external plugins, in seconds (the default is 1). This is the **minimum update frequency** for all charts. A plugin that updates values more frequently than this is just wasting resources.
+
+Netdata will call the plugin with just one command line parameter: the number of seconds the user requested this plugin to update its data (by default this is also 1).
+
+Other than the above, the plugin configuration is up to the plugin.
+
+Keep in mind that the user may use netdata configuration to overwrite chart and dimension parameters. This is transparent to the plugin.
+
+### Autoconfiguration
+
+Plugins should attempt to autoconfigure themselves when possible.
+
+For example, if your plugin wants to monitor `squid`, you can search for it on port `3128` or `8080`. If either succeeds, you can proceed. If both fail, you can output an error (on stderr) saying that you cannot find `squid` running and giving instructions about the plugin configuration. Then you can stop (exit with a non-zero value), so that netdata will not attempt to start the plugin again.
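+
+A minimal sketch of this pattern in Python (the port numbers come from the example above; the host, timeout and error message are illustrative assumptions):
+
+```python
+import socket
+import sys
+
+def squid_is_listening(ports=(3128, 8080)):
+    # try to connect to each candidate port on localhost
+    for port in ports:
+        try:
+            with socket.create_connection(('127.0.0.1', port), timeout=1):
+                return True
+        except OSError:
+            continue
+    return False
+
+if not squid_is_listening():
+    sys.stderr.write('cannot find squid on ports 3128 or 8080 - check the plugin configuration\n')
+    sys.exit(1)  # non-zero exit: netdata will not try to start the plugin again
+```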
+
+## External Plugins API
+
+Any program that can print a few values to its standard output can become a netdata external plugin.
+
+There are 7 kinds of lines netdata parses, each starting with one of the following keywords:
+
+- `CHART` - create or update a chart
+- `DIMENSION` - add or update a dimension to the chart just created
+- `BEGIN` - initialize data collection for a chart
+- `SET` - set the value of a dimension for the initialized chart
+- `END` - complete data collection for the initialized chart
+- `FLUSH` - ignore the last collected values
+- `DISABLE` - disable this plugin
+
+A single program can produce any number of charts with any number of dimensions each.
+
+Charts can be added at any time (not just at the beginning).
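+
+For example, a plugin maintaining one chart with a single dimension would print something like the following (the chart id `example.random` and dimension `random1` are made-up names for illustration). The `CHART` and `DIMENSION` lines are printed once at startup; the `BEGIN`/`SET`/`END` block is repeated on every iteration:
+
+```
+CHART example.random '' "A random number" "number" random random.number line 90000 1
+DIMENSION random1 random absolute 1 1
+BEGIN example.random
+SET random1 = 42
+END
+```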
+
+### command line parameters
+
+The plugin **MUST** accept just **one** parameter: **the number of seconds it is
+expected to update the values for its charts**. The value passed by netdata
+to the plugin is controlled via its configuration file (so there is no need
+for the plugin to handle this configuration option).
+
+The external plugin can overwrite the update frequency. For example, the server may
+request per second updates, but the plugin may ignore it and update its charts
+every 5 seconds.
+
+### environment variables
+
+There are a few environment variables that are set by `netdata` and are
+available for the plugin to use.
+
+variable|description
+:------:|:----------
+`NETDATA_USER_CONFIG_DIR`|The directory where all netdata related user configuration should be stored. If the plugin requires custom user configuration, this is the place the user has saved it (normally under `/etc/netdata`).
+`NETDATA_STOCK_CONFIG_DIR`|The directory where all netdata related stock configuration should be stored. If the plugin is shipped with configuration files, this is the place they can be found (normally under `/usr/lib/netdata/conf.d`).
+`NETDATA_PLUGINS_DIR`|The directory where all netdata plugins are stored.
+`NETDATA_WEB_DIR`|The directory where the web files of netdata are saved.
+`NETDATA_CACHE_DIR`|The directory where the cache files of netdata are stored. Use this directory if the plugin requires a place to store data. A new directory should be created for the plugin for this purpose, inside this directory.
+`NETDATA_LOG_DIR`|The directory where the log files are stored. By default the `stderr` output of the plugin will be saved in the `error.log` file of netdata.
+`NETDATA_HOST_PREFIX`|This is used in environments where system directories like `/sys` and `/proc` have to be accessed at a different path.
+`NETDATA_DEBUG_FLAGS`|This is a number (probably in hex starting with `0x`), that enables certain netdata debugging features. Check **[[Tracing Options]]** for more information.
+`NETDATA_UPDATE_EVERY`|The minimum number of seconds between chart refreshes. This is like the **internal clock** of netdata (it is user configurable, defaulting to `1`). There is no meaning for a plugin to update its values more frequently than this number of seconds.
+
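+For example (an illustrative sketch in Python; the fallback paths are assumptions, not guaranteed defaults), a plugin could pick up these variables like this:
+
+```python
+import os
+
+# directories provided by netdata (the fallback paths are only illustrative)
+user_config_dir = os.environ.get('NETDATA_USER_CONFIG_DIR', '/etc/netdata')
+stock_config_dir = os.environ.get('NETDATA_STOCK_CONFIG_DIR', '/usr/lib/netdata/conf.d')
+
+# never update charts more frequently than this many seconds
+update_every = int(os.environ.get('NETDATA_UPDATE_EVERY', '1'))
+```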
+
+### The output of the plugin
+
+The plugin should output instructions for netdata to its output (`stdout`). Since this uses pipes, please make sure you flush stdout after every iteration.
+
+#### DISABLE
+
+`DISABLE` will disable this plugin. This will prevent netdata from restarting the plugin. You can also exit with the value `1` to have the same effect.
+
+#### CHART
+
+`CHART` defines a new chart.
+
+the template is:
+
+> CHART type.id name title units [family [context [charttype [priority [update_every [options [plugin [module]]]]]]]]
+
+ where:
+ - `type.id`
+
+ uniquely identifies the chart,
+ this is what will be needed to add values to the chart
+
+ the `type` part controls the menu the charts will appear in
+
+ - `name`
+
+    is the name that will be presented to the user instead of `id` in `type.id`. This means that only the `id` part of `type.id` is changed. When a name has been given, the chart is indexed (and can be referred to) as both `type.id` and `type.name`. You can set the name to `''`, `null`, or `(null)` to disable it.
+
+ - `title`
+
+ the text above the chart
+
+ - `units`
+
+ the label of the vertical axis of the chart,
+ all dimensions added to a chart should have the same units
+ of measurement
+
+ - `family`
+
+ is used to group charts together
+ (for example all eth0 charts should say: eth0),
+ if empty or missing, the `id` part of `type.id` will be used
+
+ this controls the sub-menu on the dashboard
+
+ - `context`
+
+    the context gives the template of the chart. For example, if multiple charts present the same information for a different family, they should have the same `context`
+
+    this is used for looking up rendering information for the chart (colors, sizes, informational texts) and also for applying alarms to it
+
+ - `charttype`
+
+ one of `line`, `area` or `stacked`,
+ if empty or missing, the `line` will be used
+
+ - `priority`
+
+ is the relative priority of the charts as rendered on the web page,
+ lower numbers make the charts appear before the ones with higher numbers,
+ if empty or missing, `1000` will be used
+
+ - `update_every`
+
+ overwrite the update frequency set by the server,
+ if empty or missing, the user configured value will be used
+
+ - `options`
+
+    a space-separated list of options, enclosed in quotes. 4 options are currently supported: `obsolete` to mark a chart as obsolete (netdata will hide it and delete it after some time), `detail` to mark a chart as insignificant (this may be used by dashboards to make the charts smaller, or somehow visualize properly a less important chart), `store_first` to make netdata store the first collected value, assuming there was an invisible previous value set to zero (this is used by statsd charts - if the first data collected value of incremental dimensions is not zero-based, unrealistic spikes will appear with this option set) and `hidden` to perform all operations on a chart, but not offer it on dashboards (the chart will be sent to backends). `CHART` options have been added in netdata v1.7 and the `hidden` option was added in 1.10.
+
+ - `plugin` and `module`
+
+    both are just names that are used to let the user identify the plugin and the module that generated the chart. If `plugin` is unset or empty, netdata will automatically set it to the filename of the plugin that generated the chart. `module` has no default.
+
+
+#### DIMENSION
+
+`DIMENSION` defines a new dimension for the chart
+
+the template is:
+
+> DIMENSION id [name [algorithm [multiplier [divisor [hidden]]]]]
+
+ where:
+
+ - `id`
+
+ the `id` of this dimension (it is a text value, not numeric),
+ this will be needed later to add values to the dimension
+
+    We suggest avoiding `.` in dimension ids. Backends expect metrics to be `.`-separated, and people will get confused if a dimension id contains a dot.
+
+ - `name`
+
+ the name of the dimension as it will appear at the legend of the chart,
+ if empty or missing the `id` will be used
+
+ - `algorithm`
+
+ one of:
+
+ * `absolute`
+
+        the value is drawn as-is (interpolated to the second boundary),
+ if `algorithm` is empty, invalid or missing, `absolute` is used
+
+ * `incremental`
+
+ the value increases over time,
+ the difference from the last value is presented in the chart,
+ the server interpolates the value and calculates a per second figure
+
+ * `percentage-of-absolute-row`
+
+ the % of this value compared to the total of all dimensions
+
+ * `percentage-of-incremental-row`
+
+ the % of this value compared to the incremental total of
+ all dimensions
+
+ - `multiplier`
+
+ an integer value to multiply the collected value,
+ if empty or missing, `1` is used
+
+ - `divisor`
+
+ an integer value to divide the collected value,
+ if empty or missing, `1` is used
+
+ - `hidden`
+
+ giving the keyword `hidden` will make this dimension hidden,
+ it will take part in the calculations but will not be presented in the chart
+
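+As an illustration of the fields above (all names are hypothetical), a dimension that receives a temperature in milli-degrees, scales it down to degrees and keeps it out of the chart could be declared as:
+
+```
+DIMENSION temp_raw temperature absolute 1 1000 hidden
+```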
+
+#### VARIABLE
+
+> VARIABLE [SCOPE] name = value
+
+`VARIABLE` defines a variable that can be used in alarms. This is used for setting constants (like the maximum number of connections a server may accept).
+
+Variables support 2 scopes:
+
+- `GLOBAL` or `HOST` to define the variable at the host level.
+- `LOCAL` or `CHART` to define the variable at the chart level. Use chart-local variables when the same variable may exist for different charts (e.g. netdata monitors 2 mysql servers, and you need to set the `max_connections` each server accepts). Using chart-local variables is ideal for building alarm templates.
+
+The position of the `VARIABLE` line sets its default scope (in case you do not specify a scope). So, defining a `VARIABLE` before any `CHART`, or between `END` and `BEGIN` (outside any chart), sets `GLOBAL` scope, while defining a `VARIABLE` just after a `CHART` or a `DIMENSION`, or within the `BEGIN` - `END` block of a chart, sets `LOCAL` scope.
+
+These variables can be set and updated at any point.
+
+Variable names should use alphanumeric characters, the `.` and the `_`.
+
+The `value` is a floating point number (netdata uses `long double`).
+
+Variables are transferred to upstream netdata servers (streaming and database replication).
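+
+For example (illustrative names and values), a plugin could announce a host-level constant and, after the relevant `CHART` line, a chart-local constant:
+
+```
+VARIABLE HOST max_connections = 100
+VARIABLE CHART max_temperature = 85.5
+```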
+
+## Data collection
+
+Data collection is defined as a series of `BEGIN` -> `SET` -> `END` lines:
+
+> BEGIN type.id [microseconds]
+
+ - `type.id`
+
+ is the unique identification of the chart (as given in `CHART`)
+
+ - `microseconds`
+
+ is the number of microseconds since the last update of the chart. It is optional.
+
+ Under heavy system load, the system may have some latency transferring
+ data from the plugins to netdata via the pipe. This number improves
+ accuracy significantly, since the plugin is able to calculate the
+ duration between its iterations better than netdata.
+
+ The first time the plugin is started, no microseconds should be given
+ to netdata.
+
+> SET id = value
+
+ - `id`
+
+    is the unique identification of the dimension (of the chart that was just begun)
+
+ - `value`
+
+    is the collected value; only integer values are collected. If you want to push fractional values, multiply this value by 100 or 1000 and set the `DIMENSION` divisor to the same number.
+
+> END
+
+    END does not take any parameters; it commits the collected values for all dimensions to the chart. If a dimension was not `SET`, its value will be empty for this commit.
+
+More `SET` lines may appear to update all the dimensions of the chart,
+all of them within a single `BEGIN` -> `END` block.
+
+All `SET` lines within a single `BEGIN` -> `END` block have to refer to the
+same chart.
+
+If more charts need to be updated, each chart should have its own
+`BEGIN` -> `SET` -> `END` block.
+
+If, for any reason, a plugin has issued a `BEGIN` but wants to cancel it,
+it can issue a `FLUSH`. The `FLUSH` command will instruct netdata to ignore
+all the values collected since the last `BEGIN` command.
+
+If a plugin does not behave properly (outputs invalid lines, or does not
+follow these guidelines), it will be disabled by netdata.
+
+### collected values
+
+netdata will collect any **signed** value in the 64-bit range:
+`-9,223,372,036,854,775,808` to `+9,223,372,036,854,775,807`
+
+If a value is not collected, leave it empty, like this:
+
+`SET id = `
+
+or do not output the line at all.
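+
+For example (hypothetical chart and dimension ids), to report a load average of `1.25` with two decimal places, declare the dimension with a divisor of `100` and send the value multiplied by `100`:
+
+```
+DIMENSION load1 load absolute 1 100
+BEGIN example.load
+SET load1 = 125
+END
+```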
+
+## Modular Plugins
+
+1. **python**, use `python.d.plugin`, there are many examples in the [python.d directory](../python.d.plugin)
+
+   Python is ideal for netdata plugins. It is a simple yet powerful way to collect data, and it has a very small memory footprint, although it is not the most CPU-efficient way to do it.
+
+2. **node.js**, use `node.d.plugin`, there are a few examples in the [node.d directory](../node.d.plugin)
+
+ node.js is the fastest scripting language for collecting data. If your plugin needs to do a lot of work, compute values, etc, node.js is probably the best choice before moving to compiled code. Keep in mind though that node.js is not memory efficient; it will probably need more RAM compared to python.
+
+3. **BASH**, use `charts.d.plugin`, there are many examples in the [charts.d directory](../charts.d.plugin)
+
+   BASH is the simplest scripting language for collecting values. It is the least efficient, though, in terms of CPU resources. You can use it to collect data quickly, but extensive use of it might use a lot of system resources.
+
+4. **C**
+
+ Of course, C is the most efficient way of collecting data. This is why netdata itself is written in C.
+
+---
+
+## Writing Plugins Properly
+
+There are a few rules for writing plugins properly:
+
+1. Respect system resources
+
+ Pay special attention to efficiency:
+
+ - Initialize everything once, at the beginning. Initialization is not an expensive operation. Your plugin will most probably be started once and run forever. So, do whatever heavy operation is needed at the beginning, just once.
+ - Do the absolutely minimum while iterating to collect values repeatedly.
+ - If you need to connect to another server to collect values, avoid re-connects if possible. Connect just once, with keep-alive (for HTTP) enabled and collect values using the same connection.
+   - Avoid any CPU or memory heavy operation while collecting data. If you control memory allocation, avoid any memory allocation while iterating to collect values.
+   - Avoid running external commands when possible. If you are writing shell scripts, especially avoid pipes (each pipe is another fork, a very expensive operation).
+
+2. The best way to iterate at a constant pace is this pseudo code:
+
+```js
+ var update_every = argv[1] * 1000; /* seconds * 1000 = milliseconds */
+
+ readConfiguration();
+
+ if(!verifyWeCanCollectValues()) {
+ print "DISABLE";
+ exit(1);
+ }
+
+ createCharts(); /* print CHART and DIMENSION statements */
+
+ var loops = 0;
+ var last_run = 0;
+ var next_run = 0;
+ var dt_since_last_run = 0;
+ var now = 0;
+
+ FOREVER {
+ /* find the current time in milliseconds */
+ now = currentTimeStampInMilliseconds();
+
+ /*
+ * find the time of the next loop
+ * this makes sure we are always aligned
+ * with the netdata daemon
+ */
+ next_run = now - (now % update_every) + update_every;
+
+ /*
+ * wait until it is time
+ * it is important to do it in a loop
+ * since many wait functions can be interrupted
+ */
+ while( now < next_run ) {
+ sleepMilliseconds(next_run - now);
+ now = currentTimeStampInMilliseconds();
+ }
+
+ /* calculate the time passed since the last run */
+ if ( loops > 0 )
+ dt_since_last_run = (now - last_run) * 1000; /* in microseconds */
+
+ /* prepare for the next loop */
+ last_run = now;
+ loops++;
+
+ /* do your magic here to collect values */
+ collectValues();
+
+ /* send the collected data to netdata */
+ printValues(dt_since_last_run); /* print BEGIN, SET, END statements */
+ }
+```
+
+ Using the above procedure, your plugin will be synchronized to start data collection on steps of `update_every`. There will be no need to keep track of latencies in data collection.
+
+ Netdata interpolates values to second boundaries, so even if your plugin is not perfectly aligned it does not matter. Netdata will find out. When your plugin works in increments of `update_every`, there will be no gaps in the charts due to the possible cumulative micro-delays in data collection. Gaps will only appear if the data collection is really delayed.
+
+3. If you are not sure whether your plugin leaks memory, exit every hour. Netdata will restart your process.
+
+4. If possible, try to autodetect if your plugin should be enabled, without any configuration.
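+
+Putting it all together, below is a minimal, self-contained external plugin written in Python. It is only a sketch following the pseudo code above; the chart id `example.random` and dimension `random1` are made up, and it is not a plugin shipped with netdata:
+
+```python
+#!/usr/bin/env python3
+# minimal illustrative external plugin (a sketch, not part of netdata)
+
+import os
+import random
+import sys
+import time
+
+# netdata passes the requested update frequency as the only argument;
+# fall back to NETDATA_UPDATE_EVERY, then to 1 second
+try:
+    update_every = int(sys.argv[1])
+except (IndexError, ValueError):
+    update_every = int(os.environ.get('NETDATA_UPDATE_EVERY', '1'))
+
+# define the chart and its dimension once, at startup
+print('CHART example.random \'\' "A random number" "number" random random.number line 90000 %d' % update_every)
+print('DIMENSION random1 random absolute 1 1')
+sys.stdout.flush()
+
+last_run = 0.0
+while True:
+    # align each iteration to a multiple of update_every
+    now = time.time()
+    next_run = now - (now % update_every) + update_every
+    while now < next_run:
+        time.sleep(next_run - now)
+        now = time.time()
+
+    # microseconds since the previous iteration (omitted on the first run)
+    usec = '' if last_run == 0.0 else ' %d' % int((now - last_run) * 1000000)
+    last_run = now
+
+    print('BEGIN example.random%s' % usec)
+    print('SET random1 = %d' % random.randint(0, 100))
+    print('END')
+    sys.stdout.flush()  # stdout is a pipe, flush after every iteration
+```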
diff --git a/collectors/plugins.d/plugins_d.c b/collectors/plugins.d/plugins_d.c
new file mode 100644
index 000000000..465ecd796
--- /dev/null
+++ b/collectors/plugins.d/plugins_d.c
@@ -0,0 +1,696 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "plugins_d.h"
+
+char *plugin_directories[PLUGINSD_MAX_DIRECTORIES] = { NULL };
+char *netdata_configured_plugins_dir_base;
+
+struct plugind *pluginsd_root = NULL;
+
+static inline int pluginsd_space(char c) {
+ switch(c) {
+ case ' ':
+ case '\t':
+ case '\r':
+ case '\n':
+ case '=':
+ return 1;
+
+ default:
+ return 0;
+ }
+}
+
+inline int config_isspace(char c) {
+ switch(c) {
+ case ' ':
+ case '\t':
+ case '\r':
+ case '\n':
+ case ',':
+ return 1;
+
+ default:
+ return 0;
+ }
+}
+
+// split a text into words, respecting quotes
+inline int quoted_strings_splitter(char *str, char **words, int max_words, int (*custom_isspace)(char)) {
+ char *s = str, quote = 0;
+ int i = 0, j;
+
+ // skip all white space
+ while(unlikely(custom_isspace(*s))) s++;
+
+ // check for quote
+ if(unlikely(*s == '\'' || *s == '"')) {
+ quote = *s; // remember the quote
+ s++; // skip the quote
+ }
+
+ // store the first word
+ words[i++] = s;
+
+ // while we have something
+ while(likely(*s)) {
+ // if it is escape
+ if(unlikely(*s == '\\' && s[1])) {
+ s += 2;
+ continue;
+ }
+
+ // if it is quote
+ else if(unlikely(*s == quote)) {
+ quote = 0;
+ *s = ' ';
+ continue;
+ }
+
+ // if it is a space
+ else if(unlikely(quote == 0 && custom_isspace(*s))) {
+
+ // terminate the word
+ *s++ = '\0';
+
+ // skip all white space
+ while(likely(custom_isspace(*s))) s++;
+
+ // check for quote
+ if(unlikely(*s == '\'' || *s == '"')) {
+ quote = *s; // remember the quote
+ s++; // skip the quote
+ }
+
+ // if we reached the end, stop
+ if(unlikely(!*s)) break;
+
+ // store the next word
+ if(likely(i < max_words)) words[i++] = s;
+ else break;
+ }
+
+ // anything else
+ else s++;
+ }
+
+ // terminate the words
+ j = i;
+ while(likely(j < max_words)) words[j++] = NULL;
+
+ return i;
+}
+
+inline int pluginsd_split_words(char *str, char **words, int max_words) {
+ return quoted_strings_splitter(str, words, max_words, pluginsd_space);
+}
+
+inline size_t pluginsd_process(RRDHOST *host, struct plugind *cd, FILE *fp, int trust_durations) {
+ int enabled = cd->enabled;
+
+ if(!fp || !enabled) {
+ cd->enabled = 0;
+ return 0;
+ }
+
+ size_t count = 0;
+
+ char line[PLUGINSD_LINE_MAX + 1];
+
+ char *words[PLUGINSD_MAX_WORDS] = { NULL };
+ uint32_t BEGIN_HASH = simple_hash(PLUGINSD_KEYWORD_BEGIN);
+ uint32_t END_HASH = simple_hash(PLUGINSD_KEYWORD_END);
+ uint32_t FLUSH_HASH = simple_hash(PLUGINSD_KEYWORD_FLUSH);
+ uint32_t CHART_HASH = simple_hash(PLUGINSD_KEYWORD_CHART);
+ uint32_t DIMENSION_HASH = simple_hash(PLUGINSD_KEYWORD_DIMENSION);
+ uint32_t DISABLE_HASH = simple_hash(PLUGINSD_KEYWORD_DISABLE);
+ uint32_t VARIABLE_HASH = simple_hash(PLUGINSD_KEYWORD_VARIABLE);
+
+ RRDSET *st = NULL;
+ uint32_t hash;
+
+ errno = 0;
+ clearerr(fp);
+
+ if(unlikely(fileno(fp) == -1)) {
+ error("file descriptor given is not a valid stream");
+ goto cleanup;
+ }
+
+ while(!ferror(fp)) {
+ if(unlikely(netdata_exit)) break;
+
+ char *r = fgets(line, PLUGINSD_LINE_MAX, fp);
+ if(unlikely(!r)) {
+ error("read failed");
+ break;
+ }
+
+ if(unlikely(netdata_exit)) break;
+
+ line[PLUGINSD_LINE_MAX] = '\0';
+
+ int w = pluginsd_split_words(line, words, PLUGINSD_MAX_WORDS);
+ char *s = words[0];
+ if(unlikely(!s || !*s || !w)) {
+ continue;
+ }
+
+ // debug(D_PLUGINSD, "PLUGINSD: words 0='%s' 1='%s' 2='%s' 3='%s' 4='%s' 5='%s' 6='%s' 7='%s' 8='%s' 9='%s'", words[0], words[1], words[2], words[3], words[4], words[5], words[6], words[7], words[8], words[9]);
+
+ if(likely(!simple_hash_strcmp(s, "SET", &hash))) {
+ char *dimension = words[1];
+ char *value = words[2];
+
+ if(unlikely(!dimension || !*dimension)) {
+                error("requested a SET on chart '%s' of host '%s', without a dimension. Disabling it.", st?st->id:"UNSET", host->hostname);
+ enabled = 0;
+ break;
+ }
+
+ if(unlikely(!value || !*value)) value = NULL;
+
+ if(unlikely(!st)) {
+ error("requested a SET on dimension %s with value %s on host '%s', without a BEGIN. Disabling it.", dimension, value?value:"<nothing>", host->hostname);
+ enabled = 0;
+ break;
+ }
+
+ if(unlikely(rrdset_flag_check(st, RRDSET_FLAG_DEBUG)))
+ debug(D_PLUGINSD, "is setting dimension %s/%s to %s", st->id, dimension, value?value:"<nothing>");
+
+ if(value) {
+ RRDDIM *rd = rrddim_find(st, dimension);
+ if(unlikely(!rd)) {
+ error("requested a SET to dimension with id '%s' on stats '%s' (%s) on host '%s', which does not exist. Disabling it.", dimension, st->name, st->id, st->rrdhost->hostname);
+ enabled = 0;
+ break;
+ }
+ else
+ rrddim_set_by_pointer(st, rd, strtoll(value, NULL, 0));
+ }
+ }
+ else if(likely(hash == BEGIN_HASH && !strcmp(s, PLUGINSD_KEYWORD_BEGIN))) {
+ char *id = words[1];
+ char *microseconds_txt = words[2];
+
+ if(unlikely(!id)) {
+ error("requested a BEGIN without a chart id for host '%s'. Disabling it.", host->hostname);
+ enabled = 0;
+ break;
+ }
+
+ st = rrdset_find(host, id);
+ if(unlikely(!st)) {
+ error("requested a BEGIN on chart '%s', which does not exist on host '%s'. Disabling it.", id, host->hostname);
+ enabled = 0;
+ break;
+ }
+
+ if(likely(st->counter_done)) {
+ usec_t microseconds = 0;
+ if(microseconds_txt && *microseconds_txt) microseconds = str2ull(microseconds_txt);
+
+ if(likely(microseconds)) {
+ if(trust_durations)
+ rrdset_next_usec_unfiltered(st, microseconds);
+ else
+ rrdset_next_usec(st, microseconds);
+ }
+ else rrdset_next(st);
+ }
+ }
+ else if(likely(hash == END_HASH && !strcmp(s, PLUGINSD_KEYWORD_END))) {
+ if(unlikely(!st)) {
+ error("requested an END, without a BEGIN on host '%s'. Disabling it.", host->hostname);
+ enabled = 0;
+ break;
+ }
+
+ if(unlikely(rrdset_flag_check(st, RRDSET_FLAG_DEBUG)))
+ debug(D_PLUGINSD, "requested an END on chart %s", st->id);
+
+ rrdset_done(st);
+ st = NULL;
+
+ count++;
+ }
+ else if(likely(hash == CHART_HASH && !strcmp(s, PLUGINSD_KEYWORD_CHART))) {
+ st = NULL;
+
+ char *type = words[1];
+ char *name = words[2];
+ char *title = words[3];
+ char *units = words[4];
+ char *family = words[5];
+ char *context = words[6];
+ char *chart = words[7];
+ char *priority_s = words[8];
+ char *update_every_s = words[9];
+ char *options = words[10];
+ char *plugin = words[11];
+ char *module = words[12];
+
+ // parse the id from type
+ char *id = NULL;
+ if(likely(type && (id = strchr(type, '.')))) {
+ *id = '\0';
+ id++;
+ }
+
+ // make sure we have the required variables
+ if(unlikely(!type || !*type || !id || !*id)) {
+ error("requested a CHART, without a type.id, on host '%s'. Disabling it.", host->hostname);
+ enabled = 0;
+ break;
+ }
+
+ // parse the name, and make sure it does not include 'type.'
+ if(unlikely(name && *name)) {
+ // when data are coming from slaves
+ // name will be type.name
+ // so we have to remove 'type.' from name too
+ size_t len = strlen(type);
+ if(strncmp(type, name, len) == 0 && name[len] == '.')
+ name = &name[len + 1];
+
+ // if the name is the same with the id,
+ // or is just 'NULL', clear it.
+ if(unlikely(strcmp(name, id) == 0 || strcasecmp(name, "NULL") == 0 || strcasecmp(name, "(NULL)") == 0))
+ name = NULL;
+ }
+
+ int priority = 1000;
+ if(likely(priority_s && *priority_s)) priority = str2i(priority_s);
+
+ int update_every = cd->update_every;
+ if(likely(update_every_s && *update_every_s)) update_every = str2i(update_every_s);
+ if(unlikely(!update_every)) update_every = cd->update_every;
+
+ RRDSET_TYPE chart_type = RRDSET_TYPE_LINE;
+ if(unlikely(chart)) chart_type = rrdset_type_id(chart);
+
+ if(unlikely(name && !*name)) name = NULL;
+ if(unlikely(family && !*family)) family = NULL;
+ if(unlikely(context && !*context)) context = NULL;
+ if(unlikely(!title)) title = "";
+ if(unlikely(!units)) units = "unknown";
+
+ debug(D_PLUGINSD, "creating chart type='%s', id='%s', name='%s', family='%s', context='%s', chart='%s', priority=%d, update_every=%d"
+ , type, id
+ , name?name:""
+ , family?family:""
+ , context?context:""
+ , rrdset_type_name(chart_type)
+ , priority
+ , update_every
+ );
+
+ st = rrdset_create(
+ host
+ , type
+ , id
+ , name
+ , family
+ , context
+ , title
+ , units
+ , (plugin && *plugin)?plugin:cd->filename
+ , module
+ , priority
+ , update_every
+ , chart_type
+ );
+
+ if(options && *options) {
+ if(strstr(options, "obsolete"))
+ rrdset_is_obsolete(st);
+ else
+ rrdset_isnot_obsolete(st);
+
+ if(strstr(options, "detail"))
+ rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
+ else
+ rrdset_flag_clear(st, RRDSET_FLAG_DETAIL);
+
+ if(strstr(options, "hidden"))
+ rrdset_flag_set(st, RRDSET_FLAG_HIDDEN);
+ else
+ rrdset_flag_clear(st, RRDSET_FLAG_HIDDEN);
+
+ if(strstr(options, "store_first"))
+ rrdset_flag_set(st, RRDSET_FLAG_STORE_FIRST);
+ else
+ rrdset_flag_clear(st, RRDSET_FLAG_STORE_FIRST);
+ }
+ else {
+ rrdset_isnot_obsolete(st);
+ rrdset_flag_clear(st, RRDSET_FLAG_DETAIL);
+ rrdset_flag_clear(st, RRDSET_FLAG_STORE_FIRST);
+ }
+ }
+ else if(likely(hash == DIMENSION_HASH && !strcmp(s, PLUGINSD_KEYWORD_DIMENSION))) {
+ char *id = words[1];
+ char *name = words[2];
+ char *algorithm = words[3];
+ char *multiplier_s = words[4];
+ char *divisor_s = words[5];
+ char *options = words[6];
+
+ if(unlikely(!id || !*id)) {
+ error("requested a DIMENSION, without an id, host '%s' and chart '%s'. Disabling it.", host->hostname, st?st->id:"UNSET");
+ enabled = 0;
+ break;
+ }
+
+ if(unlikely(!st)) {
+ error("requested a DIMENSION, without a CHART, on host '%s'. Disabling it.", host->hostname);
+ enabled = 0;
+ break;
+ }
+
+ long multiplier = 1;
+ if(multiplier_s && *multiplier_s) multiplier = strtol(multiplier_s, NULL, 0);
+ if(unlikely(!multiplier)) multiplier = 1;
+
+ long divisor = 1;
+ if(likely(divisor_s && *divisor_s)) divisor = strtol(divisor_s, NULL, 0);
+ if(unlikely(!divisor)) divisor = 1;
+
+ if(unlikely(!algorithm || !*algorithm)) algorithm = "absolute";
+
+ if(unlikely(rrdset_flag_check(st, RRDSET_FLAG_DEBUG)))
+ debug(D_PLUGINSD, "creating dimension in chart %s, id='%s', name='%s', algorithm='%s', multiplier=%ld, divisor=%ld, hidden='%s'"
+ , st->id
+ , id
+ , name?name:""
+ , rrd_algorithm_name(rrd_algorithm_id(algorithm))
+ , multiplier
+ , divisor
+ , options?options:""
+ );
+
+ RRDDIM *rd = rrddim_add(st, id, name, multiplier, divisor, rrd_algorithm_id(algorithm));
+ rrddim_flag_clear(rd, RRDDIM_FLAG_HIDDEN);
+ rrddim_flag_clear(rd, RRDDIM_FLAG_DONT_DETECT_RESETS_OR_OVERFLOWS);
+ if(options && *options) {
+ if(strstr(options, "hidden") != NULL) rrddim_flag_set(rd, RRDDIM_FLAG_HIDDEN);
+ if(strstr(options, "noreset") != NULL) rrddim_flag_set(rd, RRDDIM_FLAG_DONT_DETECT_RESETS_OR_OVERFLOWS);
+ if(strstr(options, "nooverflow") != NULL) rrddim_flag_set(rd, RRDDIM_FLAG_DONT_DETECT_RESETS_OR_OVERFLOWS);
+ }
+ }
+ else if(likely(hash == VARIABLE_HASH && !strcmp(s, PLUGINSD_KEYWORD_VARIABLE))) {
+ char *name = words[1];
+ char *value = words[2];
+ int global = (st)?0:1;
+
+ if(name && *name) {
+ if((strcmp(name, "GLOBAL") == 0 || strcmp(name, "HOST") == 0)) {
+ global = 1;
+ name = words[2];
+ value = words[3];
+ }
+ else if((strcmp(name, "LOCAL") == 0 || strcmp(name, "CHART") == 0)) {
+ global = 0;
+ name = words[2];
+ value = words[3];
+ }
+ }
+
+ if(unlikely(!name || !*name)) {
+ error("requested a VARIABLE on host '%s', without a variable name. Disabling it.", host->hostname);
+ enabled = 0;
+ break;
+ }
+
+ if(unlikely(!value || !*value))
+ value = NULL;
+
+ if(value) {
+ char *endptr = NULL;
+ calculated_number v = (calculated_number)str2ld(value, &endptr);
+
+ if(unlikely(endptr && *endptr)) {
+ if(endptr == value)
+ error("the value '%s' of VARIABLE '%s' on host '%s' cannot be parsed as a number", value, name, host->hostname);
+ else
+ error("the value '%s' of VARIABLE '%s' on host '%s' has leftovers: '%s'", value, name, host->hostname, endptr);
+ }
+
+ if(global) {
+ RRDVAR *rv = rrdvar_custom_host_variable_create(host, name);
+ if (rv) rrdvar_custom_host_variable_set(host, rv, v);
+ else error("cannot find/create HOST VARIABLE '%s' on host '%s'", name, host->hostname);
+ }
+ else if(st) {
+ RRDSETVAR *rs = rrdsetvar_custom_chart_variable_create(st, name);
+ if (rs) rrdsetvar_custom_chart_variable_set(rs, v);
+ else error("cannot find/create CHART VARIABLE '%s' on host '%s', chart '%s'", name, host->hostname, st->id);
+ }
+ else
+ error("cannot find/create CHART VARIABLE '%s' on host '%s' without a chart", name, host->hostname);
+ }
+ else
+ error("cannot set %s VARIABLE '%s' on host '%s' to an empty value", (global)?"HOST":"CHART", name, host->hostname);
+ }
+ else if(likely(hash == FLUSH_HASH && !strcmp(s, PLUGINSD_KEYWORD_FLUSH))) {
+ debug(D_PLUGINSD, "requested a FLUSH");
+ st = NULL;
+ }
+ else if(unlikely(hash == DISABLE_HASH && !strcmp(s, PLUGINSD_KEYWORD_DISABLE))) {
+ info("called DISABLE. Disabling it.");
+ enabled = 0;
+ break;
+ }
+ else {
+ error("sent command '%s' which is not known by netdata, for host '%s'. Disabling it.", s, host->hostname);
+ enabled = 0;
+ break;
+ }
+ }
+
+cleanup:
+ cd->enabled = enabled;
+
+ if(likely(count)) {
+ cd->successful_collections += count;
+ cd->serial_failures = 0;
+ }
+ else
+ cd->serial_failures++;
+
+ return count;
+}
+
+static void pluginsd_worker_thread_cleanup(void *arg) {
+ struct plugind *cd = (struct plugind *)arg;
+
+ if(cd->enabled && !cd->obsolete) {
+ cd->obsolete = 1;
+
+ info("data collection thread exiting");
+
+ if (cd->pid) {
+ siginfo_t info;
+ info("killing child process pid %d", cd->pid);
+ if (killpid(cd->pid, SIGTERM) != -1) {
+ info("waiting for child process pid %d to exit...", cd->pid);
+ waitid(P_PID, (id_t) cd->pid, &info, WEXITED);
+ }
+ cd->pid = 0;
+ }
+ }
+}
+
+void *pluginsd_worker_thread(void *arg) {
+ netdata_thread_cleanup_push(pluginsd_worker_thread_cleanup, arg);
+
+ struct plugind *cd = (struct plugind *)arg;
+
+ cd->obsolete = 0;
+ size_t count = 0;
+
+ while(!netdata_exit) {
+ FILE *fp = mypopen(cd->cmd, &cd->pid);
+ if(unlikely(!fp)) {
+ error("Cannot popen(\"%s\", \"r\").", cd->cmd);
+ break;
+ }
+
+ info("connected to '%s' running on pid %d", cd->fullfilename, cd->pid);
+ count = pluginsd_process(localhost, cd, fp, 0);
+ error("'%s' (pid %d) disconnected after %zu successful data collections (ENDs).", cd->fullfilename, cd->pid, count);
+ killpid(cd->pid, SIGTERM);
+
+ // get the return code
+ int code = mypclose(fp, cd->pid);
+
+ if(code != 0) {
+ // the plugin reports failure
+
+ if(likely(!cd->successful_collections)) {
+ // nothing collected - disable it
+ error("'%s' (pid %d) exited with error code %d. Disabling it.", cd->fullfilename, cd->pid, code);
+ cd->enabled = 0;
+ }
+ else {
+ // we have collected something
+
+ if(likely(cd->serial_failures <= 10)) {
+ error("'%s' (pid %d) exited with error code %d, but has given useful output in the past (%zu times). %s", cd->fullfilename, cd->pid, code, cd->successful_collections, cd->enabled?"Waiting a bit before starting it again.":"Will not start it again - it is disabled.");
+ sleep((unsigned int) (cd->update_every * 10));
+ }
+ else {
+ error("'%s' (pid %d) exited with error code %d, but has given useful output in the past (%zu times). We tried %zu times to restart it, but it failed to generate data. Disabling it.", cd->fullfilename, cd->pid, code, cd->successful_collections, cd->serial_failures);
+ cd->enabled = 0;
+ }
+ }
+ }
+ else {
+ // the plugin reports success
+
+ if(unlikely(!cd->successful_collections)) {
+ // we have collected nothing so far
+
+ if(likely(cd->serial_failures <= 10)) {
+ error("'%s' (pid %d) does not generate useful output but it reports success (exits with 0). %s.", cd->fullfilename, cd->pid, cd->enabled?"Waiting a bit before starting it again.":"Will not start it again - it is now disabled.");
+ sleep((unsigned int) (cd->update_every * 10));
+ }
+ else {
+ error("'%s' (pid %d) does not generate useful output, although it reports success (exits with 0), but we have tried %zu times to collect something. Disabling it.", cd->fullfilename, cd->pid, cd->serial_failures);
+ cd->enabled = 0;
+ }
+ }
+ else
+ sleep((unsigned int) cd->update_every);
+ }
+ cd->pid = 0;
+
+ if(unlikely(!cd->enabled)) break;
+ }
+
+ netdata_thread_cleanup_pop(1);
+ return NULL;
+}
+
+static void pluginsd_main_cleanup(void *data) {
+ struct netdata_static_thread *static_thread = (struct netdata_static_thread *)data;
+ static_thread->enabled = NETDATA_MAIN_THREAD_EXITING;
+ info("cleaning up...");
+
+ struct plugind *cd;
+ for (cd = pluginsd_root; cd; cd = cd->next) {
+ if (cd->enabled && !cd->obsolete) {
+ info("stopping plugin thread: %s", cd->id);
+ netdata_thread_cancel(cd->thread);
+ }
+ }
+
+ info("cleanup completed.");
+ static_thread->enabled = NETDATA_MAIN_THREAD_EXITED;
+}
+
+void *pluginsd_main(void *ptr) {
+ netdata_thread_cleanup_push(pluginsd_main_cleanup, ptr);
+
+ int automatic_run = config_get_boolean(CONFIG_SECTION_PLUGINS, "enable running new plugins", 1);
+ int scan_frequency = (int) config_get_number(CONFIG_SECTION_PLUGINS, "check for new plugins every", 60);
+ if(scan_frequency < 1) scan_frequency = 1;
+
+ // store the errno for each plugins directory
+ // so that we don't log broken directories on each loop
+ int directory_errors[PLUGINSD_MAX_DIRECTORIES] = { 0 };
+
+ while(!netdata_exit) {
+ int idx;
+ const char *directory_name;
+
+ for( idx = 0; idx < PLUGINSD_MAX_DIRECTORIES && (directory_name = plugin_directories[idx]) ; idx++ ) {
+ if(unlikely(netdata_exit)) break;
+
+ errno = 0;
+ DIR *dir = opendir(directory_name);
+ if(unlikely(!dir)) {
+ if(directory_errors[idx] != errno) {
+ directory_errors[idx] = errno;
+ error("cannot open plugins directory '%s'", directory_name);
+ }
+ continue;
+ }
+
+ struct dirent *file = NULL;
+ while(likely((file = readdir(dir)))) {
+ if(unlikely(netdata_exit)) break;
+
+ debug(D_PLUGINSD, "examining file '%s'", file->d_name);
+
+ if(unlikely(strcmp(file->d_name, ".") == 0 || strcmp(file->d_name, "..") == 0)) continue;
+
+ int len = (int) strlen(file->d_name);
+ if(unlikely(len <= (int)PLUGINSD_FILE_SUFFIX_LEN)) continue;
+ if(unlikely(strcmp(PLUGINSD_FILE_SUFFIX, &file->d_name[len - (int)PLUGINSD_FILE_SUFFIX_LEN]) != 0)) {
+ debug(D_PLUGINSD, "file '%s' does not end in '%s'", file->d_name, PLUGINSD_FILE_SUFFIX);
+ continue;
+ }
+
+ char pluginname[CONFIG_MAX_NAME + 1];
+ snprintfz(pluginname, CONFIG_MAX_NAME, "%.*s", (int)(len - PLUGINSD_FILE_SUFFIX_LEN), file->d_name);
+ int enabled = config_get_boolean(CONFIG_SECTION_PLUGINS, pluginname, automatic_run);
+
+ if(unlikely(!enabled)) {
+ debug(D_PLUGINSD, "plugin '%s' is not enabled", file->d_name);
+ continue;
+ }
+
+ // check if it runs already
+ struct plugind *cd;
+ for(cd = pluginsd_root ; cd ; cd = cd->next)
+ if(unlikely(strcmp(cd->filename, file->d_name) == 0)) break;
+
+ if(likely(cd && !cd->obsolete)) {
+ debug(D_PLUGINSD, "plugin '%s' is already running", cd->filename);
+ continue;
+ }
+
+ // it is not running
+ // allocate a new one, or use the obsolete one
+ if(unlikely(!cd)) {
+ cd = callocz(sizeof(struct plugind), 1);
+
+ snprintfz(cd->id, CONFIG_MAX_NAME, "plugin:%s", pluginname);
+
+ strncpyz(cd->filename, file->d_name, FILENAME_MAX);
+ snprintfz(cd->fullfilename, FILENAME_MAX, "%s/%s", directory_name, cd->filename);
+
+ cd->enabled = enabled;
+ cd->update_every = (int) config_get_number(cd->id, "update every", localhost->rrd_update_every);
+ cd->started_t = now_realtime_sec();
+
+ char *def = "";
+ snprintfz(cd->cmd, PLUGINSD_CMD_MAX, "exec %s %d %s", cd->fullfilename, cd->update_every, config_get(cd->id, "command options", def));
+
+ // link it
+ if(likely(pluginsd_root)) cd->next = pluginsd_root;
+ pluginsd_root = cd;
+
+ // it is not currently running
+ cd->obsolete = 1;
+
+ if(cd->enabled) {
+ char tag[NETDATA_THREAD_TAG_MAX + 1];
+ snprintfz(tag, NETDATA_THREAD_TAG_MAX, "PLUGINSD[%s]", pluginname);
+ // spawn a new thread for it
+ netdata_thread_create(&cd->thread, tag, NETDATA_THREAD_OPTION_DEFAULT, pluginsd_worker_thread, cd);
+ }
+ }
+ }
+
+ closedir(dir);
+ }
+
+ sleep((unsigned int) scan_frequency);
+ }
+
+ netdata_thread_cleanup_pop(1);
+ return NULL;
+}
diff --git a/collectors/plugins.d/plugins_d.h b/collectors/plugins.d/plugins_d.h
new file mode 100644
index 000000000..adccf3f0f
--- /dev/null
+++ b/collectors/plugins.d/plugins_d.h
@@ -0,0 +1,73 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_PLUGINS_D_H
+#define NETDATA_PLUGINS_D_H 1
+
+#include "../../daemon/common.h"
+
+#define NETDATA_PLUGIN_HOOK_PLUGINSD \
+ { \
+ .name = "PLUGINSD", \
+ .config_section = NULL, \
+ .config_name = NULL, \
+ .enabled = 1, \
+ .thread = NULL, \
+ .init_routine = NULL, \
+ .start_routine = pluginsd_main \
+ },
+
+
+#define PLUGINSD_FILE_SUFFIX ".plugin"
+#define PLUGINSD_FILE_SUFFIX_LEN strlen(PLUGINSD_FILE_SUFFIX)
+#define PLUGINSD_CMD_MAX (FILENAME_MAX*2)
+
+#define PLUGINSD_KEYWORD_CHART "CHART"
+#define PLUGINSD_KEYWORD_DIMENSION "DIMENSION"
+#define PLUGINSD_KEYWORD_BEGIN "BEGIN"
+#define PLUGINSD_KEYWORD_END "END"
+#define PLUGINSD_KEYWORD_FLUSH "FLUSH"
+#define PLUGINSD_KEYWORD_DISABLE "DISABLE"
+#define PLUGINSD_KEYWORD_VARIABLE "VARIABLE"
+
+#define PLUGINSD_LINE_MAX 1024
+#define PLUGINSD_MAX_WORDS 20
+
+#define PLUGINSD_MAX_DIRECTORIES 20
+extern char *plugin_directories[PLUGINSD_MAX_DIRECTORIES];
+
+struct plugind {
+ char id[CONFIG_MAX_NAME+1]; // config node id
+
+ char filename[FILENAME_MAX+1]; // just the filename
+ char fullfilename[FILENAME_MAX+1]; // with path
+ char cmd[PLUGINSD_CMD_MAX+1]; // the command that it executes
+
+ volatile pid_t pid;
+ netdata_thread_t thread;
+
+ size_t successful_collections; // the number of times we have seen
+ // values collected from this plugin
+
+ size_t serial_failures; // the number of times the plugin started
+ // without collecting values
+
+ int update_every; // the plugin default data collection frequency
+ volatile sig_atomic_t obsolete; // do not touch this structure after setting this to 1
+ volatile sig_atomic_t enabled; // if this is enabled or not
+
+ time_t started_t;
+
+ struct plugind *next;
+};
+
+extern struct plugind *pluginsd_root;
+
+extern void *pluginsd_main(void *ptr);
+
+extern size_t pluginsd_process(RRDHOST *host, struct plugind *cd, FILE *fp, int trust_durations);
+extern int pluginsd_split_words(char *str, char **words, int max_words);
+
+extern int quoted_strings_splitter(char *str, char **words, int max_words, int (*custom_isspace)(char));
+extern int config_isspace(char c);
+
+#endif /* NETDATA_PLUGINS_D_H */
diff --git a/collectors/proc.plugin/Makefile.am b/collectors/proc.plugin/Makefile.am
new file mode 100644
index 000000000..19554bed8
--- /dev/null
+++ b/collectors/proc.plugin/Makefile.am
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+
+dist_noinst_DATA = \
+ README.md \
+ $(NULL)
diff --git a/collectors/proc.plugin/Makefile.in b/collectors/proc.plugin/Makefile.in
new file mode 100644
index 000000000..f6db90c87
--- /dev/null
+++ b/collectors/proc.plugin/Makefile.in
@@ -0,0 +1,464 @@
+# Makefile.in generated by automake 1.14.1 from Makefile.am.
+# @configure_input@
+
+# Copyright (C) 1994-2013 Free Software Foundation, Inc.
+
+# This Makefile.in is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
+# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE.
+
+@SET_MAKE@
+
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+VPATH = @srcdir@
+am__is_gnu_make = test -n '$(MAKEFILE_LIST)' && test -n '$(MAKELEVEL)'
+am__make_running_with_option = \
+ case $${target_option-} in \
+ ?) ;; \
+ *) echo "am__make_running_with_option: internal error: invalid" \
+ "target option '$${target_option-}' specified" >&2; \
+ exit 1;; \
+ esac; \
+ has_opt=no; \
+ sane_makeflags=$$MAKEFLAGS; \
+ if $(am__is_gnu_make); then \
+ sane_makeflags=$$MFLAGS; \
+ else \
+ case $$MAKEFLAGS in \
+ *\\[\ \ ]*) \
+ bs=\\; \
+ sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \
+ | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \
+ esac; \
+ fi; \
+ skip_next=no; \
+ strip_trailopt () \
+ { \
+ flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \
+ }; \
+ for flg in $$sane_makeflags; do \
+ test $$skip_next = yes && { skip_next=no; continue; }; \
+ case $$flg in \
+ *=*|--*) continue;; \
+ -*I) strip_trailopt 'I'; skip_next=yes;; \
+ -*I?*) strip_trailopt 'I';; \
+ -*O) strip_trailopt 'O'; skip_next=yes;; \
+ -*O?*) strip_trailopt 'O';; \
+ -*l) strip_trailopt 'l'; skip_next=yes;; \
+ -*l?*) strip_trailopt 'l';; \
+ -[dEDm]) skip_next=yes;; \
+ -[JT]) skip_next=yes;; \
+ esac; \
+ case $$flg in \
+ *$$target_option*) has_opt=yes; break;; \
+ esac; \
+ done; \
+ test $$has_opt = yes
+am__make_dryrun = (target_option=n; $(am__make_running_with_option))
+am__make_keepgoing = (target_option=k; $(am__make_running_with_option))
+pkgdatadir = $(datadir)/@PACKAGE@
+pkgincludedir = $(includedir)/@PACKAGE@
+pkglibdir = $(libdir)/@PACKAGE@
+pkglibexecdir = $(libexecdir)/@PACKAGE@
+am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd
+install_sh_DATA = $(install_sh) -c -m 644
+install_sh_PROGRAM = $(install_sh) -c
+install_sh_SCRIPT = $(install_sh) -c
+INSTALL_HEADER = $(INSTALL_DATA)
+transform = $(program_transform_name)
+NORMAL_INSTALL = :
+PRE_INSTALL = :
+POST_INSTALL = :
+NORMAL_UNINSTALL = :
+PRE_UNINSTALL = :
+POST_UNINSTALL = :
+build_triplet = @build@
+host_triplet = @host@
+subdir = collectors/proc.plugin
+DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/Makefile.am \
+ $(dist_noinst_DATA)
+ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
+am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \
+ $(top_srcdir)/build/m4/ax_c__generic.m4 \
+ $(top_srcdir)/build/m4/ax_c_lto.m4 \
+ $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \
+ $(top_srcdir)/build/m4/ax_c_mallopt.m4 \
+ $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \
+ $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \
+ $(top_srcdir)/build/m4/ax_pthread.m4 \
+ $(top_srcdir)/build/m4/jemalloc.m4 \
+ $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac
+am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
+ $(ACLOCAL_M4)
+mkinstalldirs = $(install_sh) -d
+CONFIG_HEADER = $(top_builddir)/config.h
+CONFIG_CLEAN_FILES =
+CONFIG_CLEAN_VPATH_FILES =
+AM_V_P = $(am__v_P_@AM_V@)
+am__v_P_ = $(am__v_P_@AM_DEFAULT_V@)
+am__v_P_0 = false
+am__v_P_1 = :
+AM_V_GEN = $(am__v_GEN_@AM_V@)
+am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@)
+am__v_GEN_0 = @echo " GEN " $@;
+am__v_GEN_1 =
+AM_V_at = $(am__v_at_@AM_V@)
+am__v_at_ = $(am__v_at_@AM_DEFAULT_V@)
+am__v_at_0 = @
+am__v_at_1 =
+SOURCES =
+DIST_SOURCES =
+am__can_run_installinfo = \
+ case $$AM_UPDATE_INFO_DIR in \
+ n|no|NO) false;; \
+ *) (install-info --version) >/dev/null 2>&1;; \
+ esac
+DATA = $(dist_noinst_DATA)
+am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP)
+DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
+ACLOCAL = @ACLOCAL@
+AMTAR = @AMTAR@
+AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@
+AUTOCONF = @AUTOCONF@
+AUTOHEADER = @AUTOHEADER@
+AUTOMAKE = @AUTOMAKE@
+AWK = @AWK@
+CC = @CC@
+CCDEPMODE = @CCDEPMODE@
+CFLAGS = @CFLAGS@
+CPP = @CPP@
+CPPFLAGS = @CPPFLAGS@
+CYGPATH_W = @CYGPATH_W@
+DEFS = @DEFS@
+DEPDIR = @DEPDIR@
+ECHO_C = @ECHO_C@
+ECHO_N = @ECHO_N@
+ECHO_T = @ECHO_T@
+EGREP = @EGREP@
+EXEEXT = @EXEEXT@
+GREP = @GREP@
+INSTALL = @INSTALL@
+INSTALL_DATA = @INSTALL_DATA@
+INSTALL_PROGRAM = @INSTALL_PROGRAM@
+INSTALL_SCRIPT = @INSTALL_SCRIPT@
+INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@
+IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@
+LDFLAGS = @LDFLAGS@
+LIBCAP_CFLAGS = @LIBCAP_CFLAGS@
+LIBCAP_LIBS = @LIBCAP_LIBS@
+LIBMNL_CFLAGS = @LIBMNL_CFLAGS@
+LIBMNL_LIBS = @LIBMNL_LIBS@
+LIBOBJS = @LIBOBJS@
+LIBS = @LIBS@
+LTLIBOBJS = @LTLIBOBJS@
+MAINT = @MAINT@
+MAKEINFO = @MAKEINFO@
+MATH_CFLAGS = @MATH_CFLAGS@
+MATH_LIBS = @MATH_LIBS@
+MKDIR_P = @MKDIR_P@
+NFACCT_CFLAGS = @NFACCT_CFLAGS@
+NFACCT_LIBS = @NFACCT_LIBS@
+OBJEXT = @OBJEXT@
+OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@
+OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@
+OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@
+OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@
+OPTIONAL_MATH_CLFAGS = @OPTIONAL_MATH_CLFAGS@
+OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@
+OPTIONAL_NFACCT_CLFAGS = @OPTIONAL_NFACCT_CLFAGS@
+OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@
+OPTIONAL_UUID_CLFAGS = @OPTIONAL_UUID_CLFAGS@
+OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@
+OPTIONAL_ZLIB_CLFAGS = @OPTIONAL_ZLIB_CLFAGS@
+OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@
+PACKAGE = @PACKAGE@
+PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@
+PACKAGE_NAME = @PACKAGE_NAME@
+PACKAGE_RPM_RELEASE = @PACKAGE_RPM_RELEASE@
+PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@
+PACKAGE_STRING = @PACKAGE_STRING@
+PACKAGE_TARNAME = @PACKAGE_TARNAME@
+PACKAGE_URL = @PACKAGE_URL@
+PACKAGE_VERSION = @PACKAGE_VERSION@
+PATH_SEPARATOR = @PATH_SEPARATOR@
+PKG_CONFIG = @PKG_CONFIG@
+PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@
+PKG_CONFIG_PATH = @PKG_CONFIG_PATH@
+PTHREAD_CC = @PTHREAD_CC@
+PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
+PTHREAD_LIBS = @PTHREAD_LIBS@
+SET_MAKE = @SET_MAKE@
+SHELL = @SHELL@
+SSE_CANDIDATE = @SSE_CANDIDATE@
+STRIP = @STRIP@
+UUID_CFLAGS = @UUID_CFLAGS@
+UUID_LIBS = @UUID_LIBS@
+VERSION = @VERSION@
+ZLIB_CFLAGS = @ZLIB_CFLAGS@
+ZLIB_LIBS = @ZLIB_LIBS@
+abs_builddir = @abs_builddir@
+abs_srcdir = @abs_srcdir@
+abs_top_builddir = @abs_top_builddir@
+abs_top_srcdir = @abs_top_srcdir@
+ac_ct_CC = @ac_ct_CC@
+am__include = @am__include@
+am__leading_dot = @am__leading_dot@
+am__quote = @am__quote@
+am__tar = @am__tar@
+am__untar = @am__untar@
+ax_pthread_config = @ax_pthread_config@
+bindir = @bindir@
+build = @build@
+build_alias = @build_alias@
+build_cpu = @build_cpu@
+build_os = @build_os@
+build_target = @build_target@
+build_vendor = @build_vendor@
+builddir = @builddir@
+cachedir = @cachedir@
+chartsdir = @chartsdir@
+configdir = @configdir@
+datadir = @datadir@
+datarootdir = @datarootdir@
+docdir = @docdir@
+dvidir = @dvidir@
+exec_prefix = @exec_prefix@
+has_jemalloc = @has_jemalloc@
+has_tcmalloc = @has_tcmalloc@
+host = @host@
+host_alias = @host_alias@
+host_cpu = @host_cpu@
+host_os = @host_os@
+host_vendor = @host_vendor@
+htmldir = @htmldir@
+includedir = @includedir@
+infodir = @infodir@
+install_sh = @install_sh@
+libconfigdir = @libconfigdir@
+libdir = @libdir@
+libexecdir = @libexecdir@
+localedir = @localedir@
+localstatedir = @localstatedir@
+logdir = @logdir@
+mandir = @mandir@
+mkdir_p = @mkdir_p@
+nodedir = @nodedir@
+oldincludedir = @oldincludedir@
+pdfdir = @pdfdir@
+pluginsdir = @pluginsdir@
+prefix = @prefix@
+program_transform_name = @program_transform_name@
+psdir = @psdir@
+pythondir = @pythondir@
+registrydir = @registrydir@
+sbindir = @sbindir@
+sharedstatedir = @sharedstatedir@
+srcdir = @srcdir@
+sysconfdir = @sysconfdir@
+target_alias = @target_alias@
+top_build_prefix = @top_build_prefix@
+top_builddir = @top_builddir@
+top_srcdir = @top_srcdir@
+varlibdir = @varlibdir@
+webdir = @webdir@
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+dist_noinst_DATA = \
+ README.md \
+ $(NULL)
+
+all: all-am
+
+.SUFFIXES:
+$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps)
+ @for dep in $?; do \
+ case '$(am__configure_deps)' in \
+ *$$dep*) \
+ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \
+ && { if test -f $@; then exit 0; else break; fi; }; \
+ exit 1;; \
+ esac; \
+ done; \
+ echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu collectors/proc.plugin/Makefile'; \
+ $(am__cd) $(top_srcdir) && \
+ $(AUTOMAKE) --gnu collectors/proc.plugin/Makefile
+.PRECIOUS: Makefile
+Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
+ @case '$?' in \
+ *config.status*) \
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \
+ *) \
+ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \
+ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \
+ esac;
+
+$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+
+$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(am__aclocal_m4_deps):
+tags TAGS:
+
+ctags CTAGS:
+
+cscope cscopelist:
+
+
+distdir: $(DISTFILES)
+ @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+ list='$(DISTFILES)'; \
+ dist_files=`for file in $$list; do echo $$file; done | \
+ sed -e "s|^$$srcdirstrip/||;t" \
+ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \
+ case $$dist_files in \
+ */*) $(MKDIR_P) `echo "$$dist_files" | \
+ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \
+ sort -u` ;; \
+ esac; \
+ for file in $$dist_files; do \
+ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \
+ if test -d $$d/$$file; then \
+ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \
+ if test -d "$(distdir)/$$file"; then \
+ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+ fi; \
+ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \
+ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \
+ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+ fi; \
+ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \
+ else \
+ test -f "$(distdir)/$$file" \
+ || cp -p $$d/$$file "$(distdir)/$$file" \
+ || exit 1; \
+ fi; \
+ done
+check-am: all-am
+check: check-am
+all-am: Makefile $(DATA)
+installdirs:
+install: install-am
+install-exec: install-exec-am
+install-data: install-data-am
+uninstall: uninstall-am
+
+install-am: all-am
+ @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
+
+installcheck: installcheck-am
+install-strip:
+ if test -z '$(STRIP)'; then \
+ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+ install; \
+ else \
+ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+ "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \
+ fi
+mostlyclean-generic:
+
+clean-generic:
+
+distclean-generic:
+ -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
+ -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES)
+
+maintainer-clean-generic:
+ @echo "This command is intended for maintainers to use"
+ @echo "it deletes files that may require special tools to rebuild."
+ -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES)
+clean: clean-am
+
+clean-am: clean-generic mostlyclean-am
+
+distclean: distclean-am
+ -rm -f Makefile
+distclean-am: clean-am distclean-generic
+
+dvi: dvi-am
+
+dvi-am:
+
+html: html-am
+
+html-am:
+
+info: info-am
+
+info-am:
+
+install-data-am:
+
+install-dvi: install-dvi-am
+
+install-dvi-am:
+
+install-exec-am:
+
+install-html: install-html-am
+
+install-html-am:
+
+install-info: install-info-am
+
+install-info-am:
+
+install-man:
+
+install-pdf: install-pdf-am
+
+install-pdf-am:
+
+install-ps: install-ps-am
+
+install-ps-am:
+
+installcheck-am:
+
+maintainer-clean: maintainer-clean-am
+ -rm -f Makefile
+maintainer-clean-am: distclean-am maintainer-clean-generic
+
+mostlyclean: mostlyclean-am
+
+mostlyclean-am: mostlyclean-generic
+
+pdf: pdf-am
+
+pdf-am:
+
+ps: ps-am
+
+ps-am:
+
+uninstall-am:
+
+.MAKE: install-am install-strip
+
+.PHONY: all all-am check check-am clean clean-generic cscopelist-am \
+ ctags-am distclean distclean-generic distdir dvi dvi-am html \
+ html-am info info-am install install-am install-data \
+ install-data-am install-dvi install-dvi-am install-exec \
+ install-exec-am install-html install-html-am install-info \
+ install-info-am install-man install-pdf install-pdf-am \
+ install-ps install-ps-am install-strip installcheck \
+ installcheck-am installdirs maintainer-clean \
+ maintainer-clean-generic mostlyclean mostlyclean-generic pdf \
+ pdf-am ps ps-am tags-am uninstall uninstall-am
+
+
+# Tell versions [3.59,3.63) of GNU make to not export all variables.
+# Otherwise a system limit (for SysV at least) may be exceeded.
+.NOEXPORT:
diff --git a/collectors/proc.plugin/README.md b/collectors/proc.plugin/README.md
new file mode 100644
index 000000000..9d444f3d0
--- /dev/null
+++ b/collectors/proc.plugin/README.md
@@ -0,0 +1,200 @@
+
+# proc.plugin
+
+ - `/proc/net/dev` (all network interfaces for all their values)
+ - `/proc/diskstats` (all disks for all their values)
+ - `/proc/net/snmp` (total IPv4, TCP and UDP usage)
+ - `/proc/net/snmp6` (total IPv6 usage)
+ - `/proc/net/netstat` (more IPv4 usage)
+ - `/proc/net/stat/nf_conntrack` (connection tracking performance)
+ - `/proc/net/stat/synproxy` (synproxy performance)
+ - `/proc/net/ip_vs/stats` (IPVS connection statistics)
+ - `/proc/stat` (CPU utilization)
+ - `/proc/meminfo` (memory information)
+ - `/proc/vmstat` (system performance)
+ - `/proc/net/rpc/nfsd` (NFS server statistics for both v3 and v4 NFS servers)
+ - `/sys/fs/cgroup` (Control Groups - Linux Containers)
+ - `/proc/self/mountinfo` (mount points)
+ - `/proc/interrupts` (total and per core hardware interrupts)
+ - `/proc/softirqs` (total and per core software interrupts)
+ - `/proc/loadavg` (system load and total processes running)
+ - `/proc/sys/kernel/random/entropy_avail` (random numbers pool availability - used in cryptography)
+ - `ksm` Kernel Same-Page Merging performance (several files under `/sys/kernel/mm/ksm`).
+ - `netdata` (internal netdata resources utilization)
+
+
+---
+
+# Monitoring Disks
+
+> Live demo of disk monitoring at: **[http://london.netdata.rocks](https://registry.my-netdata.io/#menu_disk)**
+
+Performance monitoring for Linux disks is quite complicated. The main reason is the plethora of disk technologies available. There are many different hardware disk technologies, but there are even more **virtual disk** technologies that can provide additional storage features.
+
+Fortunately, the Linux kernel provides many metrics that can give deep insight into what our disks are doing. The kernel measures all these metrics on all layers of storage: **virtual disks**, **physical disks** and **partitions of disks**.
+
+Let's see the list of metrics provided by netdata for each of the above:
+
+### I/O bandwidth/s (kb/s)
+
+The amount of data transferred from and to the disk.
+
+### I/O operations/s
+
+The number of I/O operations completed.
+
+### Queued I/O operations
+
+The number of currently queued I/O operations. For traditional disks that execute commands one after another, one of them is being run by the disk and the rest are just waiting in a queue.
+
+### Backlog size (time in ms)
+
+The expected duration of the currently queued I/O operations.
+
+### Utilization (time percentage)
+
+The percentage of time the disk was busy with something. This is a very interesting metric, since for most disks, which execute commands sequentially, **it is the key indication of congestion**. A sequential disk that is busy 100% of the available time has no headroom left, so even if its bandwidth or the number of operations it executes is low, its capacity has been reached.
+
+Of course, for newer disk technologies (like fusion cards) that are capable of executing multiple commands in parallel, this metric is meaningless on its own.
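+
+For reference only (this is a sketch, not the exact code netdata runs), the utilization percentage over a collection interval can be derived from two samples of the "milliseconds spent doing I/Os" counter in `/proc/diskstats`; the function name and parameters below are made up for the illustration:
+
+```c
+// minimal sketch: disk utilization percentage over one collection interval
+// busy_ms_prev/busy_ms_now are two samples of the "milliseconds spent doing I/Os"
+// counter from /proc/diskstats, taken interval_ms milliseconds apart
+static double disk_utilization_pct(unsigned long long busy_ms_prev,
+                                   unsigned long long busy_ms_now,
+                                   unsigned long long interval_ms) {
+    if (!interval_ms) return 0.0;
+    double pct = 100.0 * (double)(busy_ms_now - busy_ms_prev) / (double)interval_ms;
+    return (pct > 100.0) ? 100.0 : pct; // devices doing parallel I/O can exceed 100%; clamp for display
+}
+```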
+
+### Average I/O operation time (ms)
+
+The average time for I/O requests issued to the device to be served. This includes the time spent by the requests in queue and the time spent servicing them.
+
+### Average I/O operation size (kb)
+
+The average amount of data transferred per completed I/O operation.
+
+### Average Service Time (ms)
+
+The average service time of completed I/O operations. This metric is calculated using the total busy time of the disk and the number of completed operations. If the disk is able to execute multiple operations in parallel, the reported average service time will be misleading.
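+
+As a rough illustration (again, a sketch and not netdata's actual implementation), the same busy-time counter combined with the completed-operations counters gives the average service time:
+
+```c
+// minimal sketch: average service time from two consecutive /proc/diskstats samples
+// busy_ms_*   : "milliseconds spent doing I/Os" counter
+// completed_* : reads completed + writes completed counters
+static double avg_service_time_ms(unsigned long long busy_ms_prev, unsigned long long busy_ms_now,
+                                  unsigned long long completed_prev, unsigned long long completed_now) {
+    unsigned long long ops = completed_now - completed_prev;
+    if (!ops) return 0.0; // nothing completed in this interval
+    return (double)(busy_ms_now - busy_ms_prev) / (double)ops;
+}
+```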
+
+### Merged I/O operations/s
+
+The Linux kernel is capable of merging I/O operations. So, if two requests to read data from the disk are adjacent, the Linux kernel may merge them into one before handing them to the disk. This metric measures the number of operations that have been merged by the Linux kernel.
+
+### Total I/O time
+
+The sum of the duration of all completed I/O operations. This number can exceed the interval if the disk is able to execute multiple I/O operations in parallel.
+
+### Space usage
+
+For mounted disks, netdata will provide a chart for their space, with 3 dimensions:
+
+1. free
+2. used
+3. reserved for root
+
+### inode usage
+
+For mounted disks, netdata will provide a chart for their inodes (number of files and directories), with 3 dimensions:
+
+1. free
+2. used
+3. reserved for root
+
+---
+
+## disk names
+
+netdata will automatically name disks on the dashboard after the mount point they are mounted on, of course only while they are mounted. Changes in mount points are not currently detected (you will have to restart netdata for a disk's name to change).
+
+---
+
+## performance metrics
+
+By default, netdata enables monitoring of metrics only when they are not zero. If they are constantly zero, they are ignored. Metrics that start having values after netdata is started will be detected, and their charts will be automatically added to the dashboard (a refresh of the dashboard is needed for them to appear, though).
+
+netdata categorizes all block devices in 3 categories:
+
+1. physical disks (i.e. block devices that do not have slaves and are not partitions)
+2. virtual disks (i.e. block devices that have slaves - like RAID devices)
+3. disk partitions (i.e. block devices that are part of a physical disk)
+
+Performance metrics are enabled by default for all disk devices, except partitions and unmounted virtual disks. Of course, you can enable or disable monitoring of any block device by editing the netdata configuration file.
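+
+The sketch below shows one way to tell these categories apart from sysfs. It is illustrative only (netdata's actual detection is in `proc_diskstats.c` and is more involved); the function name and return codes are made up for this example:
+
+```c
+// minimal sketch: classify a block device using sysfs
+#include <stdio.h>
+#include <unistd.h>
+
+// returns 1 for a partition, 2 for a virtual disk, 0 for a physical disk (best effort)
+static int classify_block_device(unsigned long major, unsigned long minor, const char *name) {
+    char path[512];
+
+    // partitions expose a 'partition' attribute under /sys/dev/block/MAJOR:MINOR/
+    snprintf(path, sizeof(path), "/sys/dev/block/%lu:%lu/partition", major, minor);
+    if (access(path, F_OK) == 0) return 1;
+
+    // virtual devices (loop, ram, zram, md, dm, ...) appear under /sys/devices/virtual/block/
+    snprintf(path, sizeof(path), "/sys/devices/virtual/block/%s", name);
+    if (access(path, F_OK) == 0) return 2;
+
+    return 0;
+}
+```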
+
+### netdata configuration
+
+You can get the running netdata configuration using this:
+
+```sh
+cd /etc/netdata
+curl "http://localhost:19999/netdata.conf" >netdata.conf.new
+mv netdata.conf.new netdata.conf
+```
+
+Then edit `netdata.conf` and find the following section. This is the basic plugin configuration.
+
+```
+[plugin:proc:/proc/diskstats]
+ # enable new disks detected at runtime = yes
+ # performance metrics for physical disks = auto
+ # performance metrics for virtual disks = no
+ # performance metrics for partitions = no
+ # performance metrics for mounted filesystems = no
+ # performance metrics for mounted virtual disks = auto
+ # space metrics for mounted filesystems = auto
+ # bandwidth for all disks = auto
+ # operations for all disks = auto
+ # merged operations for all disks = auto
+ # i/o time for all disks = auto
+ # queued operations for all disks = auto
+ # utilization percentage for all disks = auto
+ # backlog for all disks = auto
+ # space usage for all disks = auto
+ # inodes usage for all disks = auto
+ # filename to monitor = /proc/diskstats
+ # path to get block device infos = /sys/dev/block/%lu:%lu/%s
+ # path to get h/w sector size = /sys/block/%s/queue/hw_sector_size
+ # path to get h/w sector size for partitions = /sys/dev/block/%lu:%lu/subsystem/%s/../queue/hw_sector_size
+
+```
+
+For each virtual disk, physical disk and partition you will have a section like this:
+
+```
+[plugin:proc:/proc/diskstats:sda]
+ # enable = yes
+ # enable performance metrics = auto
+ # bandwidth = auto
+ # operations = auto
+ # merged operations = auto
+ # i/o time = auto
+ # queued operations = auto
+ # utilization percentage = auto
+ # backlog = auto
+```
+
+For all configuration options:
+- `auto` = enable monitoring if the collected values are not zero
+- `yes` = enable monitoring
+- `no` = disable monitoring
+
+Of course, to set options, you will have to uncomment them. The comments show the internal defaults.
+
+After saving `/etc/netdata/netdata.conf`, restart your netdata to apply them.
+
+#### Disabling performance metrics for an individual device or for multiple devices by device type
+You can easily disable performance metrics for an individual device, for example:
+```
+[plugin:proc:/proc/diskstats:sda]
+ enable performance metrics = no
+```
+But sometimes you need to disable performance metrics for all devices of the same type. To do that, you first need to figure out the device type from `/proc/diskstats`, for example:
+```
+ 7 0 loop0 1651 0 3452 168 0 0 0 0 0 8 168
+ 7 1 loop1 4955 0 11924 880 0 0 0 0 0 64 880
+ 7 2 loop2 36 0 216 4 0 0 0 0 0 4 4
+ 7 6 loop6 0 0 0 0 0 0 0 0 0 0 0
+ 7 7 loop7 0 0 0 0 0 0 0 0 0 0 0
+ 251 2 zram2 27487 0 219896 188 79953 0 639624 1640 0 1828 1828
+ 251 3 zram3 27348 0 218784 152 79952 0 639616 1960 0 2060 2104
+```
+All zram devices have major number `251` and all loop devices have major number `7`.
+So, to disable performance metrics for all loop devices, you could add `performance metrics for disks with major 7 = no` to the `[plugin:proc:/proc/diskstats]` section.
+```
+[plugin:proc:/proc/diskstats]
+ performance metrics for disks with major 7 = no
+```
+
diff --git a/collectors/proc.plugin/ipc.c b/collectors/proc.plugin/ipc.c
new file mode 100644
index 000000000..6c6bee519
--- /dev/null
+++ b/collectors/proc.plugin/ipc.c
@@ -0,0 +1,263 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "plugin_proc.h"
+
+#include <sys/sem.h>
+#include <sys/msg.h>
+#include <sys/shm.h>
+
+
+#ifndef SEMVMX
+#define SEMVMX 32767 /* <= 32767 semaphore maximum value */
+#endif
+
+/* Some versions of libc only define IPC_INFO when __USE_GNU is defined. */
+#ifndef IPC_INFO
+#define IPC_INFO 3
+#endif
+
+struct ipc_limits {
+ uint64_t shmmni; /* max number of segments */
+ uint64_t shmmax; /* max segment size */
+ uint64_t shmall; /* max total shared memory */
+ uint64_t shmmin; /* min segment size */
+
+ int semmni; /* max number of arrays */
+ int semmsl; /* max semaphores per array */
+ int semmns; /* max semaphores system wide */
+ int semopm; /* max ops per semop call */
+ unsigned int semvmx; /* semaphore max value (constant) */
+
+ int msgmni; /* max queues system wide */
+ size_t msgmax; /* max size of message */
+ int msgmnb; /* default max size of queue */
+};
+
+struct ipc_status {
+ int semusz; /* current number of arrays */
+ int semaem; /* current semaphores system wide */
+};
+
+/*
+ * The last arg of semctl is a union semun, but where is it defined? X/OPEN
+ * tells us to define it ourselves, but until recently Linux include files
+ * would also define it.
+ */
+#ifndef HAVE_UNION_SEMUN
+/* according to X/OPEN we have to define it ourselves */
+union semun {
+ int val;
+ struct semid_ds *buf;
+ unsigned short int *array;
+ struct seminfo *__buf;
+};
+#endif
+
+static inline int ipc_sem_get_limits(struct ipc_limits *lim) {
+ static procfile *ff = NULL;
+ static int error_shown = 0;
+ static char filename[FILENAME_MAX + 1] = "";
+
+ if(unlikely(!filename[0]))
+ snprintfz(filename, FILENAME_MAX, "%s/proc/sys/kernel/sem", netdata_configured_host_prefix);
+
+ if(unlikely(!ff)) {
+ ff = procfile_open(filename, NULL, PROCFILE_FLAG_DEFAULT);
+ if(unlikely(!ff)) {
+ if(unlikely(!error_shown)) {
+ error("IPC: Cannot open file '%s'.", filename);
+ error_shown = 1;
+ }
+ goto ipc;
+ }
+ }
+
+ ff = procfile_readall(ff);
+ if(unlikely(!ff)) {
+ if(unlikely(!error_shown)) {
+ error("IPC: Cannot read file '%s'.", filename);
+ error_shown = 1;
+ }
+ goto ipc;
+ }
+
+ if(procfile_lines(ff) >= 1 && procfile_linewords(ff, 0) >= 4) {
+ lim->semvmx = SEMVMX;
+ lim->semmsl = str2i(procfile_lineword(ff, 0, 0));
+ lim->semmns = str2i(procfile_lineword(ff, 0, 1));
+ lim->semopm = str2i(procfile_lineword(ff, 0, 2));
+ lim->semmni = str2i(procfile_lineword(ff, 0, 3));
+ return 0;
+ }
+ else {
+ if(unlikely(!error_shown)) {
+ error("IPC: Invalid content in file '%s'.", filename);
+ error_shown = 1;
+ }
+ goto ipc;
+ }
+
+ipc:
+ // cannot do it from the file
+ // query IPC
+ {
+ struct seminfo seminfo = {.semmni = 0};
+ union semun arg = {.array = (ushort *) &seminfo};
+
+ if(unlikely(semctl(0, 0, IPC_INFO, arg) < 0)) {
+ error("IPC: Failed to read '%s' and request IPC_INFO with semctl().", filename);
+ goto error;
+ }
+
+ lim->semvmx = SEMVMX;
+ lim->semmni = seminfo.semmni;
+ lim->semmsl = seminfo.semmsl;
+ lim->semmns = seminfo.semmns;
+ lim->semopm = seminfo.semopm;
+ return 0;
+ }
+
+error:
+ lim->semvmx = 0;
+ lim->semmni = 0;
+ lim->semmsl = 0;
+ lim->semmns = 0;
+ lim->semopm = 0;
+ return -1;
+}
+
+/*
+printf ("------ Semaphore Limits --------\n");
+printf ("max number of arrays = %d\n", limits.semmni);
+printf ("max semaphores per array = %d\n", limits.semmsl);
+printf ("max semaphores system wide = %d\n", limits.semmns);
+printf ("max ops per semop call = %d\n", limits.semopm);
+printf ("semaphore max value = %u\n", limits.semvmx);
+
+printf ("------ Semaphore Status --------\n");
+printf ("used arrays = %d\n", status.semusz);
+printf ("allocated semaphores = %d\n", status.semaem);
+*/
+
+static inline int ipc_sem_get_status(struct ipc_status *st) {
+ struct seminfo seminfo;
+ union semun arg;
+
+ arg.array = (ushort *) (void *) &seminfo;
+
+ if(unlikely(semctl (0, 0, SEM_INFO, arg) < 0)) {
+ /* kernel not configured for semaphores */
+ static int error_shown = 0;
+ if(unlikely(!error_shown)) {
+ error("IPC: kernel is not configured for semaphores");
+ error_shown = 1;
+ }
+ st->semusz = 0;
+ st->semaem = 0;
+ return -1;
+ }
+
+ st->semusz = seminfo.semusz;
+ st->semaem = seminfo.semaem;
+ return 0;
+}
+
+int do_ipc(int update_every, usec_t dt) {
+ (void)dt;
+
+ static int initialized = 0, read_limits_next = -1;
+ static struct ipc_limits limits;
+ static struct ipc_status status;
+ static RRDVAR *arrays_max = NULL, *semaphores_max = NULL;
+ static RRDSET *st_semaphores = NULL, *st_arrays = NULL;
+ static RRDDIM *rd_semaphores = NULL, *rd_arrays = NULL;
+
+ if(unlikely(!initialized)) {
+ initialized = 1;
+
+ // make sure it works
+ if(ipc_sem_get_limits(&limits) == -1) {
+ error("unable to fetch semaphore limits");
+ return 1;
+ }
+
+ // make sure it works
+ if(ipc_sem_get_status(&status) == -1) {
+ error("unable to fetch semaphore statistics");
+ return 1;
+ }
+
+ // create the charts
+ if(unlikely(!st_semaphores)) {
+ st_semaphores = rrdset_create_localhost(
+ "system"
+ , "ipc_semaphores"
+ , NULL
+ , "ipc semaphores"
+ , NULL
+ , "IPC Semaphores"
+ , "semaphores"
+ , PLUGIN_PROC_NAME
+ , "ipc"
+ , NETDATA_CHART_PRIO_SYSTEM_IPC_SEMAPHORES
+ , localhost->rrd_update_every
+ , RRDSET_TYPE_AREA
+ );
+ rd_semaphores = rrddim_add(st_semaphores, "semaphores", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ }
+
+ if(unlikely(!st_arrays)) {
+ st_arrays = rrdset_create_localhost(
+ "system"
+ , "ipc_semaphore_arrays"
+ , NULL
+ , "ipc semaphores"
+ , NULL
+ , "IPC Semaphore Arrays"
+ , "arrays"
+ , PLUGIN_PROC_NAME
+ , "ipc"
+ , NETDATA_CHART_PRIO_SYSTEM_IPC_SEM_ARRAYS
+ , localhost->rrd_update_every
+ , RRDSET_TYPE_AREA
+ );
+ rd_arrays = rrddim_add(st_arrays, "arrays", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ }
+
+ // variables
+ semaphores_max = rrdvar_custom_host_variable_create(localhost, "ipc_semaphores_max");
+ arrays_max = rrdvar_custom_host_variable_create(localhost, "ipc_semaphores_arrays_max");
+ }
+
+ if(unlikely(read_limits_next < 0)) {
+ if(unlikely(ipc_sem_get_limits(&limits) == -1)) {
+ error("Unable to fetch semaphore limits.");
+ }
+ else {
+ if(semaphores_max) rrdvar_custom_host_variable_set(localhost, semaphores_max, limits.semmns);
+ if(arrays_max) rrdvar_custom_host_variable_set(localhost, arrays_max, limits.semmni);
+
+ st_arrays->red = limits.semmni;
+ st_semaphores->red = limits.semmns;
+
+ read_limits_next = 60 / update_every;
+ }
+ }
+ else
+ read_limits_next--;
+
+ if(unlikely(ipc_sem_get_status(&status) == -1)) {
+ error("Unable to get semaphore statistics");
+ return 0;
+ }
+
+ if(st_semaphores->counter_done) rrdset_next(st_semaphores);
+ rrddim_set_by_pointer(st_semaphores, rd_semaphores, status.semaem);
+ rrdset_done(st_semaphores);
+
+ if(st_arrays->counter_done) rrdset_next(st_arrays);
+ rrddim_set_by_pointer(st_arrays, rd_arrays, status.semusz);
+ rrdset_done(st_arrays);
+
+ return 0;
+}
diff --git a/collectors/proc.plugin/plugin_proc.c b/collectors/proc.plugin/plugin_proc.c
new file mode 100644
index 000000000..0c3244d61
--- /dev/null
+++ b/collectors/proc.plugin/plugin_proc.c
@@ -0,0 +1,217 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "plugin_proc.h"
+
+static struct proc_module {
+ const char *name;
+ const char *dim;
+
+ int enabled;
+
+ int (*func)(int update_every, usec_t dt);
+ usec_t duration;
+
+ RRDDIM *rd;
+
+} proc_modules[] = {
+
+ // system metrics
+ { .name = "/proc/stat", .dim = "stat", .func = do_proc_stat },
+ { .name = "/proc/uptime", .dim = "uptime", .func = do_proc_uptime },
+ { .name = "/proc/loadavg", .dim = "loadavg", .func = do_proc_loadavg },
+ { .name = "/proc/sys/kernel/random/entropy_avail", .dim = "entropy", .func = do_proc_sys_kernel_random_entropy_avail },
+
+ // CPU metrics
+ { .name = "/proc/interrupts", .dim = "interrupts", .func = do_proc_interrupts },
+ { .name = "/proc/softirqs", .dim = "softirqs", .func = do_proc_softirqs },
+
+ // memory metrics
+ { .name = "/proc/vmstat", .dim = "vmstat", .func = do_proc_vmstat },
+ { .name = "/proc/meminfo", .dim = "meminfo", .func = do_proc_meminfo },
+ { .name = "/sys/kernel/mm/ksm", .dim = "ksm", .func = do_sys_kernel_mm_ksm },
+ { .name = "/sys/devices/system/edac/mc", .dim = "ecc", .func = do_proc_sys_devices_system_edac_mc },
+ { .name = "/sys/devices/system/node", .dim = "numa", .func = do_proc_sys_devices_system_node },
+
+ // network metrics
+ { .name = "/proc/net/dev", .dim = "netdev", .func = do_proc_net_dev },
+ { .name = "/proc/net/sockstat", .dim = "sockstat", .func = do_proc_net_sockstat },
+ { .name = "/proc/net/sockstat6", .dim = "sockstat6", .func = do_proc_net_sockstat6 },
+ { .name = "/proc/net/netstat", .dim = "netstat", .func = do_proc_net_netstat }, // this has to be before /proc/net/snmp, because there is a shared metric
+ { .name = "/proc/net/snmp", .dim = "snmp", .func = do_proc_net_snmp },
+ { .name = "/proc/net/snmp6", .dim = "snmp6", .func = do_proc_net_snmp6 },
+ { .name = "/proc/net/sctp/snmp", .dim = "sctp", .func = do_proc_net_sctp_snmp },
+ { .name = "/proc/net/softnet_stat", .dim = "softnet", .func = do_proc_net_softnet_stat },
+ { .name = "/proc/net/ip_vs/stats", .dim = "ipvs", .func = do_proc_net_ip_vs_stats },
+
+ // firewall metrics
+ { .name = "/proc/net/stat/conntrack", .dim = "conntrack", .func = do_proc_net_stat_conntrack },
+ { .name = "/proc/net/stat/synproxy", .dim = "synproxy", .func = do_proc_net_stat_synproxy },
+
+ // disk metrics
+ { .name = "/proc/diskstats", .dim = "diskstats", .func = do_proc_diskstats },
+
+ // NFS metrics
+ { .name = "/proc/net/rpc/nfsd", .dim = "nfsd", .func = do_proc_net_rpc_nfsd },
+ { .name = "/proc/net/rpc/nfs", .dim = "nfs", .func = do_proc_net_rpc_nfs },
+
+ // ZFS metrics
+ { .name = "/proc/spl/kstat/zfs/arcstats", .dim = "zfs_arcstats", .func = do_proc_spl_kstat_zfs_arcstats },
+
+ // BTRFS metrics
+ { .name = "/sys/fs/btrfs", .dim = "btrfs", .func = do_sys_fs_btrfs },
+
+ // IPC metrics
+ { .name = "ipc", .dim = "ipc", .func = do_ipc },
+
+ // the terminator of this array
+ { .name = NULL, .dim = NULL, .func = NULL }
+};
+
+static void proc_main_cleanup(void *ptr) {
+ struct netdata_static_thread *static_thread = (struct netdata_static_thread *)ptr;
+ static_thread->enabled = NETDATA_MAIN_THREAD_EXITING;
+
+ info("cleaning up...");
+
+ static_thread->enabled = NETDATA_MAIN_THREAD_EXITED;
+}
+
+void *proc_main(void *ptr) {
+ netdata_thread_cleanup_push(proc_main_cleanup, ptr);
+
+ int vdo_cpu_netdata = config_get_boolean("plugin:proc", "netdata server resources", 1);
+
+ // check the enabled status for each module
+ int i;
+ for(i = 0 ; proc_modules[i].name ;i++) {
+ struct proc_module *pm = &proc_modules[i];
+
+ pm->enabled = config_get_boolean("plugin:proc", pm->name, 1);
+ pm->duration = 0ULL;
+ pm->rd = NULL;
+ }
+
+ usec_t step = localhost->rrd_update_every * USEC_PER_SEC;
+ heartbeat_t hb;
+ heartbeat_init(&hb);
+ size_t iterations = 0;
+
+ while(!netdata_exit) {
+ iterations++;
+ (void)iterations;
+
+ usec_t hb_dt = heartbeat_next(&hb, step);
+ usec_t duration = 0ULL;
+
+ if(unlikely(netdata_exit)) break;
+
+ // BEGIN -- the job to be done
+
+ for(i = 0 ; proc_modules[i].name ;i++) {
+ struct proc_module *pm = &proc_modules[i];
+ if(unlikely(!pm->enabled)) continue;
+
+ debug(D_PROCNETDEV_LOOP, "PROC calling %s.", pm->name);
+
+//#ifdef NETDATA_LOG_ALLOCATIONS
+// if(pm->func == do_proc_interrupts)
+// log_thread_memory_allocations = iterations;
+//#endif
+ pm->enabled = !pm->func(localhost->rrd_update_every, hb_dt);
+ pm->duration = heartbeat_monotonic_dt_to_now_usec(&hb) - duration;
+ duration += pm->duration;
+
+//#ifdef NETDATA_LOG_ALLOCATIONS
+// if(pm->func == do_proc_interrupts)
+// log_thread_memory_allocations = 0;
+//#endif
+
+ if(unlikely(netdata_exit)) break;
+ }
+
+ // END -- the job is done
+
+ // --------------------------------------------------------------------
+
+ if(vdo_cpu_netdata) {
+ static RRDSET *st = NULL;
+
+ if(unlikely(!st)) {
+ st = rrdset_find_bytype_localhost("netdata", "plugin_proc_modules");
+
+ if(!st) {
+ st = rrdset_create_localhost(
+ "netdata"
+ , "plugin_proc_modules"
+ , NULL
+ , "proc"
+ , NULL
+ , "NetData Proc Plugin Modules Durations"
+ , "milliseconds/run"
+ , "netdata"
+ , "stats"
+ , 132001
+ , localhost->rrd_update_every
+ , RRDSET_TYPE_STACKED
+ );
+
+ for(i = 0 ; proc_modules[i].name ;i++) {
+ struct proc_module *pm = &proc_modules[i];
+ if(unlikely(!pm->enabled)) continue;
+
+ pm->rd = rrddim_add(st, pm->dim, NULL, 1, 1000, RRD_ALGORITHM_ABSOLUTE);
+ }
+ }
+ }
+ else rrdset_next(st);
+
+ for(i = 0 ; proc_modules[i].name ;i++) {
+ struct proc_module *pm = &proc_modules[i];
+ if(unlikely(!pm->enabled)) continue;
+
+ rrddim_set_by_pointer(st, pm->rd, pm->duration);
+ }
+ rrdset_done(st);
+
+ global_statistics_charts();
+ registry_statistics();
+ }
+ }
+
+ netdata_thread_cleanup_pop(1);
+ return NULL;
+}
+
+int get_numa_node_count(void)
+{
+ static int numa_node_count = -1;
+
+ if (numa_node_count != -1)
+ return numa_node_count;
+
+ numa_node_count = 0;
+
+ char name[FILENAME_MAX + 1];
+ snprintfz(name, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/sys/devices/system/node");
+ char *dirname = config_get("plugin:proc:/sys/devices/system/node", "directory to monitor", name);
+
+ DIR *dir = opendir(dirname);
+ if(dir) {
+ struct dirent *de = NULL;
+ while((de = readdir(dir))) {
+ if(de->d_type != DT_DIR)
+ continue;
+
+ if(strncmp(de->d_name, "node", 4) != 0)
+ continue;
+
+ if(!isdigit(de->d_name[4]))
+ continue;
+
+ numa_node_count++;
+ }
+ closedir(dir);
+ }
+
+ return numa_node_count;
+}
diff --git a/collectors/proc.plugin/plugin_proc.h b/collectors/proc.plugin/plugin_proc.h
new file mode 100644
index 000000000..bfefe1ad4
--- /dev/null
+++ b/collectors/proc.plugin/plugin_proc.h
@@ -0,0 +1,74 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_PLUGIN_PROC_H
+#define NETDATA_PLUGIN_PROC_H 1
+
+#include "../../daemon/common.h"
+
+#if (TARGET_OS == OS_LINUX)
+
+#define NETDATA_PLUGIN_HOOK_LINUX_PROC \
+ { \
+ .name = "PLUGIN[proc]", \
+ .config_section = CONFIG_SECTION_PLUGINS, \
+ .config_name = "proc", \
+ .enabled = 1, \
+ .thread = NULL, \
+ .init_routine = NULL, \
+ .start_routine = proc_main \
+ },
+
+
+#define PLUGIN_PROC_CONFIG_NAME "proc"
+#define PLUGIN_PROC_NAME PLUGIN_PROC_CONFIG_NAME ".plugin"
+
+extern void *proc_main(void *ptr);
+
+extern int do_proc_net_dev(int update_every, usec_t dt);
+extern int do_proc_diskstats(int update_every, usec_t dt);
+extern int do_proc_net_snmp(int update_every, usec_t dt);
+extern int do_proc_net_snmp6(int update_every, usec_t dt);
+extern int do_proc_net_netstat(int update_every, usec_t dt);
+extern int do_proc_net_stat_conntrack(int update_every, usec_t dt);
+extern int do_proc_net_ip_vs_stats(int update_every, usec_t dt);
+extern int do_proc_stat(int update_every, usec_t dt);
+extern int do_proc_meminfo(int update_every, usec_t dt);
+extern int do_proc_vmstat(int update_every, usec_t dt);
+extern int do_proc_net_rpc_nfs(int update_every, usec_t dt);
+extern int do_proc_net_rpc_nfsd(int update_every, usec_t dt);
+extern int do_proc_sys_kernel_random_entropy_avail(int update_every, usec_t dt);
+extern int do_proc_interrupts(int update_every, usec_t dt);
+extern int do_proc_softirqs(int update_every, usec_t dt);
+extern int do_sys_kernel_mm_ksm(int update_every, usec_t dt);
+extern int do_proc_loadavg(int update_every, usec_t dt);
+extern int do_proc_net_stat_synproxy(int update_every, usec_t dt);
+extern int do_proc_net_softnet_stat(int update_every, usec_t dt);
+extern int do_proc_uptime(int update_every, usec_t dt);
+extern int do_proc_sys_devices_system_edac_mc(int update_every, usec_t dt);
+extern int do_proc_sys_devices_system_node(int update_every, usec_t dt);
+extern int do_proc_spl_kstat_zfs_arcstats(int update_every, usec_t dt);
+extern int do_sys_fs_btrfs(int update_every, usec_t dt);
+extern int do_proc_net_sockstat(int update_every, usec_t dt);
+extern int do_proc_net_sockstat6(int update_every, usec_t dt);
+extern int do_proc_net_sctp_snmp(int update_every, usec_t dt);
+extern int do_ipc(int update_every, usec_t dt);
+extern int get_numa_node_count(void);
+
+// metrics that need to be shared among data collectors
+extern unsigned long long tcpext_TCPSynRetrans;
+
+// netdev renames
+extern void netdev_rename_device_add(const char *host_device, const char *container_device, const char *container_name);
+extern void netdev_rename_device_del(const char *host_device);
+
+#include "proc_self_mountinfo.h"
+#include "zfs_common.h"
+
+#else // (TARGET_OS == OS_LINUX)
+
+#define NETDATA_PLUGIN_HOOK_LINUX_PROC
+
+#endif // (TARGET_OS == OS_LINUX)
+
+
+#endif /* NETDATA_PLUGIN_PROC_H */
diff --git a/collectors/proc.plugin/proc_diskstats.c b/collectors/proc.plugin/proc_diskstats.c
new file mode 100644
index 000000000..387b395a3
--- /dev/null
+++ b/collectors/proc.plugin/proc_diskstats.c
@@ -0,0 +1,1649 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "plugin_proc.h"
+
+#define RRD_TYPE_DISK "disk"
+#define PLUGIN_PROC_MODULE_DISKSTATS_NAME "/proc/diskstats"
+#define CONFIG_SECTION_PLUGIN_PROC_DISKSTATS "plugin:" PLUGIN_PROC_CONFIG_NAME ":" PLUGIN_PROC_MODULE_DISKSTATS_NAME
+
+#define DISK_TYPE_UNKNOWN 0
+#define DISK_TYPE_PHYSICAL 1
+#define DISK_TYPE_PARTITION 2
+#define DISK_TYPE_VIRTUAL 3
+
+#define DEFAULT_EXCLUDED_DISKS "loop* ram*"
+
+static struct disk {
+ char *disk; // the name of the disk (sda, sdb, etc, after being looked up)
+ char *device; // the device of the disk (before being looked up)
+ unsigned long major;
+ unsigned long minor;
+ int sector_size;
+ int type;
+
+ char *mount_point;
+
+ // disk options caching
+ int do_io;
+ int do_ops;
+ int do_mops;
+ int do_iotime;
+ int do_qops;
+ int do_util;
+ int do_backlog;
+ int do_bcache;
+
+ int updated;
+
+ int device_is_bcache;
+
+ char *bcache_filename_dirty_data;
+ char *bcache_filename_writeback_rate;
+ char *bcache_filename_cache_congested;
+ char *bcache_filename_cache_available_percent;
+ char *bcache_filename_stats_five_minute_cache_hit_ratio;
+ char *bcache_filename_stats_hour_cache_hit_ratio;
+ char *bcache_filename_stats_day_cache_hit_ratio;
+ char *bcache_filename_stats_total_cache_hit_ratio;
+ char *bcache_filename_stats_total_cache_hits;
+ char *bcache_filename_stats_total_cache_misses;
+ char *bcache_filename_stats_total_cache_miss_collisions;
+ char *bcache_filename_stats_total_cache_bypass_hits;
+ char *bcache_filename_stats_total_cache_bypass_misses;
+ char *bcache_filename_stats_total_cache_readaheads;
+ char *bcache_filename_cache_read_races;
+ char *bcache_filename_cache_io_errors;
+ char *bcache_filename_priority_stats;
+
+ usec_t bcache_priority_stats_update_every_usec;
+ usec_t bcache_priority_stats_elapsed_usec;
+
+ RRDSET *st_io;
+ RRDDIM *rd_io_reads;
+ RRDDIM *rd_io_writes;
+
+ RRDSET *st_ops;
+ RRDDIM *rd_ops_reads;
+ RRDDIM *rd_ops_writes;
+
+ RRDSET *st_qops;
+ RRDDIM *rd_qops_operations;
+
+ RRDSET *st_backlog;
+ RRDDIM *rd_backlog_backlog;
+
+ RRDSET *st_util;
+ RRDDIM *rd_util_utilization;
+
+ RRDSET *st_mops;
+ RRDDIM *rd_mops_reads;
+ RRDDIM *rd_mops_writes;
+
+ RRDSET *st_iotime;
+ RRDDIM *rd_iotime_reads;
+ RRDDIM *rd_iotime_writes;
+
+ RRDSET *st_await;
+ RRDDIM *rd_await_reads;
+ RRDDIM *rd_await_writes;
+
+ RRDSET *st_avgsz;
+ RRDDIM *rd_avgsz_reads;
+ RRDDIM *rd_avgsz_writes;
+
+ RRDSET *st_svctm;
+ RRDDIM *rd_svctm_svctm;
+
+ RRDSET *st_bcache_size;
+ RRDDIM *rd_bcache_dirty_size;
+
+ RRDSET *st_bcache_usage;
+ RRDDIM *rd_bcache_available_percent;
+
+ RRDSET *st_bcache_hit_ratio;
+ RRDDIM *rd_bcache_hit_ratio_5min;
+ RRDDIM *rd_bcache_hit_ratio_1hour;
+ RRDDIM *rd_bcache_hit_ratio_1day;
+ RRDDIM *rd_bcache_hit_ratio_total;
+
+ RRDSET *st_bcache;
+ RRDDIM *rd_bcache_hits;
+ RRDDIM *rd_bcache_misses;
+ RRDDIM *rd_bcache_miss_collisions;
+
+ RRDSET *st_bcache_bypass;
+ RRDDIM *rd_bcache_bypass_hits;
+ RRDDIM *rd_bcache_bypass_misses;
+
+ RRDSET *st_bcache_rates;
+ RRDDIM *rd_bcache_rate_congested;
+ RRDDIM *rd_bcache_readaheads;
+ RRDDIM *rd_bcache_rate_writeback;
+
+ RRDSET *st_bcache_cache_allocations;
+ RRDDIM *rd_bcache_cache_allocations_unused;
+ RRDDIM *rd_bcache_cache_allocations_clean;
+ RRDDIM *rd_bcache_cache_allocations_dirty;
+ RRDDIM *rd_bcache_cache_allocations_metadata;
+ RRDDIM *rd_bcache_cache_allocations_unknown;
+
+ RRDSET *st_bcache_cache_read_races;
+ RRDDIM *rd_bcache_cache_read_races;
+ RRDDIM *rd_bcache_cache_io_errors;
+
+ struct disk *next;
+} *disk_root = NULL;
+
+#define rrdset_obsolete_and_pointer_null(st) do { if(st) { rrdset_is_obsolete(st); (st) = NULL; } } while(st)
+
+// static char *path_to_get_hw_sector_size = NULL;
+// static char *path_to_get_hw_sector_size_partitions = NULL;
+static char *path_to_sys_dev_block_major_minor_string = NULL;
+static char *path_to_sys_block_device = NULL;
+static char *path_to_sys_block_device_bcache = NULL;
+static char *path_to_sys_devices_virtual_block_device = NULL;
+static char *path_to_device_mapper = NULL;
+static char *path_to_device_label = NULL;
+static char *path_to_device_id = NULL;
+static char *path_to_veritas_volume_groups = NULL;
+static int name_disks_by_id = CONFIG_BOOLEAN_NO;
+static int global_bcache_priority_stats_update_every = 0; // disabled by default
+
+static int global_enable_new_disks_detected_at_runtime = CONFIG_BOOLEAN_YES,
+ global_enable_performance_for_physical_disks = CONFIG_BOOLEAN_AUTO,
+ global_enable_performance_for_virtual_disks = CONFIG_BOOLEAN_AUTO,
+ global_enable_performance_for_partitions = CONFIG_BOOLEAN_NO,
+ global_do_io = CONFIG_BOOLEAN_AUTO,
+ global_do_ops = CONFIG_BOOLEAN_AUTO,
+ global_do_mops = CONFIG_BOOLEAN_AUTO,
+ global_do_iotime = CONFIG_BOOLEAN_AUTO,
+ global_do_qops = CONFIG_BOOLEAN_AUTO,
+ global_do_util = CONFIG_BOOLEAN_AUTO,
+ global_do_backlog = CONFIG_BOOLEAN_AUTO,
+ global_do_bcache = CONFIG_BOOLEAN_AUTO,
+ globals_initialized = 0,
+ global_cleanup_removed_disks = 1;
+
+static SIMPLE_PATTERN *excluded_disks = NULL;
+
+static unsigned long long int bcache_read_number_with_units(const char *filename) {
+ char buffer[50 + 1];
+ if(read_file(filename, buffer, 50) == 0) {
+ static int unknown_units_error = 10;
+
+ char *end = NULL;
+ long double value = str2ld(buffer, &end);
+ if(end && *end) {
+ if(*end == 'k')
+ return (unsigned long long int)(value * 1024.0);
+ else if(*end == 'M')
+ return (unsigned long long int)(value * 1024.0 * 1024.0);
+ else if(*end == 'G')
+ return (unsigned long long int)(value * 1024.0 * 1024.0 * 1024.0);
+ else if(unknown_units_error > 0) {
+ error("bcache file '%s' provides value '%s' with unknown units '%s'", filename, buffer, end);
+ unknown_units_error--;
+ }
+ }
+
+ return (unsigned long long int)value;
+ }
+
+ return 0;
+}
+
+void bcache_read_priority_stats(struct disk *d, const char *family, int update_every, usec_t dt) {
+ static procfile *ff = NULL;
+ static char *separators = " \t:%[]";
+
+ static ARL_BASE *arl_base = NULL;
+
+ static unsigned long long unused;
+ static unsigned long long clean;
+ static unsigned long long dirty;
+ static unsigned long long metadata;
+ static unsigned long long unknown;
+
+ // check if it is time to update this metric
+ d->bcache_priority_stats_elapsed_usec += dt;
+ if(likely(d->bcache_priority_stats_elapsed_usec < d->bcache_priority_stats_update_every_usec)) return;
+ d->bcache_priority_stats_elapsed_usec = 0;
+
+ // initialize ARL
+ if(unlikely(!arl_base)) {
+ arl_base = arl_create("bcache/priority_stats", NULL, 60);
+ arl_expect(arl_base, "Unused", &unused);
+ arl_expect(arl_base, "Clean", &clean);
+ arl_expect(arl_base, "Dirty", &dirty);
+ arl_expect(arl_base, "Metadata", &metadata);
+ }
+
+ ff = procfile_reopen(ff, d->bcache_filename_priority_stats, separators, PROCFILE_FLAG_DEFAULT);
+ if(likely(ff)) ff = procfile_readall(ff);
+ if(unlikely(!ff)) {
+ separators = " \t:%[]";
+ return;
+ }
+
+ // do not reset the separators on every iteration
+ separators = NULL;
+
+ arl_begin(arl_base);
+ unused = clean = dirty = metadata = unknown = 0;
+
+ size_t lines = procfile_lines(ff), l;
+
+ for(l = 0; l < lines ;l++) {
+ size_t words = procfile_linewords(ff, l);
+ if(unlikely(words < 2)) {
+ if(unlikely(words)) error("Cannot read '%s' line %zu. Expected 2 params, read %zu.", d->bcache_filename_priority_stats, l, words);
+ continue;
+ }
+
+ if(unlikely(arl_check(arl_base,
+ procfile_lineword(ff, l, 0),
+ procfile_lineword(ff, l, 1)))) break;
+ }
+
+ unknown = 100 - unused - clean - dirty - metadata;
+
+ // create / update the cache allocations chart
+ {
+ if(unlikely(!d->st_bcache_cache_allocations)) {
+ d->st_bcache_cache_allocations = rrdset_create_localhost(
+ "disk_bcache_cache_alloc"
+ , d->device
+ , d->disk
+ , family
+ , "disk.bcache_cache_alloc"
+ , "BCache Cache Allocations"
+ , "percentage"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_DISKSTATS_NAME
+ , NETDATA_CHART_PRIO_BCACHE_CACHE_ALLOC
+ , update_every
+ , RRDSET_TYPE_STACKED
+ );
+
+ d->rd_bcache_cache_allocations_unused = rrddim_add(d->st_bcache_cache_allocations, "unused", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ d->rd_bcache_cache_allocations_dirty = rrddim_add(d->st_bcache_cache_allocations, "dirty", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ d->rd_bcache_cache_allocations_clean = rrddim_add(d->st_bcache_cache_allocations, "clean", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ d->rd_bcache_cache_allocations_metadata = rrddim_add(d->st_bcache_cache_allocations, "metadata", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ d->rd_bcache_cache_allocations_unknown = rrddim_add(d->st_bcache_cache_allocations, "undefined", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+
+ d->bcache_priority_stats_update_every_usec = update_every * USEC_PER_SEC;
+ }
+ else rrdset_next(d->st_bcache_cache_allocations);
+
+ rrddim_set_by_pointer(d->st_bcache_cache_allocations, d->rd_bcache_cache_allocations_unused, unused);
+ rrddim_set_by_pointer(d->st_bcache_cache_allocations, d->rd_bcache_cache_allocations_dirty, dirty);
+ rrddim_set_by_pointer(d->st_bcache_cache_allocations, d->rd_bcache_cache_allocations_clean, clean);
+ rrddim_set_by_pointer(d->st_bcache_cache_allocations, d->rd_bcache_cache_allocations_metadata, metadata);
+ rrddim_set_by_pointer(d->st_bcache_cache_allocations, d->rd_bcache_cache_allocations_unknown, unknown);
+ rrdset_done(d->st_bcache_cache_allocations);
+ }
+}
+
+static inline int is_major_enabled(int major) {
+ static int8_t *major_configs = NULL;
+ static size_t major_size = 0;
+
+ if(major < 0) return 1;
+
+ size_t wanted_size = (size_t)major + 1;
+
+ if(major_size < wanted_size) {
+ major_configs = reallocz(major_configs, wanted_size * sizeof(int8_t));
+
+ size_t i;
+ for(i = major_size; i < wanted_size ; i++)
+ major_configs[i] = -1;
+
+ major_size = wanted_size;
+ }
+
+ if(major_configs[major] == -1) {
+ char buffer[CONFIG_MAX_NAME + 1];
+ snprintfz(buffer, CONFIG_MAX_NAME, "performance metrics for disks with major %d", major);
+ major_configs[major] = (char)config_get_boolean(CONFIG_SECTION_PLUGIN_PROC_DISKSTATS, buffer, 1);
+ }
+
+ return (int)major_configs[major];
+}
+
+static inline int get_disk_name_from_path(const char *path, char *result, size_t result_size, unsigned long major, unsigned long minor, char *disk, char *prefix, int depth) {
+ //info("DEVICE-MAPPER ('%s', %lu:%lu): examining directory '%s' (allowed depth %d).", disk, major, minor, path, depth);
+
+ int found = 0;
+
+ DIR *dir = opendir(path);
+ if (!dir) {
+ error("DEVICE-MAPPER ('%s', %lu:%lu): Cannot open directory '%s'.", disk, major, minor, path);
+ goto failed;
+ }
+
+ struct dirent *de = NULL;
+ while ((de = readdir(dir))) {
+ if(de->d_type == DT_DIR) {
+ if((de->d_name[0] == '.' && de->d_name[1] == '\0') || (de->d_name[0] == '.' && de->d_name[1] == '.' && de->d_name[2] == '\0'))
+ continue;
+
+ if(depth <= 0) {
+ error("DEVICE-MAPPER ('%s', %lu:%lu): Depth limit reached for path '%s/%s'. Ignoring path.", disk, major, minor, path, de->d_name);
+ break;
+ }
+ else {
+ char *path_nested = NULL;
+ char *prefix_nested = NULL;
+
+ {
+ char buffer[FILENAME_MAX + 1];
+ snprintfz(buffer, FILENAME_MAX, "%s/%s", path, de->d_name);
+ path_nested = strdupz(buffer);
+
+ snprintfz(buffer, FILENAME_MAX, "%s%s%s", (prefix)?prefix:"", (prefix)?"_":"", de->d_name);
+ prefix_nested = strdupz(buffer);
+ }
+
+ found = get_disk_name_from_path(path_nested, result, result_size, major, minor, disk, prefix_nested, depth - 1);
+ freez(path_nested);
+ freez(prefix_nested);
+
+ if(found) break;
+ }
+ }
+ else if(de->d_type == DT_LNK || de->d_type == DT_BLK) {
+ char filename[FILENAME_MAX + 1];
+
+ if(de->d_type == DT_LNK) {
+ snprintfz(filename, FILENAME_MAX, "%s/%s", path, de->d_name);
+ ssize_t len = readlink(filename, result, result_size - 1);
+ if(len <= 0) {
+ error("DEVICE-MAPPER ('%s', %lu:%lu): Cannot read link '%s'.", disk, major, minor, filename);
+ continue;
+ }
+
+ result[len] = '\0';
+ if(result[0] != '/')
+ snprintfz(filename, FILENAME_MAX, "%s/%s", path, result);
+ else
+ strncpyz(filename, result, FILENAME_MAX);
+ }
+ else {
+ snprintfz(filename, FILENAME_MAX, "%s/%s", path, de->d_name);
+ }
+
+ struct stat sb;
+ if(stat(filename, &sb) == -1) {
+ error("DEVICE-MAPPER ('%s', %lu:%lu): Cannot stat() file '%s'.", disk, major, minor, filename);
+ continue;
+ }
+
+ if((sb.st_mode & S_IFMT) != S_IFBLK) {
+ //info("DEVICE-MAPPER ('%s', %lu:%lu): file '%s' is not a block device.", disk, major, minor, filename);
+ continue;
+ }
+
+ if(major(sb.st_rdev) != major || minor(sb.st_rdev) != minor) {
+ //info("DEVICE-MAPPER ('%s', %lu:%lu): filename '%s' does not match %lu:%lu.", disk, major, minor, filename, (unsigned long)major(sb.st_rdev), (unsigned long)minor(sb.st_rdev));
+ continue;
+ }
+
+ //info("DEVICE-MAPPER ('%s', %lu:%lu): filename '%s' matches.", disk, major, minor, filename);
+
+ snprintfz(result, result_size - 1, "%s%s%s", (prefix)?prefix:"", (prefix)?"_":"", de->d_name);
+ found = 1;
+ break;
+ }
+ }
+ closedir(dir);
+
+failed:
+
+ if(!found)
+ result[0] = '\0';
+
+ return found;
+}
+
+static inline char *get_disk_name(unsigned long major, unsigned long minor, char *disk) {
+ char result[FILENAME_MAX + 1] = "";
+
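+ // resolve a user friendly name for the device, trying in order:
+ // /dev/mapper entries, /dev/disk/by-label, /dev/vx/dsk (names prefixed with "vx"),
+ // and - when "name disks by id" is enabled - /dev/disk/by-id;
+ // if nothing matches, fall back to the kernel device name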
+ if(!path_to_device_mapper || !*path_to_device_mapper || !get_disk_name_from_path(path_to_device_mapper, result, FILENAME_MAX + 1, major, minor, disk, NULL, 0))
+ if(!path_to_device_label || !*path_to_device_label || !get_disk_name_from_path(path_to_device_label, result, FILENAME_MAX + 1, major, minor, disk, NULL, 0))
+ if(!path_to_veritas_volume_groups || !*path_to_veritas_volume_groups || !get_disk_name_from_path(path_to_veritas_volume_groups, result, FILENAME_MAX + 1, major, minor, disk, "vx", 2))
+ if(name_disks_by_id != CONFIG_BOOLEAN_YES || !path_to_device_id || !*path_to_device_id || !get_disk_name_from_path(path_to_device_id, result, FILENAME_MAX + 1, major, minor, disk, NULL, 0))
+ strncpy(result, disk, FILENAME_MAX);
+
+ if(!result[0])
+ strncpy(result, disk, FILENAME_MAX);
+
+ netdata_fix_chart_name(result);
+ return strdup(result);
+}
+
+static void get_disk_config(struct disk *d) {
+ int def_enable = global_enable_new_disks_detected_at_runtime;
+
+ if(def_enable != CONFIG_BOOLEAN_NO && (simple_pattern_matches(excluded_disks, d->device) || simple_pattern_matches(excluded_disks, d->disk)))
+ def_enable = CONFIG_BOOLEAN_NO;
+
+ char var_name[4096 + 1];
+ snprintfz(var_name, 4096, CONFIG_SECTION_PLUGIN_PROC_DISKSTATS ":%s", d->disk);
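+ // per-disk options live in their own config section, e.g. [plugin:proc:/proc/diskstats:sda]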
+
+ def_enable = config_get_boolean_ondemand(var_name, "enable", def_enable);
+ if(unlikely(def_enable == CONFIG_BOOLEAN_NO)) {
+ // the user does not want any metrics for this disk
+ d->do_io = CONFIG_BOOLEAN_NO;
+ d->do_ops = CONFIG_BOOLEAN_NO;
+ d->do_mops = CONFIG_BOOLEAN_NO;
+ d->do_iotime = CONFIG_BOOLEAN_NO;
+ d->do_qops = CONFIG_BOOLEAN_NO;
+ d->do_util = CONFIG_BOOLEAN_NO;
+ d->do_backlog = CONFIG_BOOLEAN_NO;
+ d->do_bcache = CONFIG_BOOLEAN_NO;
+ }
+ else {
+ // this disk is enabled
+ // check its direct settings
+
+ int def_performance = CONFIG_BOOLEAN_AUTO;
+
+ // since this is 'on demand' we can figure the performance settings
+ // based on the type of disk
+
+ if(!d->device_is_bcache) {
+ switch(d->type) {
+ default:
+ case DISK_TYPE_UNKNOWN:
+ break;
+
+ case DISK_TYPE_PHYSICAL:
+ def_performance = global_enable_performance_for_physical_disks;
+ break;
+
+ case DISK_TYPE_PARTITION:
+ def_performance = global_enable_performance_for_partitions;
+ break;
+
+ case DISK_TYPE_VIRTUAL:
+ def_performance = global_enable_performance_for_virtual_disks;
+ break;
+ }
+ }
+
+ // check if we have to disable performance for this disk
+ if(def_performance)
+ def_performance = is_major_enabled((int)d->major);
+
+ // ------------------------------------------------------------
+ // now we have def_performance to work with
+
+ // def_performance
+ // check the user configuration (this will also show our 'on demand' decision)
+ def_performance = config_get_boolean_ondemand(var_name, "enable performance metrics", def_performance);
+
+ int ddo_io = CONFIG_BOOLEAN_NO,
+ ddo_ops = CONFIG_BOOLEAN_NO,
+ ddo_mops = CONFIG_BOOLEAN_NO,
+ ddo_iotime = CONFIG_BOOLEAN_NO,
+ ddo_qops = CONFIG_BOOLEAN_NO,
+ ddo_util = CONFIG_BOOLEAN_NO,
+ ddo_backlog = CONFIG_BOOLEAN_NO,
+ ddo_bcache = CONFIG_BOOLEAN_NO;
+
+ // we enable individual performance charts only when def_performance is not disabled
+ if(unlikely(def_performance != CONFIG_BOOLEAN_NO)) {
+ ddo_io = global_do_io,
+ ddo_ops = global_do_ops,
+ ddo_mops = global_do_mops,
+ ddo_iotime = global_do_iotime,
+ ddo_qops = global_do_qops,
+ ddo_util = global_do_util,
+ ddo_backlog = global_do_backlog,
+ ddo_bcache = global_do_bcache;
+ }
+
+ d->do_io = config_get_boolean_ondemand(var_name, "bandwidth", ddo_io);
+ d->do_ops = config_get_boolean_ondemand(var_name, "operations", ddo_ops);
+ d->do_mops = config_get_boolean_ondemand(var_name, "merged operations", ddo_mops);
+ d->do_iotime = config_get_boolean_ondemand(var_name, "i/o time", ddo_iotime);
+ d->do_qops = config_get_boolean_ondemand(var_name, "queued operations", ddo_qops);
+ d->do_util = config_get_boolean_ondemand(var_name, "utilization percentage", ddo_util);
+ d->do_backlog = config_get_boolean_ondemand(var_name, "backlog", ddo_backlog);
+
+ if(d->device_is_bcache)
+ d->do_bcache = config_get_boolean_ondemand(var_name, "bcache", ddo_bcache);
+ else
+ d->do_bcache = 0;
+ }
+}
+
+static struct disk *get_disk(unsigned long major, unsigned long minor, char *disk) {
+ static struct mountinfo *disk_mountinfo_root = NULL;
+
+ struct disk *d;
+
+ // search for it in our in-memory list.
+ // the scan is linear, but the number of disks / partitions
+ // in a system is small enough for this to be acceptable
+ for(d = disk_root; d ; d = d->next)
+ if(unlikely(d->major == major && d->minor == minor))
+ return d;
+
+ // not found
+ // create a new disk structure
+ d = (struct disk *)callocz(1, sizeof(struct disk));
+
+ d->disk = get_disk_name(major, minor, disk);
+ d->device = strdupz(disk);
+ d->major = major;
+ d->minor = minor;
+ d->type = DISK_TYPE_UNKNOWN; // default type; refined below when we can detect a more specific one
+ d->sector_size = 512; // the kernel always reports /proc/diskstats sectors in 512-byte units (see #3481 below)
+ d->next = NULL;
+
+ // append it to the list
+ if(unlikely(!disk_root))
+ disk_root = d;
+ else {
+ struct disk *last;
+ for(last = disk_root; last->next ;last = last->next);
+ last->next = d;
+ }
+
+ char buffer[FILENAME_MAX + 1];
+
+ // find if it is a physical disk
+ // by checking if /sys/block/DISK is readable.
+ snprintfz(buffer, FILENAME_MAX, path_to_sys_block_device, disk);
+ if(likely(access(buffer, R_OK) == 0)) {
+ // assign it here, but it will be overwritten if it is not a physical disk
+ d->type = DISK_TYPE_PHYSICAL;
+ }
+
+ // find if it is a partition
+ // by checking if /sys/dev/block/MAJOR:MINOR/partition is readable.
+ snprintfz(buffer, FILENAME_MAX, path_to_sys_dev_block_major_minor_string, major, minor, "partition");
+ if(likely(access(buffer, R_OK) == 0)) {
+ d->type = DISK_TYPE_PARTITION;
+ }
+ else {
+ // find if it is a virtual disk
+ // by checking if /sys/devices/virtual/block/DISK is readable.
+ snprintfz(buffer, FILENAME_MAX, path_to_sys_devices_virtual_block_device, disk);
+ if(likely(access(buffer, R_OK) == 0)) {
+ d->type = DISK_TYPE_VIRTUAL;
+ }
+ else {
+ // find if it is a virtual device
+ // by checking if /sys/dev/block/MAJOR:MINOR/slaves has entries
+ snprintfz(buffer, FILENAME_MAX, path_to_sys_dev_block_major_minor_string, major, minor, "slaves/");
+ DIR *dirp = opendir(buffer);
+ if (likely(dirp != NULL)) {
+ struct dirent *dp;
+ while ((dp = readdir(dirp))) {
+ // readdir() returns . and .. even for empty directories - skip them
+ if (unlikely(strcmp(dp->d_name, ".") == 0 || strcmp(dp->d_name, "..") == 0)) {
+ continue;
+ }
+
+ d->type = DISK_TYPE_VIRTUAL;
+
+ // stop after the first real entry - one slave is enough to mark the disk as virtual
+ break;
+ }
+ if (unlikely(closedir(dirp) == -1))
+ error("Unable to close dir %s", buffer);
+ }
+ }
+ }
+
+ // ------------------------------------------------------------------------
+ // check if we can find its mount point
+
+ // mountinfo_find() can be called with NULL disk_mountinfo_root
+ struct mountinfo *mi = mountinfo_find(disk_mountinfo_root, d->major, d->minor);
+ if(unlikely(!mi)) {
+ // mountinfo_free_all can be called with NULL
+ mountinfo_free_all(disk_mountinfo_root);
+ disk_mountinfo_root = mountinfo_read(0);
+ mi = mountinfo_find(disk_mountinfo_root, d->major, d->minor);
+ }
+
+ if(unlikely(mi))
+ d->mount_point = strdupz(mi->mount_point);
+ else
+ d->mount_point = NULL;
+
+ // ------------------------------------------------------------------------
+ // find the disk sector size
+
+ /*
+ * sector size is always 512 bytes inside the kernel #3481
+ *
+ {
+ char tf[FILENAME_MAX + 1], *t;
+ strncpyz(tf, d->device, FILENAME_MAX);
+
+ // replace all / with !
+ for(t = tf; *t ;t++)
+ if(unlikely(*t == '/')) *t = '!';
+
+ if(likely(d->type == DISK_TYPE_PARTITION))
+ snprintfz(buffer, FILENAME_MAX, path_to_get_hw_sector_size_partitions, d->major, d->minor, tf);
+ else
+ snprintfz(buffer, FILENAME_MAX, path_to_get_hw_sector_size, tf);
+
+ FILE *fpss = fopen(buffer, "r");
+ if(likely(fpss)) {
+ char buffer2[1024 + 1];
+ char *tmp = fgets(buffer2, 1024, fpss);
+
+ if(likely(tmp)) {
+ d->sector_size = str2i(tmp);
+ if(unlikely(d->sector_size <= 0)) {
+ error("Invalid sector size %d for device %s in %s. Assuming 512.", d->sector_size, d->device, buffer);
+ d->sector_size = 512;
+ }
+ }
+ else error("Cannot read data for sector size for device %s from %s. Assuming 512.", d->device, buffer);
+
+ fclose(fpss);
+ }
+ else error("Cannot read sector size for device %s from %s. Assuming 512.", d->device, buffer);
+ }
+ */
+
+ // ------------------------------------------------------------------------
+ // check if the device is a bcache
+
+ struct stat bcache;
+ snprintfz(buffer, FILENAME_MAX, path_to_sys_block_device_bcache, disk);
+ if(unlikely(stat(buffer, &bcache) == 0 && (bcache.st_mode & S_IFMT) == S_IFDIR)) {
+ // we have the 'bcache' directory
+ d->device_is_bcache = 1;
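+ // the bcache metrics files live under /sys/block/DISK/bcache/ (per backing device),
+ // with cache-set wide files under its cache/ subdirectory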
+
+ char buffer2[FILENAME_MAX + 1];
+
+ snprintfz(buffer2, FILENAME_MAX, "%s/cache/congested", buffer);
+ if(access(buffer2, R_OK) == 0)
+ d->bcache_filename_cache_congested = strdupz(buffer2);
+ else
+ error("bcache file '%s' cannot be read.", buffer2);
+
+ snprintfz(buffer2, FILENAME_MAX, "%s/readahead", buffer);
+ if(access(buffer2, R_OK) == 0)
+ d->bcache_filename_stats_total_cache_readaheads = strdupz(buffer2);
+ else
+ error("bcache file '%s' cannot be read.", buffer2);
+
+ snprintfz(buffer2, FILENAME_MAX, "%s/cache/cache0/priority_stats", buffer); // only one cache is supported by bcache
+ if(access(buffer2, R_OK) == 0)
+ d->bcache_filename_priority_stats = strdupz(buffer2);
+ else
+ error("bcache file '%s' cannot be read.", buffer2);
+
+ snprintfz(buffer2, FILENAME_MAX, "%s/cache/internal/cache_read_races", buffer);
+ if(access(buffer2, R_OK) == 0)
+ d->bcache_filename_cache_read_races = strdupz(buffer2);
+ else
+ error("bcache file '%s' cannot be read.", buffer2);
+
+ snprintfz(buffer2, FILENAME_MAX, "%s/cache/cache0/io_errors", buffer);
+ if(access(buffer2, R_OK) == 0)
+ d->bcache_filename_cache_io_errors = strdupz(buffer2);
+ else
+ error("bcache file '%s' cannot be read.", buffer2);
+
+ snprintfz(buffer2, FILENAME_MAX, "%s/dirty_data", buffer);
+ if(access(buffer2, R_OK) == 0)
+ d->bcache_filename_dirty_data = strdupz(buffer2);
+ else
+ error("bcache file '%s' cannot be read.", buffer2);
+
+ snprintfz(buffer2, FILENAME_MAX, "%s/writeback_rate", buffer);
+ if(access(buffer2, R_OK) == 0)
+ d->bcache_filename_writeback_rate = strdupz(buffer2);
+ else
+ error("bcache file '%s' cannot be read.", buffer2);
+
+ snprintfz(buffer2, FILENAME_MAX, "%s/cache/cache_available_percent", buffer);
+ if(access(buffer2, R_OK) == 0)
+ d->bcache_filename_cache_available_percent = strdupz(buffer2);
+ else
+ error("bcache file '%s' cannot be read.", buffer2);
+
+ snprintfz(buffer2, FILENAME_MAX, "%s/stats_total/cache_hits", buffer);
+ if(access(buffer2, R_OK) == 0)
+ d->bcache_filename_stats_total_cache_hits = strdupz(buffer2);
+ else
+ error("bcache file '%s' cannot be read.", buffer2);
+
+ snprintfz(buffer2, FILENAME_MAX, "%s/stats_five_minute/cache_hit_ratio", buffer);
+ if(access(buffer2, R_OK) == 0)
+ d->bcache_filename_stats_five_minute_cache_hit_ratio = strdupz(buffer2);
+ else
+ error("bcache file '%s' cannot be read.", buffer2);
+
+ snprintfz(buffer2, FILENAME_MAX, "%s/stats_hour/cache_hit_ratio", buffer);
+ if(access(buffer2, R_OK) == 0)
+ d->bcache_filename_stats_hour_cache_hit_ratio = strdupz(buffer2);
+ else
+ error("bcache file '%s' cannot be read.", buffer2);
+
+ snprintfz(buffer2, FILENAME_MAX, "%s/stats_day/cache_hit_ratio", buffer);
+ if(access(buffer2, R_OK) == 0)
+ d->bcache_filename_stats_day_cache_hit_ratio = strdupz(buffer2);
+ else
+ error("bcache file '%s' cannot be read.", buffer2);
+
+ snprintfz(buffer2, FILENAME_MAX, "%s/stats_total/cache_hit_ratio", buffer);
+ if(access(buffer2, R_OK) == 0)
+ d->bcache_filename_stats_total_cache_hit_ratio = strdupz(buffer2);
+ else
+ error("bcache file '%s' cannot be read.", buffer2);
+
+ snprintfz(buffer2, FILENAME_MAX, "%s/stats_total/cache_misses", buffer);
+ if(access(buffer2, R_OK) == 0)
+ d->bcache_filename_stats_total_cache_misses = strdupz(buffer2);
+ else
+ error("bcache file '%s' cannot be read.", buffer2);
+
+ snprintfz(buffer2, FILENAME_MAX, "%s/stats_total/cache_bypass_hits", buffer);
+ if(access(buffer2, R_OK) == 0)
+ d->bcache_filename_stats_total_cache_bypass_hits = strdupz(buffer2);
+ else
+ error("bcache file '%s' cannot be read.", buffer2);
+
+ snprintfz(buffer2, FILENAME_MAX, "%s/stats_total/cache_bypass_misses", buffer);
+ if(access(buffer2, R_OK) == 0)
+ d->bcache_filename_stats_total_cache_bypass_misses = strdupz(buffer2);
+ else
+ error("bcache file '%s' cannot be read.", buffer2);
+
+ snprintfz(buffer2, FILENAME_MAX, "%s/stats_total/cache_miss_collisions", buffer);
+ if(access(buffer2, R_OK) == 0)
+ d->bcache_filename_stats_total_cache_miss_collisions = strdupz(buffer2);
+ else
+ error("bcache file '%s' cannot be read.", buffer2);
+ }
+
+ get_disk_config(d);
+ return d;
+}
+
+int do_proc_diskstats(int update_every, usec_t dt) {
+ static procfile *ff = NULL;
+
+ if(unlikely(!globals_initialized)) {
+ globals_initialized = 1;
+
+ global_enable_new_disks_detected_at_runtime = config_get_boolean(CONFIG_SECTION_PLUGIN_PROC_DISKSTATS, "enable new disks detected at runtime", global_enable_new_disks_detected_at_runtime);
+ global_enable_performance_for_physical_disks = config_get_boolean_ondemand(CONFIG_SECTION_PLUGIN_PROC_DISKSTATS, "performance metrics for physical disks", global_enable_performance_for_physical_disks);
+ global_enable_performance_for_virtual_disks = config_get_boolean_ondemand(CONFIG_SECTION_PLUGIN_PROC_DISKSTATS, "performance metrics for virtual disks", global_enable_performance_for_virtual_disks);
+ global_enable_performance_for_partitions = config_get_boolean_ondemand(CONFIG_SECTION_PLUGIN_PROC_DISKSTATS, "performance metrics for partitions", global_enable_performance_for_partitions);
+
+ global_do_io = config_get_boolean_ondemand(CONFIG_SECTION_PLUGIN_PROC_DISKSTATS, "bandwidth for all disks", global_do_io);
+ global_do_ops = config_get_boolean_ondemand(CONFIG_SECTION_PLUGIN_PROC_DISKSTATS, "operations for all disks", global_do_ops);
+ global_do_mops = config_get_boolean_ondemand(CONFIG_SECTION_PLUGIN_PROC_DISKSTATS, "merged operations for all disks", global_do_mops);
+ global_do_iotime = config_get_boolean_ondemand(CONFIG_SECTION_PLUGIN_PROC_DISKSTATS, "i/o time for all disks", global_do_iotime);
+ global_do_qops = config_get_boolean_ondemand(CONFIG_SECTION_PLUGIN_PROC_DISKSTATS, "queued operations for all disks", global_do_qops);
+ global_do_util = config_get_boolean_ondemand(CONFIG_SECTION_PLUGIN_PROC_DISKSTATS, "utilization percentage for all disks", global_do_util);
+ global_do_backlog = config_get_boolean_ondemand(CONFIG_SECTION_PLUGIN_PROC_DISKSTATS, "backlog for all disks", global_do_backlog);
+ global_do_bcache = config_get_boolean_ondemand(CONFIG_SECTION_PLUGIN_PROC_DISKSTATS, "bcache for all disks", global_do_bcache);
+ global_bcache_priority_stats_update_every = (int)config_get_number(CONFIG_SECTION_PLUGIN_PROC_DISKSTATS, "bcache priority stats update every", global_bcache_priority_stats_update_every);
+
+ global_cleanup_removed_disks = config_get_boolean(CONFIG_SECTION_PLUGIN_PROC_DISKSTATS, "remove charts of removed disks" , global_cleanup_removed_disks);
+
+ char buffer[FILENAME_MAX + 1];
+
+ snprintfz(buffer, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/sys/block/%s");
+ path_to_sys_block_device = config_get(CONFIG_SECTION_PLUGIN_PROC_DISKSTATS, "path to get block device", buffer);
+
+ snprintfz(buffer, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/sys/block/%s/bcache");
+ path_to_sys_block_device_bcache = config_get(CONFIG_SECTION_PLUGIN_PROC_DISKSTATS, "path to get block device bcache", buffer);
+
+ snprintfz(buffer, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/sys/devices/virtual/block/%s");
+ path_to_sys_devices_virtual_block_device = config_get(CONFIG_SECTION_PLUGIN_PROC_DISKSTATS, "path to get virtual block device", buffer);
+
+ snprintfz(buffer, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/sys/dev/block/%lu:%lu/%s");
+ path_to_sys_dev_block_major_minor_string = config_get(CONFIG_SECTION_PLUGIN_PROC_DISKSTATS, "path to get block device infos", buffer);
+
+ //snprintfz(buffer, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/sys/block/%s/queue/hw_sector_size");
+ //path_to_get_hw_sector_size = config_get(CONFIG_SECTION_PLUGIN_PROC_DISKSTATS, "path to get h/w sector size", buffer);
+
+ //snprintfz(buffer, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/sys/dev/block/%lu:%lu/subsystem/%s/../queue/hw_sector_size");
+ //path_to_get_hw_sector_size_partitions = config_get(CONFIG_SECTION_PLUGIN_PROC_DISKSTATS, "path to get h/w sector size for partitions", buffer);
+
+ snprintfz(buffer, FILENAME_MAX, "%s/dev/mapper", netdata_configured_host_prefix);
+ path_to_device_mapper = config_get(CONFIG_SECTION_PLUGIN_PROC_DISKSTATS, "path to device mapper", buffer);
+
+ snprintfz(buffer, FILENAME_MAX, "%s/dev/disk/by-label", netdata_configured_host_prefix);
+ path_to_device_label = config_get(CONFIG_SECTION_PLUGIN_PROC_DISKSTATS, "path to /dev/disk/by-label", buffer);
+
+ snprintfz(buffer, FILENAME_MAX, "%s/dev/disk/by-id", netdata_configured_host_prefix);
+ path_to_device_id = config_get(CONFIG_SECTION_PLUGIN_PROC_DISKSTATS, "path to /dev/disk/by-id", buffer);
+
+ snprintfz(buffer, FILENAME_MAX, "%s/dev/vx/dsk", netdata_configured_host_prefix);
+ path_to_veritas_volume_groups = config_get(CONFIG_SECTION_PLUGIN_PROC_DISKSTATS, "path to /dev/vx/dsk", buffer);
+
+ name_disks_by_id = config_get_boolean(CONFIG_SECTION_PLUGIN_PROC_DISKSTATS, "name disks by id", name_disks_by_id);
+
+ excluded_disks = simple_pattern_create(
+ config_get(CONFIG_SECTION_PLUGIN_PROC_DISKSTATS, "exclude disks", DEFAULT_EXCLUDED_DISKS)
+ , NULL
+ , SIMPLE_PATTERN_EXACT
+ );
+ }
+
+ // --------------------------------------------------------------------------
+
+ if(unlikely(!ff)) {
+ char filename[FILENAME_MAX + 1];
+ snprintfz(filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/proc/diskstats");
+ ff = procfile_open(config_get(CONFIG_SECTION_PLUGIN_PROC_DISKSTATS, "filename to monitor", filename), " \t", PROCFILE_FLAG_DEFAULT);
+ }
+ if(unlikely(!ff)) return 0;
+
+ ff = procfile_readall(ff);
+ if(unlikely(!ff)) return 0; // we return 0, so that we will retry to open it next time
+
+ size_t lines = procfile_lines(ff), l;
+
+ collected_number system_read_kb = 0, system_write_kb = 0;
+
+ for(l = 0; l < lines ;l++) {
+ // --------------------------------------------------------------------------
+ // Read parameters
+
+ char *disk;
+ unsigned long major = 0, minor = 0;
+
+ collected_number reads = 0, mreads = 0, readsectors = 0, readms = 0,
+ writes = 0, mwrites = 0, writesectors = 0, writems = 0,
+ queued_ios = 0, busy_ms = 0, backlog_ms = 0;
+
+ collected_number last_reads = 0, last_readsectors = 0, last_readms = 0,
+ last_writes = 0, last_writesectors = 0, last_writems = 0,
+ last_busy_ms = 0;
+
+ size_t words = procfile_linewords(ff, l);
+ if(unlikely(words < 14)) continue;
+
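+ // an illustrative /proc/diskstats line (the numbers are examples):
+ //   8  0 sda  31422 5892 1120410 11246  21680 14317 1579080 53680  0 23976 64888
+ // word indices as used below:
+ //   [0]major [1]minor [2]name [3]reads [4]merged reads [5]read sectors [6]read ms
+ //   [7]writes [8]merged writes [9]written sectors [10]write ms [11]in-flight [12]busy ms [13]weighted ms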
+ major = str2ul(procfile_lineword(ff, l, 0));
+ minor = str2ul(procfile_lineword(ff, l, 1));
+ disk = procfile_lineword(ff, l, 2);
+
+ // # of reads completed # of writes completed
+ // This is the total number of reads or writes completed successfully.
+ reads = str2ull(procfile_lineword(ff, l, 3)); // rd_ios
+ writes = str2ull(procfile_lineword(ff, l, 7)); // wr_ios
+
+ // # of reads merged # of writes merged
+ // Reads and writes which are adjacent to each other may be merged for
+ // efficiency. Thus two 4K reads may become one 8K read before it is
+ // ultimately handed to the disk, and so it will be counted (and queued) as only one I/O.
+ mreads = str2ull(procfile_lineword(ff, l, 4)); // rd_merges_or_rd_sec
+ mwrites = str2ull(procfile_lineword(ff, l, 8)); // wr_merges
+
+ // # of sectors read # of sectors written
+ // This is the total number of sectors read or written successfully.
+ readsectors = str2ull(procfile_lineword(ff, l, 5)); // rd_sec_or_wr_ios
+ writesectors = str2ull(procfile_lineword(ff, l, 9)); // wr_sec
+
+ // # of milliseconds spent reading # of milliseconds spent writing
+ // This is the total number of milliseconds spent by all reads or writes (as
+ // measured from __make_request() to end_that_request_last()).
+ readms = str2ull(procfile_lineword(ff, l, 6)); // rd_ticks_or_wr_sec
+ writems = str2ull(procfile_lineword(ff, l, 10)); // wr_ticks
+
+ // # of I/Os currently in progress
+ // The only field that should go to zero. Incremented as requests are
+ // given to appropriate struct request_queue and decremented as they finish.
+ queued_ios = str2ull(procfile_lineword(ff, l, 11)); // ios_pgr
+
+ // # of milliseconds spent doing I/Os
+ // This field increases so long as field queued_ios is nonzero.
+ busy_ms = str2ull(procfile_lineword(ff, l, 12)); // tot_ticks
+
+ // weighted # of milliseconds spent doing I/Os
+ // This field is incremented at each I/O start, I/O completion, I/O
+ // merge, or read of these stats by the number of I/Os in progress
+ // (field queued_ios) times the number of milliseconds spent doing I/O since the
+ // last update of this field. This can provide an easy measure of both
+ // I/O completion time and the backlog that may be accumulating.
+ backlog_ms = str2ull(procfile_lineword(ff, l, 13)); // rq_ticks
+
+
+ // --------------------------------------------------------------------------
+ // remove slashes from disk names
+ char *s;
+ for(s = disk; *s ;s++)
+ if(*s == '/') *s = '_';
+
+ // --------------------------------------------------------------------------
+ // get a disk structure for the disk
+
+ struct disk *d = get_disk(major, minor, disk);
+ d->updated = 1;
+
+ // --------------------------------------------------------------------------
+ // count the global system disk I/O of physical disks
+
+ if(unlikely(d->type == DISK_TYPE_PHYSICAL)) {
+ system_read_kb += readsectors * d->sector_size / 1024;
+ system_write_kb += writesectors * d->sector_size / 1024;
+ }
+
+ // --------------------------------------------------------------------------
+ // Set its family based on mount point
+
+ char *family = d->mount_point;
+ if(!family) family = d->disk;
+
+
+ // --------------------------------------------------------------------------
+ // Do performance metrics
+
+ if(d->do_io == CONFIG_BOOLEAN_YES || (d->do_io == CONFIG_BOOLEAN_AUTO && (readsectors || writesectors))) {
+ d->do_io = CONFIG_BOOLEAN_YES;
+
+ if(unlikely(!d->st_io)) {
+ d->st_io = rrdset_create_localhost(
+ RRD_TYPE_DISK
+ , d->device
+ , d->disk
+ , family
+ , "disk.io"
+ , "Disk I/O Bandwidth"
+ , "kilobytes/s"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_DISKSTATS_NAME
+ , NETDATA_CHART_PRIO_DISK_IO
+ , update_every
+ , RRDSET_TYPE_AREA
+ );
+
+ d->rd_io_reads = rrddim_add(d->st_io, "reads", NULL, d->sector_size, 1024, RRD_ALGORITHM_INCREMENTAL);
+ d->rd_io_writes = rrddim_add(d->st_io, "writes", NULL, d->sector_size * -1, 1024, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else rrdset_next(d->st_io);
+
+ last_readsectors = rrddim_set_by_pointer(d->st_io, d->rd_io_reads, readsectors);
+ last_writesectors = rrddim_set_by_pointer(d->st_io, d->rd_io_writes, writesectors);
+ rrdset_done(d->st_io);
+ }
+
+ // --------------------------------------------------------------------
+
+ if(d->do_ops == CONFIG_BOOLEAN_YES || (d->do_ops == CONFIG_BOOLEAN_AUTO && (reads || writes))) {
+ d->do_ops = CONFIG_BOOLEAN_YES;
+
+ if(unlikely(!d->st_ops)) {
+ d->st_ops = rrdset_create_localhost(
+ "disk_ops"
+ , d->device
+ , d->disk
+ , family
+ , "disk.ops"
+ , "Disk Completed I/O Operations"
+ , "operations/s"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_DISKSTATS_NAME
+ , NETDATA_CHART_PRIO_DISK_OPS
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ rrdset_flag_set(d->st_ops, RRDSET_FLAG_DETAIL);
+
+ d->rd_ops_reads = rrddim_add(d->st_ops, "reads", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ d->rd_ops_writes = rrddim_add(d->st_ops, "writes", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else rrdset_next(d->st_ops);
+
+ last_reads = rrddim_set_by_pointer(d->st_ops, d->rd_ops_reads, reads);
+ last_writes = rrddim_set_by_pointer(d->st_ops, d->rd_ops_writes, writes);
+ rrdset_done(d->st_ops);
+ }
+
+ // --------------------------------------------------------------------
+
+ if(d->do_qops == CONFIG_BOOLEAN_YES || (d->do_qops == CONFIG_BOOLEAN_AUTO && queued_ios)) {
+ d->do_qops = CONFIG_BOOLEAN_YES;
+
+ if(unlikely(!d->st_qops)) {
+ d->st_qops = rrdset_create_localhost(
+ "disk_qops"
+ , d->device
+ , d->disk
+ , family
+ , "disk.qops"
+ , "Disk Current I/O Operations"
+ , "operations"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_DISKSTATS_NAME
+ , NETDATA_CHART_PRIO_DISK_QOPS
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ rrdset_flag_set(d->st_qops, RRDSET_FLAG_DETAIL);
+
+ d->rd_qops_operations = rrddim_add(d->st_qops, "operations", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ }
+ else rrdset_next(d->st_qops);
+
+ rrddim_set_by_pointer(d->st_qops, d->rd_qops_operations, queued_ios);
+ rrdset_done(d->st_qops);
+ }
+
+ // --------------------------------------------------------------------
+
+ if(d->do_backlog == CONFIG_BOOLEAN_YES || (d->do_backlog == CONFIG_BOOLEAN_AUTO && backlog_ms)) {
+ d->do_backlog = CONFIG_BOOLEAN_YES;
+
+ if(unlikely(!d->st_backlog)) {
+ d->st_backlog = rrdset_create_localhost(
+ "disk_backlog"
+ , d->device
+ , d->disk
+ , family
+ , "disk.backlog"
+ , "Disk Backlog"
+ , "backlog (ms)"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_DISKSTATS_NAME
+ , NETDATA_CHART_PRIO_DISK_BACKLOG
+ , update_every
+ , RRDSET_TYPE_AREA
+ );
+
+ rrdset_flag_set(d->st_backlog, RRDSET_FLAG_DETAIL);
+
+ d->rd_backlog_backlog = rrddim_add(d->st_backlog, "backlog", NULL, 1, 10, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else rrdset_next(d->st_backlog);
+
+ rrddim_set_by_pointer(d->st_backlog, d->rd_backlog_backlog, backlog_ms);
+ rrdset_done(d->st_backlog);
+ }
+
+ // --------------------------------------------------------------------
+
+ if(d->do_util == CONFIG_BOOLEAN_YES || (d->do_util == CONFIG_BOOLEAN_AUTO && busy_ms)) {
+ d->do_util = CONFIG_BOOLEAN_YES;
+
+ if(unlikely(!d->st_util)) {
+ d->st_util = rrdset_create_localhost(
+ "disk_util"
+ , d->device
+ , d->disk
+ , family
+ , "disk.util"
+ , "Disk Utilization Time"
+ , "% of time working"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_DISKSTATS_NAME
+ , NETDATA_CHART_PRIO_DISK_UTIL
+ , update_every
+ , RRDSET_TYPE_AREA
+ );
+
+ rrdset_flag_set(d->st_util, RRDSET_FLAG_DETAIL);
+
+ d->rd_util_utilization = rrddim_add(d->st_util, "utilization", NULL, 1, 10, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else rrdset_next(d->st_util);
+
+ last_busy_ms = rrddim_set_by_pointer(d->st_util, d->rd_util_utilization, busy_ms);
+ rrdset_done(d->st_util);
+ }
+
+ // --------------------------------------------------------------------
+
+ if(d->do_mops == CONFIG_BOOLEAN_YES || (d->do_mops == CONFIG_BOOLEAN_AUTO && (mreads || mwrites))) {
+ d->do_mops = CONFIG_BOOLEAN_YES;
+
+ if(unlikely(!d->st_mops)) {
+ d->st_mops = rrdset_create_localhost(
+ "disk_mops"
+ , d->device
+ , d->disk
+ , family
+ , "disk.mops"
+ , "Disk Merged Operations"
+ , "merged operations/s"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_DISKSTATS_NAME
+ , NETDATA_CHART_PRIO_DISK_MOPS
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ rrdset_flag_set(d->st_mops, RRDSET_FLAG_DETAIL);
+
+ d->rd_mops_reads = rrddim_add(d->st_mops, "reads", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ d->rd_mops_writes = rrddim_add(d->st_mops, "writes", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else rrdset_next(d->st_mops);
+
+ rrddim_set_by_pointer(d->st_mops, d->rd_mops_reads, mreads);
+ rrddim_set_by_pointer(d->st_mops, d->rd_mops_writes, mwrites);
+ rrdset_done(d->st_mops);
+ }
+
+ // --------------------------------------------------------------------
+
+ if(d->do_iotime == CONFIG_BOOLEAN_YES || (d->do_iotime == CONFIG_BOOLEAN_AUTO && (readms || writems))) {
+ d->do_iotime = CONFIG_BOOLEAN_YES;
+
+ if(unlikely(!d->st_iotime)) {
+ d->st_iotime = rrdset_create_localhost(
+ "disk_iotime"
+ , d->device
+ , d->disk
+ , family
+ , "disk.iotime"
+ , "Disk Total I/O Time"
+ , "milliseconds/s"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_DISKSTATS_NAME
+ , NETDATA_CHART_PRIO_DISK_IOTIME
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ rrdset_flag_set(d->st_iotime, RRDSET_FLAG_DETAIL);
+
+ d->rd_iotime_reads = rrddim_add(d->st_iotime, "reads", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ d->rd_iotime_writes = rrddim_add(d->st_iotime, "writes", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else rrdset_next(d->st_iotime);
+
+ last_readms = rrddim_set_by_pointer(d->st_iotime, d->rd_iotime_reads, readms);
+ last_writems = rrddim_set_by_pointer(d->st_iotime, d->rd_iotime_writes, writems);
+ rrdset_done(d->st_iotime);
+ }
+
+ // --------------------------------------------------------------------
+ // calculate differential charts
+ // only if this is not the first time we run
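+ // from the deltas of the raw counters between two collections we derive:
+ //   await = delta(read/write ms)      / delta(read/write ops)  -> ms per completed operation
+ //   avgsz = delta(read/write sectors) / delta(read/write ops)  -> average I/O size
+ //   svctm = delta(busy ms)            / delta(reads + writes)  -> average service time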
+
+ if(likely(dt)) {
+ if( (d->do_iotime == CONFIG_BOOLEAN_YES || (d->do_iotime == CONFIG_BOOLEAN_AUTO && (readms || writems))) &&
+ (d->do_ops == CONFIG_BOOLEAN_YES || (d->do_ops == CONFIG_BOOLEAN_AUTO && (reads || writes)))) {
+
+ if(unlikely(!d->st_await)) {
+ d->st_await = rrdset_create_localhost(
+ "disk_await"
+ , d->device
+ , d->disk
+ , family
+ , "disk.await"
+ , "Average Completed I/O Operation Time"
+ , "ms per operation"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_DISKSTATS_NAME
+ , NETDATA_CHART_PRIO_DISK_AWAIT
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ rrdset_flag_set(d->st_await, RRDSET_FLAG_DETAIL);
+
+ d->rd_await_reads = rrddim_add(d->st_await, "reads", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ d->rd_await_writes = rrddim_add(d->st_await, "writes", NULL, -1, 1, RRD_ALGORITHM_ABSOLUTE);
+ }
+ else rrdset_next(d->st_await);
+
+ rrddim_set_by_pointer(d->st_await, d->rd_await_reads, (reads - last_reads) ? (readms - last_readms) / (reads - last_reads) : 0);
+ rrddim_set_by_pointer(d->st_await, d->rd_await_writes, (writes - last_writes) ? (writems - last_writems) / (writes - last_writes) : 0);
+ rrdset_done(d->st_await);
+ }
+
+ if( (d->do_io == CONFIG_BOOLEAN_YES || (d->do_io == CONFIG_BOOLEAN_AUTO && (readsectors || writesectors))) &&
+ (d->do_ops == CONFIG_BOOLEAN_YES || (d->do_ops == CONFIG_BOOLEAN_AUTO && (reads || writes)))) {
+
+ if(unlikely(!d->st_avgsz)) {
+ d->st_avgsz = rrdset_create_localhost(
+ "disk_avgsz"
+ , d->device
+ , d->disk
+ , family
+ , "disk.avgsz"
+ , "Average Completed I/O Operation Bandwidth"
+ , "kilobytes per operation"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_DISKSTATS_NAME
+ , NETDATA_CHART_PRIO_DISK_AVGSZ
+ , update_every
+ , RRDSET_TYPE_AREA
+ );
+
+ rrdset_flag_set(d->st_avgsz, RRDSET_FLAG_DETAIL);
+
+ d->rd_avgsz_reads = rrddim_add(d->st_avgsz, "reads", NULL, d->sector_size, 1024, RRD_ALGORITHM_ABSOLUTE);
+ d->rd_avgsz_writes = rrddim_add(d->st_avgsz, "writes", NULL, d->sector_size * -1, 1024, RRD_ALGORITHM_ABSOLUTE);
+ }
+ else rrdset_next(d->st_avgsz);
+
+ rrddim_set_by_pointer(d->st_avgsz, d->rd_avgsz_reads, (reads - last_reads) ? (readsectors - last_readsectors) / (reads - last_reads) : 0);
+ rrddim_set_by_pointer(d->st_avgsz, d->rd_avgsz_writes, (writes - last_writes) ? (writesectors - last_writesectors) / (writes - last_writes) : 0);
+ rrdset_done(d->st_avgsz);
+ }
+
+ if( (d->do_util == CONFIG_BOOLEAN_YES || (d->do_util == CONFIG_BOOLEAN_AUTO && busy_ms)) &&
+ (d->do_ops == CONFIG_BOOLEAN_YES || (d->do_ops == CONFIG_BOOLEAN_AUTO && (reads || writes)))) {
+
+ if(unlikely(!d->st_svctm)) {
+ d->st_svctm = rrdset_create_localhost(
+ "disk_svctm"
+ , d->device
+ , d->disk
+ , family
+ , "disk.svctm"
+ , "Average Service Time"
+ , "ms per operation"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_DISKSTATS_NAME
+ , NETDATA_CHART_PRIO_DISK_SVCTM
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ rrdset_flag_set(d->st_svctm, RRDSET_FLAG_DETAIL);
+
+ d->rd_svctm_svctm = rrddim_add(d->st_svctm, "svctm", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ }
+ else rrdset_next(d->st_svctm);
+
+ rrddim_set_by_pointer(d->st_svctm, d->rd_svctm_svctm, ((reads - last_reads) + (writes - last_writes)) ? (busy_ms - last_busy_ms) / ((reads - last_reads) + (writes - last_writes)) : 0);
+ rrdset_done(d->st_svctm);
+ }
+ }
+
+ // --------------------------------------------------------------------------
+ // read bcache metrics and generate the bcache charts
+
+ if(d->device_is_bcache && d->do_bcache != CONFIG_BOOLEAN_NO) {
+ unsigned long long int
+ stats_total_cache_bypass_hits = 0,
+ stats_total_cache_bypass_misses = 0,
+ stats_total_cache_hits = 0,
+ stats_total_cache_miss_collisions = 0,
+ stats_total_cache_misses = 0,
+ stats_five_minute_cache_hit_ratio = 0,
+ stats_hour_cache_hit_ratio = 0,
+ stats_day_cache_hit_ratio = 0,
+ stats_total_cache_hit_ratio = 0,
+ cache_available_percent = 0,
+ cache_readaheads = 0,
+ cache_read_races = 0,
+ cache_io_errors = 0,
+ cache_congested = 0,
+ dirty_data = 0,
+ writeback_rate = 0;
+
+ // read the bcache values
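+ // dirty_data, writeback_rate, cache/congested and readaheads may carry a unit
+ // suffix (e.g. "3.9M"), so they go through bcache_read_number_with_units();
+ // the hit ratios and the counters are plain integers read with read_single_number_file()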
+
+ if(d->bcache_filename_dirty_data)
+ dirty_data = bcache_read_number_with_units(d->bcache_filename_dirty_data);
+
+ if(d->bcache_filename_writeback_rate)
+ writeback_rate = bcache_read_number_with_units(d->bcache_filename_writeback_rate);
+
+ if(d->bcache_filename_cache_congested)
+ cache_congested = bcache_read_number_with_units(d->bcache_filename_cache_congested);
+
+ if(d->bcache_filename_cache_available_percent)
+ read_single_number_file(d->bcache_filename_cache_available_percent, &cache_available_percent);
+
+ if(d->bcache_filename_stats_five_minute_cache_hit_ratio)
+ read_single_number_file(d->bcache_filename_stats_five_minute_cache_hit_ratio, &stats_five_minute_cache_hit_ratio);
+
+ if(d->bcache_filename_stats_hour_cache_hit_ratio)
+ read_single_number_file(d->bcache_filename_stats_hour_cache_hit_ratio, &stats_hour_cache_hit_ratio);
+
+ if(d->bcache_filename_stats_day_cache_hit_ratio)
+ read_single_number_file(d->bcache_filename_stats_day_cache_hit_ratio, &stats_day_cache_hit_ratio);
+
+ if(d->bcache_filename_stats_total_cache_hit_ratio)
+ read_single_number_file(d->bcache_filename_stats_total_cache_hit_ratio, &stats_total_cache_hit_ratio);
+
+ if(d->bcache_filename_stats_total_cache_hits)
+ read_single_number_file(d->bcache_filename_stats_total_cache_hits, &stats_total_cache_hits);
+
+ if(d->bcache_filename_stats_total_cache_misses)
+ read_single_number_file(d->bcache_filename_stats_total_cache_misses, &stats_total_cache_misses);
+
+ if(d->bcache_filename_stats_total_cache_miss_collisions)
+ read_single_number_file(d->bcache_filename_stats_total_cache_miss_collisions, &stats_total_cache_miss_collisions);
+
+ if(d->bcache_filename_stats_total_cache_bypass_hits)
+ read_single_number_file(d->bcache_filename_stats_total_cache_bypass_hits, &stats_total_cache_bypass_hits);
+
+ if(d->bcache_filename_stats_total_cache_bypass_misses)
+ read_single_number_file(d->bcache_filename_stats_total_cache_bypass_misses, &stats_total_cache_bypass_misses);
+
+ if(d->bcache_filename_stats_total_cache_readaheads)
+ cache_readaheads = bcache_read_number_with_units(d->bcache_filename_stats_total_cache_readaheads);
+
+ if(d->bcache_filename_cache_read_races)
+ read_single_number_file(d->bcache_filename_cache_read_races, &cache_read_races);
+
+ if(d->bcache_filename_cache_io_errors)
+ read_single_number_file(d->bcache_filename_cache_io_errors, &cache_io_errors);
+
+ if(d->bcache_filename_priority_stats && global_bcache_priority_stats_update_every >= 1)
+ bcache_read_priority_stats(d, family, global_bcache_priority_stats_update_every, dt);
+
+ // update the charts
+
+ {
+ if(unlikely(!d->st_bcache_hit_ratio)) {
+ d->st_bcache_hit_ratio = rrdset_create_localhost(
+ "disk_bcache_hit_ratio"
+ , d->device
+ , d->disk
+ , family
+ , "disk.bcache_hit_ratio"
+ , "BCache Cache Hit Ratio"
+ , "percentage"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_DISKSTATS_NAME
+ , NETDATA_CHART_PRIO_BCACHE_HIT_RATIO
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ d->rd_bcache_hit_ratio_5min = rrddim_add(d->st_bcache_hit_ratio, "5min", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ d->rd_bcache_hit_ratio_1hour = rrddim_add(d->st_bcache_hit_ratio, "1hour", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ d->rd_bcache_hit_ratio_1day = rrddim_add(d->st_bcache_hit_ratio, "1day", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ d->rd_bcache_hit_ratio_total = rrddim_add(d->st_bcache_hit_ratio, "ever", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ }
+ else rrdset_next(d->st_bcache_hit_ratio);
+
+ rrddim_set_by_pointer(d->st_bcache_hit_ratio, d->rd_bcache_hit_ratio_5min, stats_five_minute_cache_hit_ratio);
+ rrddim_set_by_pointer(d->st_bcache_hit_ratio, d->rd_bcache_hit_ratio_1hour, stats_hour_cache_hit_ratio);
+ rrddim_set_by_pointer(d->st_bcache_hit_ratio, d->rd_bcache_hit_ratio_1day, stats_day_cache_hit_ratio);
+ rrddim_set_by_pointer(d->st_bcache_hit_ratio, d->rd_bcache_hit_ratio_total, stats_total_cache_hit_ratio);
+ rrdset_done(d->st_bcache_hit_ratio);
+ }
+
+ {
+
+ if(unlikely(!d->st_bcache_rates)) {
+ d->st_bcache_rates = rrdset_create_localhost(
+ "disk_bcache_rates"
+ , d->device
+ , d->disk
+ , family
+ , "disk.bcache_rates"
+ , "BCache Rates"
+ , "KB/s"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_DISKSTATS_NAME
+ , NETDATA_CHART_PRIO_BCACHE_RATES
+ , update_every
+ , RRDSET_TYPE_AREA
+ );
+
+ d->rd_bcache_rate_congested = rrddim_add(d->st_bcache_rates, "congested", NULL, 1, 1024, RRD_ALGORITHM_ABSOLUTE);
+ d->rd_bcache_rate_writeback = rrddim_add(d->st_bcache_rates, "writeback", NULL, -1, 1024, RRD_ALGORITHM_ABSOLUTE);
+ }
+ else rrdset_next(d->st_bcache_rates);
+
+ rrddim_set_by_pointer(d->st_bcache_rates, d->rd_bcache_rate_writeback, writeback_rate);
+ rrddim_set_by_pointer(d->st_bcache_rates, d->rd_bcache_rate_congested, cache_congested);
+ rrdset_done(d->st_bcache_rates);
+ }
+
+ {
+ if(unlikely(!d->st_bcache_size)) {
+ d->st_bcache_size = rrdset_create_localhost(
+ "disk_bcache_size"
+ , d->device
+ , d->disk
+ , family
+ , "disk.bcache_size"
+ , "BCache Cache Sizes"
+ , "MB"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_DISKSTATS_NAME
+ , NETDATA_CHART_PRIO_BCACHE_SIZE
+ , update_every
+ , RRDSET_TYPE_AREA
+ );
+
+ d->rd_bcache_dirty_size = rrddim_add(d->st_bcache_size, "dirty", NULL, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE);
+ }
+ else rrdset_next(d->st_bcache_size);
+
+ rrddim_set_by_pointer(d->st_bcache_size, d->rd_bcache_dirty_size, dirty_data);
+ rrdset_done(d->st_bcache_size);
+ }
+
+ {
+ if(unlikely(!d->st_bcache_usage)) {
+ d->st_bcache_usage = rrdset_create_localhost(
+ "disk_bcache_usage"
+ , d->device
+ , d->disk
+ , family
+ , "disk.bcache_usage"
+ , "BCache Cache Usage"
+ , "percent"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_DISKSTATS_NAME
+ , NETDATA_CHART_PRIO_BCACHE_USAGE
+ , update_every
+ , RRDSET_TYPE_AREA
+ );
+
+ d->rd_bcache_available_percent = rrddim_add(d->st_bcache_usage, "avail", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ }
+ else rrdset_next(d->st_bcache_usage);
+
+ rrddim_set_by_pointer(d->st_bcache_usage, d->rd_bcache_available_percent, cache_available_percent);
+ rrdset_done(d->st_bcache_usage);
+ }
+
+ {
+
+ if(unlikely(!d->st_bcache_cache_read_races)) {
+ d->st_bcache_cache_read_races = rrdset_create_localhost(
+ "disk_bcache_cache_read_races"
+ , d->device
+ , d->disk
+ , family
+ , "disk.bcache_cache_read_races"
+ , "BCache Cache Read Races"
+ , "operations/s"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_DISKSTATS_NAME
+ , NETDATA_CHART_PRIO_BCACHE_CACHE_READ_RACES
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ d->rd_bcache_cache_read_races = rrddim_add(d->st_bcache_cache_read_races, "races", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ d->rd_bcache_cache_io_errors = rrddim_add(d->st_bcache_cache_read_races, "errors", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else rrdset_next(d->st_bcache_cache_read_races);
+
+ rrddim_set_by_pointer(d->st_bcache_cache_read_races, d->rd_bcache_cache_read_races, cache_read_races);
+ rrddim_set_by_pointer(d->st_bcache_cache_read_races, d->rd_bcache_cache_io_errors, cache_io_errors);
+ rrdset_done(d->st_bcache_cache_read_races);
+ }
+
+ if(d->do_bcache == CONFIG_BOOLEAN_YES || (d->do_bcache == CONFIG_BOOLEAN_AUTO && (stats_total_cache_hits != 0 || stats_total_cache_misses != 0 || stats_total_cache_miss_collisions != 0))) {
+
+ if(unlikely(!d->st_bcache)) {
+ d->st_bcache = rrdset_create_localhost(
+ "disk_bcache"
+ , d->device
+ , d->disk
+ , family
+ , "disk.bcache"
+ , "BCache Cache I/O Operations"
+ , "operations/s"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_DISKSTATS_NAME
+ , NETDATA_CHART_PRIO_BCACHE_OPS
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ rrdset_flag_set(d->st_bcache, RRDSET_FLAG_DETAIL);
+
+ d->rd_bcache_hits = rrddim_add(d->st_bcache, "hits", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ d->rd_bcache_misses = rrddim_add(d->st_bcache, "misses", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ d->rd_bcache_miss_collisions = rrddim_add(d->st_bcache, "collisions", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ d->rd_bcache_readaheads = rrddim_add(d->st_bcache, "readaheads", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else rrdset_next(d->st_bcache);
+
+ rrddim_set_by_pointer(d->st_bcache, d->rd_bcache_hits, stats_total_cache_hits);
+ rrddim_set_by_pointer(d->st_bcache, d->rd_bcache_misses, stats_total_cache_misses);
+ rrddim_set_by_pointer(d->st_bcache, d->rd_bcache_miss_collisions, stats_total_cache_miss_collisions);
+ rrddim_set_by_pointer(d->st_bcache, d->rd_bcache_readaheads, cache_readaheads);
+ rrdset_done(d->st_bcache);
+ }
+
+ if(d->do_bcache == CONFIG_BOOLEAN_YES || (d->do_bcache == CONFIG_BOOLEAN_AUTO && (stats_total_cache_bypass_hits != 0 || stats_total_cache_bypass_misses != 0))) {
+
+ if(unlikely(!d->st_bcache_bypass)) {
+ d->st_bcache_bypass = rrdset_create_localhost(
+ "disk_bcache_bypass"
+ , d->device
+ , d->disk
+ , family
+ , "disk.bcache_bypass"
+ , "BCache Cache Bypass I/O Operations"
+ , "operations/s"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_DISKSTATS_NAME
+ , NETDATA_CHART_PRIO_BCACHE_BYPASS
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ rrdset_flag_set(d->st_bcache_bypass, RRDSET_FLAG_DETAIL);
+
+ d->rd_bcache_bypass_hits = rrddim_add(d->st_bcache_bypass, "hits", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ d->rd_bcache_bypass_misses = rrddim_add(d->st_bcache_bypass, "misses", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else rrdset_next(d->st_bcache_bypass);
+
+ rrddim_set_by_pointer(d->st_bcache_bypass, d->rd_bcache_bypass_hits, stats_total_cache_bypass_hits);
+ rrddim_set_by_pointer(d->st_bcache_bypass, d->rd_bcache_bypass_misses, stats_total_cache_bypass_misses);
+ rrdset_done(d->st_bcache_bypass);
+ }
+ }
+ }
+
+
+ // ------------------------------------------------------------------------
+ // update the system total I/O
+
+ if(global_do_io == CONFIG_BOOLEAN_YES || (global_do_io == CONFIG_BOOLEAN_AUTO && (system_read_kb || system_write_kb))) {
+ static RRDSET *st_io = NULL;
+ static RRDDIM *rd_in = NULL, *rd_out = NULL;
+
+ if(unlikely(!st_io)) {
+ st_io = rrdset_create_localhost(
+ "system"
+ , "io"
+ , NULL
+ , "disk"
+ , NULL
+ , "Disk I/O"
+ , "kilobytes/s"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_DISKSTATS_NAME
+ , NETDATA_CHART_PRIO_SYSTEM_IO
+ , update_every
+ , RRDSET_TYPE_AREA
+ );
+
+ rd_in = rrddim_add(st_io, "in", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_out = rrddim_add(st_io, "out", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else rrdset_next(st_io);
+
+ rrddim_set_by_pointer(st_io, rd_in, system_read_kb);
+ rrddim_set_by_pointer(st_io, rd_out, system_write_kb);
+ rrdset_done(st_io);
+ }
+
+
+ // ------------------------------------------------------------------------
+ // cleanup removed disks
+
+ struct disk *d = disk_root, *last = NULL;
+ while(d) {
+ if(unlikely(global_cleanup_removed_disks && !d->updated)) {
+ struct disk *t = d;
+
+ rrdset_obsolete_and_pointer_null(d->st_avgsz);
+ rrdset_obsolete_and_pointer_null(d->st_await);
+ rrdset_obsolete_and_pointer_null(d->st_backlog);
+ rrdset_obsolete_and_pointer_null(d->st_io);
+ rrdset_obsolete_and_pointer_null(d->st_iotime);
+ rrdset_obsolete_and_pointer_null(d->st_mops);
+ rrdset_obsolete_and_pointer_null(d->st_ops);
+ rrdset_obsolete_and_pointer_null(d->st_qops);
+ rrdset_obsolete_and_pointer_null(d->st_svctm);
+ rrdset_obsolete_and_pointer_null(d->st_util);
+ rrdset_obsolete_and_pointer_null(d->st_bcache);
+ rrdset_obsolete_and_pointer_null(d->st_bcache_bypass);
+ rrdset_obsolete_and_pointer_null(d->st_bcache_rates);
+ rrdset_obsolete_and_pointer_null(d->st_bcache_size);
+ rrdset_obsolete_and_pointer_null(d->st_bcache_usage);
+ rrdset_obsolete_and_pointer_null(d->st_bcache_hit_ratio);
+
+ if(d == disk_root) {
+ disk_root = d = d->next;
+ last = NULL;
+ }
+ else if(last) {
+ last->next = d = d->next;
+ }
+
+ freez(t->bcache_filename_dirty_data);
+ freez(t->bcache_filename_writeback_rate);
+ freez(t->bcache_filename_cache_congested);
+ freez(t->bcache_filename_cache_available_percent);
+ freez(t->bcache_filename_stats_five_minute_cache_hit_ratio);
+ freez(t->bcache_filename_stats_hour_cache_hit_ratio);
+ freez(t->bcache_filename_stats_day_cache_hit_ratio);
+ freez(t->bcache_filename_stats_total_cache_hit_ratio);
+ freez(t->bcache_filename_stats_total_cache_hits);
+ freez(t->bcache_filename_stats_total_cache_misses);
+ freez(t->bcache_filename_stats_total_cache_miss_collisions);
+ freez(t->bcache_filename_stats_total_cache_bypass_hits);
+ freez(t->bcache_filename_stats_total_cache_bypass_misses);
+ freez(t->bcache_filename_stats_total_cache_readaheads);
+ freez(t->bcache_filename_cache_read_races);
+ freez(t->bcache_filename_cache_io_errors);
+ freez(t->bcache_filename_priority_stats);
+
+ freez(t->disk);
+ freez(t->device);
+ freez(t->mount_point);
+ freez(t);
+ }
+ else {
+ d->updated = 0;
+ last = d;
+ d = d->next;
+ }
+ }
+
+ return 0;
+}
diff --git a/collectors/proc.plugin/proc_interrupts.c b/collectors/proc.plugin/proc_interrupts.c
new file mode 100644
index 000000000..73b117179
--- /dev/null
+++ b/collectors/proc.plugin/proc_interrupts.c
@@ -0,0 +1,248 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "plugin_proc.h"
+
+#define PLUGIN_PROC_MODULE_INTERRUPTS_NAME "/proc/interrupts"
+#define CONFIG_SECTION_PLUGIN_PROC_INTERRUPTS "plugin:" PLUGIN_PROC_CONFIG_NAME ":" PLUGIN_PROC_MODULE_INTERRUPTS_NAME
+
+#define MAX_INTERRUPT_NAME 50
+
+struct cpu_interrupt {
+ unsigned long long value;
+ RRDDIM *rd;
+};
+
+struct interrupt {
+ int used;
+ char *id;
+ char name[MAX_INTERRUPT_NAME + 1];
+ RRDDIM *rd;
+ unsigned long long total;
+ struct cpu_interrupt cpu[];
+};
+
+// since each interrupt record is variable in size (it embeds one entry per CPU),
+// we use this to calculate its record size
+#define recordsize(cpus) (sizeof(struct interrupt) + ((cpus) * sizeof(struct cpu_interrupt)))
+
+// given a base, get a pointer to each record
+#define irrindex(base, line, cpus) ((struct interrupt *)&((char *)(base))[(line) * recordsize(cpus)])
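+// e.g. with 4 CPUs each record occupies sizeof(struct interrupt) + 4 * sizeof(struct cpu_interrupt)
+// bytes, and the record for line 'l' starts at offset l * recordsize(4) from the base pointer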
+
+static inline struct interrupt *get_interrupts_array(size_t lines, int cpus) {
+ static struct interrupt *irrs = NULL;
+ static size_t allocated = 0;
+
+ if(unlikely(lines != allocated)) {
+ size_t l;
+ int c;
+
+ irrs = (struct interrupt *)reallocz(irrs, lines * recordsize(cpus));
+
+ // reset all interrupt RRDDIM pointers as any line could have shifted
+ for(l = 0; l < lines ;l++) {
+ struct interrupt *irr = irrindex(irrs, l, cpus);
+ irr->rd = NULL;
+ irr->name[0] = '\0';
+ for(c = 0; c < cpus ;c++)
+ irr->cpu[c].rd = NULL;
+ }
+
+ allocated = lines;
+ }
+
+ return irrs;
+}
+
+int do_proc_interrupts(int update_every, usec_t dt) {
+ (void)dt;
+ static procfile *ff = NULL;
+ static int cpus = -1, do_per_core = CONFIG_BOOLEAN_INVALID;
+ struct interrupt *irrs = NULL;
+
+ if(unlikely(do_per_core == CONFIG_BOOLEAN_INVALID))
+ do_per_core = config_get_boolean_ondemand(CONFIG_SECTION_PLUGIN_PROC_INTERRUPTS, "interrupts per core", CONFIG_BOOLEAN_AUTO);
+
+ if(unlikely(!ff)) {
+ char filename[FILENAME_MAX + 1];
+ snprintfz(filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/proc/interrupts");
+ ff = procfile_open(config_get(CONFIG_SECTION_PLUGIN_PROC_INTERRUPTS, "filename to monitor", filename), " \t", PROCFILE_FLAG_DEFAULT);
+ }
+ if(unlikely(!ff))
+ return 1;
+
+ ff = procfile_readall(ff);
+ if(unlikely(!ff))
+ return 0; // we return 0, so that we will retry to open it next time
+
+ size_t lines = procfile_lines(ff), l;
+ size_t words = procfile_linewords(ff, 0);
+
+ if(unlikely(!lines)) {
+ error("Cannot read /proc/interrupts, zero lines reported.");
+ return 1;
+ }
+
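+ // /proc/interrupts looks like this (illustrative):
+ //          CPU0       CPU1
+ //   0:       44          0   IO-APIC   2-edge      timer
+ // NMI:        0          0   Non-maskable interrupts
+ // the header row tells us how many CPUs there are; for numeric ids the last
+ // word of the line (e.g. "timer") is used to build a descriptive dimension name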
+ // find how many CPUs are there
+ if(unlikely(cpus == -1)) {
+ uint32_t w;
+ cpus = 0;
+ for(w = 0; w < words ; w++) {
+ if(likely(strncmp(procfile_lineword(ff, 0, w), "CPU", 3) == 0))
+ cpus++;
+ }
+ }
+
+ if(unlikely(!cpus)) {
+ error("PLUGIN: PROC_INTERRUPTS: Cannot find the number of CPUs in /proc/interrupts");
+ return 1;
+ }
+
+ // allocate (or resize) the interrupts array we need
+ irrs = get_interrupts_array(lines, cpus);
+ irrs[0].used = 0;
+
+ // loop through all lines
+ for(l = 1; l < lines ;l++) {
+ struct interrupt *irr = irrindex(irrs, l, cpus);
+ irr->used = 0;
+ irr->total = 0;
+
+ words = procfile_linewords(ff, l);
+ if(unlikely(!words)) continue;
+
+ irr->id = procfile_lineword(ff, l, 0);
+ if(unlikely(!irr->id || !irr->id[0])) continue;
+
+ size_t idlen = strlen(irr->id);
+ if(irr->id[idlen - 1] == ':')
+ irr->id[--idlen] = '\0';
+
+ int c;
+ for(c = 0; c < cpus ;c++) {
+ if(likely((c + 1) < (int)words))
+ irr->cpu[c].value = str2ull(procfile_lineword(ff, l, (uint32_t)(c + 1)));
+ else
+ irr->cpu[c].value = 0;
+
+ irr->total += irr->cpu[c].value;
+ }
+
+ if(unlikely(isdigit(irr->id[0]) && (uint32_t)(cpus + 2) < words)) {
+ strncpyz(irr->name, procfile_lineword(ff, l, words - 1), MAX_INTERRUPT_NAME);
+ size_t nlen = strlen(irr->name);
+ if(likely(nlen + 1 + idlen <= MAX_INTERRUPT_NAME)) {
+ irr->name[nlen] = '_';
+ strncpyz(&irr->name[nlen + 1], irr->id, MAX_INTERRUPT_NAME - nlen - 1);
+ }
+ else {
+ irr->name[MAX_INTERRUPT_NAME - idlen - 1] = '_';
+ strncpyz(&irr->name[MAX_INTERRUPT_NAME - idlen], irr->id, idlen);
+ }
+ }
+ else {
+ strncpyz(irr->name, irr->id, MAX_INTERRUPT_NAME);
+ }
+
+ irr->used = 1;
+ }
+
+ // --------------------------------------------------------------------
+
+ static RRDSET *st_system_interrupts = NULL;
+ if(unlikely(!st_system_interrupts))
+ st_system_interrupts = rrdset_create_localhost(
+ "system"
+ , "interrupts"
+ , NULL
+ , "interrupts"
+ , NULL
+ , "System interrupts"
+ , "interrupts/s"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_INTERRUPTS_NAME
+ , NETDATA_CHART_PRIO_SYSTEM_INTERRUPTS
+ , update_every
+ , RRDSET_TYPE_STACKED
+ );
+ else
+ rrdset_next(st_system_interrupts);
+
+ for(l = 0; l < lines ;l++) {
+ struct interrupt *irr = irrindex(irrs, l, cpus);
+ if(irr->used && irr->total) {
+ // the set of interrupts may have changed without changing the total number of lines,
+ // if the same number of interrupts were added and removed between two calls of
+ // this function - so verify by name that the dimension still matches this interrupt
+ if(unlikely(!irr->rd || strncmp(irr->rd->name, irr->name, MAX_INTERRUPT_NAME) != 0)) {
+ irr->rd = rrddim_add(st_system_interrupts, irr->id, irr->name, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rrddim_set_name(st_system_interrupts, irr->rd, irr->name);
+
+ // also reset per cpu RRDDIMs to avoid repeating strncmp() in the per core loop
+ if(likely(do_per_core != CONFIG_BOOLEAN_NO)) {
+ int c;
+ for(c = 0; c < cpus; c++) irr->cpu[c].rd = NULL;
+ }
+ }
+
+ rrddim_set_by_pointer(st_system_interrupts, irr->rd, irr->total);
+ }
+ }
+
+ rrdset_done(st_system_interrupts);
+
+ // --------------------------------------------------------------------
+
+ if(likely(do_per_core != CONFIG_BOOLEAN_NO)) {
+ static RRDSET **core_st = NULL;
+ static int old_cpus = 0;
+
+ if(old_cpus < cpus) {
+ core_st = reallocz(core_st, sizeof(RRDSET *) * cpus);
+ memset(&core_st[old_cpus], 0, sizeof(RRDSET *) * (cpus - old_cpus));
+ old_cpus = cpus;
+ }
+
+ int c;
+
+ for(c = 0; c < cpus ;c++) {
+ if(unlikely(!core_st[c])) {
+ char id[50+1];
+ snprintfz(id, 50, "cpu%d_interrupts", c);
+
+ char title[100+1];
+ snprintfz(title, 100, "CPU%d Interrupts", c);
+ core_st[c] = rrdset_create_localhost(
+ "cpu"
+ , id
+ , NULL
+ , "interrupts"
+ , "cpu.interrupts"
+ , title
+ , "interrupts/s"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_INTERRUPTS_NAME
+ , NETDATA_CHART_PRIO_INTERRUPTS_PER_CORE + c
+ , update_every
+ , RRDSET_TYPE_STACKED
+ );
+ }
+ else rrdset_next(core_st[c]);
+
+ for(l = 0; l < lines ;l++) {
+ struct interrupt *irr = irrindex(irrs, l, cpus);
+ if(irr->used && (do_per_core == CONFIG_BOOLEAN_YES || irr->cpu[c].value)) {
+ if(unlikely(!irr->cpu[c].rd)) {
+ irr->cpu[c].rd = rrddim_add(core_st[c], irr->id, irr->name, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rrddim_set_name(core_st[c], irr->cpu[c].rd, irr->name);
+ }
+
+ rrddim_set_by_pointer(core_st[c], irr->cpu[c].rd, irr->cpu[c].value);
+ }
+ }
+
+ rrdset_done(core_st[c]);
+ }
+ }
+
+ return 0;
+}
diff --git a/collectors/proc.plugin/proc_loadavg.c b/collectors/proc.plugin/proc_loadavg.c
new file mode 100644
index 000000000..db95b1689
--- /dev/null
+++ b/collectors/proc.plugin/proc_loadavg.c
@@ -0,0 +1,124 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "plugin_proc.h"
+
+#define PLUGIN_PROC_MODULE_LOADAVG_NAME "/proc/loadavg"
+#define CONFIG_SECTION_PLUGIN_PROC_LOADAVG "plugin:" PLUGIN_PROC_CONFIG_NAME ":" PLUGIN_PROC_MODULE_LOADAVG_NAME
+
+// the Linux kernel recalculates the load averages only once every 5 seconds
+#define MIN_LOADAVG_UPDATE_EVERY 5
+
+int do_proc_loadavg(int update_every, usec_t dt) {
+ static procfile *ff = NULL;
+ static int do_loadavg = -1, do_all_processes = -1;
+ static usec_t next_loadavg_dt = 0;
+
+ if(unlikely(!ff)) {
+ char filename[FILENAME_MAX + 1];
+ snprintfz(filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/proc/loadavg");
+
+ ff = procfile_open(config_get(CONFIG_SECTION_PLUGIN_PROC_LOADAVG, "filename to monitor", filename), " \t,:|/", PROCFILE_FLAG_DEFAULT);
+ if(unlikely(!ff))
+ return 1;
+ }
+
+ ff = procfile_readall(ff);
+ if(unlikely(!ff))
+ return 0; // we return 0, so that we will retry to open it next time
+
+ if(unlikely(do_loadavg == -1)) {
+ do_loadavg = config_get_boolean(CONFIG_SECTION_PLUGIN_PROC_LOADAVG, "enable load average", 1);
+ do_all_processes = config_get_boolean(CONFIG_SECTION_PLUGIN_PROC_LOADAVG, "enable total processes", 1);
+ }
+
+ if(unlikely(procfile_lines(ff) < 1)) {
+ error("/proc/loadavg has no lines.");
+ return 1;
+ }
+ if(unlikely(procfile_linewords(ff, 0) < 6)) {
+ error("/proc/loadavg has less than 6 words in it.");
+ return 1;
+ }
+
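+ // /proc/loadavg looks like "0.20 0.18 0.12 1/80 11206" (values are examples);
+ // split on " \t,:|/" it gives: [0]load1 [1]load5 [2]load15 [3]running [4]total [5]last pid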
+ double load1 = strtod(procfile_lineword(ff, 0, 0), NULL);
+ double load5 = strtod(procfile_lineword(ff, 0, 1), NULL);
+ double load15 = strtod(procfile_lineword(ff, 0, 2), NULL);
+
+ //unsigned long long running_processes = str2ull(procfile_lineword(ff, 0, 3));
+ unsigned long long active_processes = str2ull(procfile_lineword(ff, 0, 4));
+ //unsigned long long next_pid = str2ull(procfile_lineword(ff, 0, 5));
+
+
+ // --------------------------------------------------------------------
+
+ if(next_loadavg_dt <= dt) {
+ if(likely(do_loadavg)) {
+ static RRDSET *load_chart = NULL;
+ static RRDDIM *rd_load1 = NULL, *rd_load5 = NULL, *rd_load15 = NULL;
+
+ if(unlikely(!load_chart)) {
+ load_chart = rrdset_create_localhost(
+ "system"
+ , "load"
+ , NULL
+ , "load"
+ , NULL
+ , "System Load Average"
+ , "load"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_LOADAVG_NAME
+ , NETDATA_CHART_PRIO_SYSTEM_LOAD
+ , (update_every < MIN_LOADAVG_UPDATE_EVERY) ? MIN_LOADAVG_UPDATE_EVERY : update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ rd_load1 = rrddim_add(load_chart, "load1", NULL, 1, 1000, RRD_ALGORITHM_ABSOLUTE);
+ rd_load5 = rrddim_add(load_chart, "load5", NULL, 1, 1000, RRD_ALGORITHM_ABSOLUTE);
+ rd_load15 = rrddim_add(load_chart, "load15", NULL, 1, 1000, RRD_ALGORITHM_ABSOLUTE);
+ }
+ else
+ rrdset_next(load_chart);
+
+ rrddim_set_by_pointer(load_chart, rd_load1, (collected_number) (load1 * 1000));
+ rrddim_set_by_pointer(load_chart, rd_load5, (collected_number) (load5 * 1000));
+ rrddim_set_by_pointer(load_chart, rd_load15, (collected_number) (load15 * 1000));
+ rrdset_done(load_chart);
+
+ next_loadavg_dt = load_chart->update_every * USEC_PER_SEC;
+ }
+ else next_loadavg_dt = MIN_LOADAVG_UPDATE_EVERY * USEC_PER_SEC;
+ }
+ else next_loadavg_dt -= dt;
+
+ // --------------------------------------------------------------------
+
+ if(likely(do_all_processes)) {
+ static RRDSET *processes_chart = NULL;
+ static RRDDIM *rd_active = NULL;
+
+ if(unlikely(!processes_chart)) {
+ processes_chart = rrdset_create_localhost(
+ "system"
+ , "active_processes"
+ , NULL
+ , "processes"
+ , NULL
+ , "System Active Processes"
+ , "processes"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_LOADAVG_NAME
+ , NETDATA_CHART_PRIO_SYSTEM_ACTIVE_PROCESSES
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ rd_active = rrddim_add(processes_chart, "active", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ }
+ else rrdset_next(processes_chart);
+
+ rrddim_set_by_pointer(processes_chart, rd_active, active_processes);
+ rrdset_done(processes_chart);
+ }
+
+ return 0;
+}
diff --git a/collectors/proc.plugin/proc_meminfo.c b/collectors/proc.plugin/proc_meminfo.c
new file mode 100644
index 000000000..f77159ebd
--- /dev/null
+++ b/collectors/proc.plugin/proc_meminfo.c
@@ -0,0 +1,519 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "plugin_proc.h"
+
+#define PLUGIN_PROC_MODULE_MEMINFO_NAME "/proc/meminfo"
+#define CONFIG_SECTION_PLUGIN_PROC_MEMINFO "plugin:" PLUGIN_PROC_CONFIG_NAME ":" PLUGIN_PROC_MODULE_MEMINFO_NAME
+
+int do_proc_meminfo(int update_every, usec_t dt) {
+ (void)dt;
+
+ static procfile *ff = NULL;
+ static int do_ram = -1, do_swap = -1, do_hwcorrupt = -1, do_committed = -1, do_writeback = -1, do_kernel = -1, do_slab = -1, do_hugepages = -1, do_transparent_hugepages = -1;
+
+ static ARL_BASE *arl_base = NULL;
+ static ARL_ENTRY *arl_hwcorrupted = NULL, *arl_memavailable = NULL;
+
+ static unsigned long long
+ MemTotal = 0,
+ MemFree = 0,
+ MemAvailable = 0,
+ Buffers = 0,
+ Cached = 0,
+ //SwapCached = 0,
+ //Active = 0,
+ //Inactive = 0,
+ //ActiveAnon = 0,
+ //InactiveAnon = 0,
+ //ActiveFile = 0,
+ //InactiveFile = 0,
+ //Unevictable = 0,
+ //Mlocked = 0,
+ SwapTotal = 0,
+ SwapFree = 0,
+ Dirty = 0,
+ Writeback = 0,
+ //AnonPages = 0,
+ //Mapped = 0,
+ //Shmem = 0,
+ Slab = 0,
+ SReclaimable = 0,
+ SUnreclaim = 0,
+ KernelStack = 0,
+ PageTables = 0,
+ NFS_Unstable = 0,
+ Bounce = 0,
+ WritebackTmp = 0,
+ //CommitLimit = 0,
+ Committed_AS = 0,
+ //VmallocTotal = 0,
+ VmallocUsed = 0,
+ //VmallocChunk = 0,
+ AnonHugePages = 0,
+ ShmemHugePages = 0,
+ HugePages_Total = 0,
+ HugePages_Free = 0,
+ HugePages_Rsvd = 0,
+ HugePages_Surp = 0,
+ Hugepagesize = 0,
+ //DirectMap4k = 0,
+ //DirectMap2M = 0,
+ HardwareCorrupted = 0;
+
+ if(unlikely(!arl_base)) {
+ do_ram = config_get_boolean(CONFIG_SECTION_PLUGIN_PROC_MEMINFO, "system ram", 1);
+ do_swap = config_get_boolean_ondemand(CONFIG_SECTION_PLUGIN_PROC_MEMINFO, "system swap", CONFIG_BOOLEAN_AUTO);
+ do_hwcorrupt = config_get_boolean_ondemand(CONFIG_SECTION_PLUGIN_PROC_MEMINFO, "hardware corrupted ECC", CONFIG_BOOLEAN_AUTO);
+ do_committed = config_get_boolean(CONFIG_SECTION_PLUGIN_PROC_MEMINFO, "committed memory", 1);
+ do_writeback = config_get_boolean(CONFIG_SECTION_PLUGIN_PROC_MEMINFO, "writeback memory", 1);
+ do_kernel = config_get_boolean(CONFIG_SECTION_PLUGIN_PROC_MEMINFO, "kernel memory", 1);
+ do_slab = config_get_boolean(CONFIG_SECTION_PLUGIN_PROC_MEMINFO, "slab memory", 1);
+ do_hugepages = config_get_boolean_ondemand(CONFIG_SECTION_PLUGIN_PROC_MEMINFO, "hugepages", CONFIG_BOOLEAN_AUTO);
+ do_transparent_hugepages = config_get_boolean_ondemand(CONFIG_SECTION_PLUGIN_PROC_MEMINFO, "transparent hugepages", CONFIG_BOOLEAN_AUTO);
+
+ arl_base = arl_create("meminfo", NULL, 60);
+ arl_expect(arl_base, "MemTotal", &MemTotal);
+ arl_expect(arl_base, "MemFree", &MemFree);
+ arl_memavailable = arl_expect(arl_base, "MemAvailable", &MemAvailable);
+ arl_expect(arl_base, "Buffers", &Buffers);
+ arl_expect(arl_base, "Cached", &Cached);
+ //arl_expect(arl_base, "SwapCached", &SwapCached);
+ //arl_expect(arl_base, "Active", &Active);
+ //arl_expect(arl_base, "Inactive", &Inactive);
+ //arl_expect(arl_base, "ActiveAnon", &ActiveAnon);
+ //arl_expect(arl_base, "InactiveAnon", &InactiveAnon);
+ //arl_expect(arl_base, "ActiveFile", &ActiveFile);
+ //arl_expect(arl_base, "InactiveFile", &InactiveFile);
+ //arl_expect(arl_base, "Unevictable", &Unevictable);
+ //arl_expect(arl_base, "Mlocked", &Mlocked);
+ arl_expect(arl_base, "SwapTotal", &SwapTotal);
+ arl_expect(arl_base, "SwapFree", &SwapFree);
+ arl_expect(arl_base, "Dirty", &Dirty);
+ arl_expect(arl_base, "Writeback", &Writeback);
+ //arl_expect(arl_base, "AnonPages", &AnonPages);
+ //arl_expect(arl_base, "Mapped", &Mapped);
+ //arl_expect(arl_base, "Shmem", &Shmem);
+ arl_expect(arl_base, "Slab", &Slab);
+ arl_expect(arl_base, "SReclaimable", &SReclaimable);
+ arl_expect(arl_base, "SUnreclaim", &SUnreclaim);
+ arl_expect(arl_base, "KernelStack", &KernelStack);
+ arl_expect(arl_base, "PageTables", &PageTables);
+ arl_expect(arl_base, "NFS_Unstable", &NFS_Unstable);
+ arl_expect(arl_base, "Bounce", &Bounce);
+ arl_expect(arl_base, "WritebackTmp", &WritebackTmp);
+ //arl_expect(arl_base, "CommitLimit", &CommitLimit);
+ arl_expect(arl_base, "Committed_AS", &Committed_AS);
+ //arl_expect(arl_base, "VmallocTotal", &VmallocTotal);
+ arl_expect(arl_base, "VmallocUsed", &VmallocUsed);
+ //arl_expect(arl_base, "VmallocChunk", &VmallocChunk);
+ arl_hwcorrupted = arl_expect(arl_base, "HardwareCorrupted", &HardwareCorrupted);
+ arl_expect(arl_base, "AnonHugePages", &AnonHugePages);
+ arl_expect(arl_base, "ShmemHugePages", &ShmemHugePages);
+ arl_expect(arl_base, "HugePages_Total", &HugePages_Total);
+ arl_expect(arl_base, "HugePages_Free", &HugePages_Free);
+ arl_expect(arl_base, "HugePages_Rsvd", &HugePages_Rsvd);
+ arl_expect(arl_base, "HugePages_Surp", &HugePages_Surp);
+ arl_expect(arl_base, "Hugepagesize", &Hugepagesize);
+ //arl_expect(arl_base, "DirectMap4k", &DirectMap4k);
+ //arl_expect(arl_base, "DirectMap2M", &DirectMap2M);
+ }
+
+ if(unlikely(!ff)) {
+ char filename[FILENAME_MAX + 1];
+ snprintfz(filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/proc/meminfo");
+ ff = procfile_open(config_get(CONFIG_SECTION_PLUGIN_PROC_MEMINFO, "filename to monitor", filename), " \t:", PROCFILE_FLAG_DEFAULT);
+ if(unlikely(!ff))
+ return 1;
+ }
+
+ ff = procfile_readall(ff);
+ if(unlikely(!ff))
+ return 0; // return 0, so that we retry reading it on the next iteration
+
+ size_t lines = procfile_lines(ff), l;
+
+ arl_begin(arl_base);
+
+ for(l = 0; l < lines ;l++) {
+ size_t words = procfile_linewords(ff, l);
+ if(unlikely(words < 2)) continue;
+
+ if(unlikely(arl_check(arl_base,
+ procfile_lineword(ff, l, 0),
+ procfile_lineword(ff, l, 1)))) break;
+ }
+
+ // --------------------------------------------------------------------
+
+ // http://stackoverflow.com/questions/3019748/how-to-reliably-measure-available-memory-in-linux
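+ // Slab is counted together with the page cache here, since its reclaimable part behaves like cache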
+ unsigned long long MemCached = Cached + Slab;
+ unsigned long long MemUsed = MemTotal - MemFree - MemCached - Buffers;
+
+ if(do_ram) {
+ {
+ static RRDSET *st_system_ram = NULL;
+ static RRDDIM *rd_free = NULL, *rd_used = NULL, *rd_cached = NULL, *rd_buffers = NULL;
+
+ if(unlikely(!st_system_ram)) {
+ st_system_ram = rrdset_create_localhost(
+ "system"
+ , "ram"
+ , NULL
+ , "ram"
+ , NULL
+ , "System RAM"
+ , "MB"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_MEMINFO_NAME
+ , NETDATA_CHART_PRIO_SYSTEM_RAM
+ , update_every
+ , RRDSET_TYPE_STACKED
+ );
+
+ rd_free = rrddim_add(st_system_ram, "free", NULL, 1, 1024, RRD_ALGORITHM_ABSOLUTE);
+ rd_used = rrddim_add(st_system_ram, "used", NULL, 1, 1024, RRD_ALGORITHM_ABSOLUTE);
+ rd_cached = rrddim_add(st_system_ram, "cached", NULL, 1, 1024, RRD_ALGORITHM_ABSOLUTE);
+ rd_buffers = rrddim_add(st_system_ram, "buffers", NULL, 1, 1024, RRD_ALGORITHM_ABSOLUTE);
+ }
+ else rrdset_next(st_system_ram);
+
+ rrddim_set_by_pointer(st_system_ram, rd_free, MemFree);
+ rrddim_set_by_pointer(st_system_ram, rd_used, MemUsed);
+ rrddim_set_by_pointer(st_system_ram, rd_cached, MemCached);
+ rrddim_set_by_pointer(st_system_ram, rd_buffers, Buffers);
+
+ rrdset_done(st_system_ram);
+ }
+
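+ // MemAvailable is reported only by newer kernels (3.14 and later); chart it only when it is present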
+ if(arl_memavailable->flags & ARL_ENTRY_FLAG_FOUND) {
+ static RRDSET *st_mem_available = NULL;
+ static RRDDIM *rd_avail = NULL;
+
+ if(unlikely(!st_mem_available)) {
+ st_mem_available = rrdset_create_localhost(
+ "mem"
+ , "available"
+ , NULL
+ , "system"
+ , NULL
+ , "Available RAM for applications"
+ , "MB"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_MEMINFO_NAME
+ , NETDATA_CHART_PRIO_MEM_SYSTEM_AVAILABLE
+ , update_every
+ , RRDSET_TYPE_AREA
+ );
+
+ rd_avail = rrddim_add(st_mem_available, "MemAvailable", "avail", 1, 1024, RRD_ALGORITHM_ABSOLUTE);
+ }
+ else rrdset_next(st_mem_available);
+
+ rrddim_set_by_pointer(st_mem_available, rd_avail, MemAvailable);
+
+ rrdset_done(st_mem_available);
+ }
+ }
+
+ // --------------------------------------------------------------------
+
+ unsigned long long SwapUsed = SwapTotal - SwapFree;
+
+ if(do_swap == CONFIG_BOOLEAN_YES || SwapTotal || SwapUsed || SwapFree) {
+ do_swap = CONFIG_BOOLEAN_YES;
+
+ static RRDSET *st_system_swap = NULL;
+ static RRDDIM *rd_free = NULL, *rd_used = NULL;
+
+ if(unlikely(!st_system_swap)) {
+ st_system_swap = rrdset_create_localhost(
+ "system"
+ , "swap"
+ , NULL
+ , "swap"
+ , NULL
+ , "System Swap"
+ , "MB"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_MEMINFO_NAME
+ , NETDATA_CHART_PRIO_SYSTEM_SWAP
+ , update_every
+ , RRDSET_TYPE_STACKED
+ );
+
+ rrdset_flag_set(st_system_swap, RRDSET_FLAG_DETAIL);
+
+ rd_free = rrddim_add(st_system_swap, "free", NULL, 1, 1024, RRD_ALGORITHM_ABSOLUTE);
+ rd_used = rrddim_add(st_system_swap, "used", NULL, 1, 1024, RRD_ALGORITHM_ABSOLUTE);
+ }
+ else rrdset_next(st_system_swap);
+
+ rrddim_set_by_pointer(st_system_swap, rd_used, SwapUsed);
+ rrddim_set_by_pointer(st_system_swap, rd_free, SwapFree);
+
+ rrdset_done(st_system_swap);
+ }
+
+ // --------------------------------------------------------------------
+
+ if(arl_hwcorrupted->flags & ARL_ENTRY_FLAG_FOUND && (do_hwcorrupt == CONFIG_BOOLEAN_YES || (do_hwcorrupt == CONFIG_BOOLEAN_AUTO && HardwareCorrupted > 0))) {
+ do_hwcorrupt = CONFIG_BOOLEAN_YES;
+
+ static RRDSET *st_mem_hwcorrupt = NULL;
+ static RRDDIM *rd_corrupted = NULL;
+
+ if(unlikely(!st_mem_hwcorrupt)) {
+ st_mem_hwcorrupt = rrdset_create_localhost(
+ "mem"
+ , "hwcorrupt"
+ , NULL
+ , "ecc"
+ , NULL
+ , "Corrupted Memory, detected by ECC"
+ , "MB"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_MEMINFO_NAME
+ , NETDATA_CHART_PRIO_MEM_HW
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ rrdset_flag_set(st_mem_hwcorrupt, RRDSET_FLAG_DETAIL);
+
+ rd_corrupted = rrddim_add(st_mem_hwcorrupt, "HardwareCorrupted", NULL, 1, 1024, RRD_ALGORITHM_ABSOLUTE);
+ }
+ else rrdset_next(st_mem_hwcorrupt);
+
+ rrddim_set_by_pointer(st_mem_hwcorrupt, rd_corrupted, HardwareCorrupted);
+
+ rrdset_done(st_mem_hwcorrupt);
+ }
+
+ // --------------------------------------------------------------------
+
+ if(do_committed) {
+ static RRDSET *st_mem_committed = NULL;
+ static RRDDIM *rd_committed = NULL;
+
+ if(unlikely(!st_mem_committed)) {
+ st_mem_committed = rrdset_create_localhost(
+ "mem"
+ , "committed"
+ , NULL
+ , "system"
+ , NULL
+ , "Committed (Allocated) Memory"
+ , "MB"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_MEMINFO_NAME
+ , NETDATA_CHART_PRIO_MEM_SYSTEM_COMMITTED
+ , update_every
+ , RRDSET_TYPE_AREA
+ );
+
+ rrdset_flag_set(st_mem_committed, RRDSET_FLAG_DETAIL);
+
+ rd_committed = rrddim_add(st_mem_committed, "Committed_AS", NULL, 1, 1024, RRD_ALGORITHM_ABSOLUTE);
+ }
+ else rrdset_next(st_mem_committed);
+
+ rrddim_set_by_pointer(st_mem_committed, rd_committed, Committed_AS);
+
+ rrdset_done(st_mem_committed);
+ }
+
+ // --------------------------------------------------------------------
+
+ if(do_writeback) {
+ static RRDSET *st_mem_writeback = NULL;
+ static RRDDIM *rd_dirty = NULL, *rd_writeback = NULL, *rd_fusewriteback = NULL, *rd_nfs_writeback = NULL, *rd_bounce = NULL;
+
+ if(unlikely(!st_mem_writeback)) {
+ st_mem_writeback = rrdset_create_localhost(
+ "mem"
+ , "writeback"
+ , NULL
+ , "kernel"
+ , NULL
+ , "Writeback Memory"
+ , "MB"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_MEMINFO_NAME
+ , NETDATA_CHART_PRIO_MEM_KERNEL
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+ rrdset_flag_set(st_mem_writeback, RRDSET_FLAG_DETAIL);
+
+ rd_dirty = rrddim_add(st_mem_writeback, "Dirty", NULL, 1, 1024, RRD_ALGORITHM_ABSOLUTE);
+ rd_writeback = rrddim_add(st_mem_writeback, "Writeback", NULL, 1, 1024, RRD_ALGORITHM_ABSOLUTE);
+ rd_fusewriteback = rrddim_add(st_mem_writeback, "FuseWriteback", NULL, 1, 1024, RRD_ALGORITHM_ABSOLUTE);
+ rd_nfs_writeback = rrddim_add(st_mem_writeback, "NfsWriteback", NULL, 1, 1024, RRD_ALGORITHM_ABSOLUTE);
+ rd_bounce = rrddim_add(st_mem_writeback, "Bounce", NULL, 1, 1024, RRD_ALGORITHM_ABSOLUTE);
+ }
+ else rrdset_next(st_mem_writeback);
+
+ rrddim_set_by_pointer(st_mem_writeback, rd_dirty, Dirty);
+ rrddim_set_by_pointer(st_mem_writeback, rd_writeback, Writeback);
+ rrddim_set_by_pointer(st_mem_writeback, rd_fusewriteback, WritebackTmp);
+ rrddim_set_by_pointer(st_mem_writeback, rd_nfs_writeback, NFS_Unstable);
+ rrddim_set_by_pointer(st_mem_writeback, rd_bounce, Bounce);
+
+ rrdset_done(st_mem_writeback);
+ }
+
+ // --------------------------------------------------------------------
+
+ if(do_kernel) {
+ static RRDSET *st_mem_kernel = NULL;
+ static RRDDIM *rd_slab = NULL, *rd_kernelstack = NULL, *rd_pagetables = NULL, *rd_vmallocused = NULL;
+
+ if(unlikely(!st_mem_kernel)) {
+ st_mem_kernel = rrdset_create_localhost(
+ "mem"
+ , "kernel"
+ , NULL
+ , "kernel"
+ , NULL
+ , "Memory Used by Kernel"
+ , "MB"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_MEMINFO_NAME
+ , NETDATA_CHART_PRIO_MEM_KERNEL + 1
+ , update_every
+ , RRDSET_TYPE_STACKED
+ );
+
+ rrdset_flag_set(st_mem_kernel, RRDSET_FLAG_DETAIL);
+
+ rd_slab = rrddim_add(st_mem_kernel, "Slab", NULL, 1, 1024, RRD_ALGORITHM_ABSOLUTE);
+ rd_kernelstack = rrddim_add(st_mem_kernel, "KernelStack", NULL, 1, 1024, RRD_ALGORITHM_ABSOLUTE);
+ rd_pagetables = rrddim_add(st_mem_kernel, "PageTables", NULL, 1, 1024, RRD_ALGORITHM_ABSOLUTE);
+ rd_vmallocused = rrddim_add(st_mem_kernel, "VmallocUsed", NULL, 1, 1024, RRD_ALGORITHM_ABSOLUTE);
+ }
+ else rrdset_next(st_mem_kernel);
+
+ rrddim_set_by_pointer(st_mem_kernel, rd_slab, Slab);
+ rrddim_set_by_pointer(st_mem_kernel, rd_kernelstack, KernelStack);
+ rrddim_set_by_pointer(st_mem_kernel, rd_pagetables, PageTables);
+ rrddim_set_by_pointer(st_mem_kernel, rd_vmallocused, VmallocUsed);
+
+ rrdset_done(st_mem_kernel);
+ }
+
+ // --------------------------------------------------------------------
+
+ if(do_slab) {
+ static RRDSET *st_mem_slab = NULL;
+ static RRDDIM *rd_reclaimable = NULL, *rd_unreclaimable = NULL;
+
+ if(unlikely(!st_mem_slab)) {
+ st_mem_slab = rrdset_create_localhost(
+ "mem"
+ , "slab"
+ , NULL
+ , "slab"
+ , NULL
+ , "Reclaimable Kernel Memory"
+ , "MB"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_MEMINFO_NAME
+ , NETDATA_CHART_PRIO_MEM_SLAB
+ , update_every
+ , RRDSET_TYPE_STACKED
+ );
+
+ rrdset_flag_set(st_mem_slab, RRDSET_FLAG_DETAIL);
+
+ rd_reclaimable = rrddim_add(st_mem_slab, "reclaimable", NULL, 1, 1024, RRD_ALGORITHM_ABSOLUTE);
+ rd_unreclaimable = rrddim_add(st_mem_slab, "unreclaimable", NULL, 1, 1024, RRD_ALGORITHM_ABSOLUTE);
+ }
+ else rrdset_next(st_mem_slab);
+
+ rrddim_set_by_pointer(st_mem_slab, rd_reclaimable, SReclaimable);
+ rrddim_set_by_pointer(st_mem_slab, rd_unreclaimable, SUnreclaim);
+
+ rrdset_done(st_mem_slab);
+ }
+
+ // --------------------------------------------------------------------
+
+ if(do_hugepages == CONFIG_BOOLEAN_YES || (do_hugepages == CONFIG_BOOLEAN_AUTO && Hugepagesize != 0 && HugePages_Total != 0)) {
+ do_hugepages = CONFIG_BOOLEAN_YES;
+
+ static RRDSET *st_mem_hugepages = NULL;
+ static RRDDIM *rd_used = NULL, *rd_free = NULL, *rd_rsvd = NULL, *rd_surp = NULL;
+
+ if(unlikely(!st_mem_hugepages)) {
+ st_mem_hugepages = rrdset_create_localhost(
+ "mem"
+ , "hugepages"
+ , NULL
+ , "hugepages"
+ , NULL
+ , "Dedicated HugePages Memory"
+ , "MB"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_MEMINFO_NAME
+ , NETDATA_CHART_PRIO_MEM_HUGEPAGES + 1
+ , update_every
+ , RRDSET_TYPE_STACKED
+ );
+
+ rrdset_flag_set(st_mem_hugepages, RRDSET_FLAG_DETAIL);
+
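+ // the HugePages_* values are page counts; scale each by Hugepagesize (in kB) and divide by 1024 to report MB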
+ rd_free = rrddim_add(st_mem_hugepages, "free", NULL, Hugepagesize, 1024, RRD_ALGORITHM_ABSOLUTE);
+ rd_used = rrddim_add(st_mem_hugepages, "used", NULL, Hugepagesize, 1024, RRD_ALGORITHM_ABSOLUTE);
+ rd_surp = rrddim_add(st_mem_hugepages, "surplus", NULL, Hugepagesize, 1024, RRD_ALGORITHM_ABSOLUTE);
+ rd_rsvd = rrddim_add(st_mem_hugepages, "reserved", NULL, Hugepagesize, 1024, RRD_ALGORITHM_ABSOLUTE);
+ }
+ else rrdset_next(st_mem_hugepages);
+
+ rrddim_set_by_pointer(st_mem_hugepages, rd_used, HugePages_Total - HugePages_Free - HugePages_Rsvd);
+ rrddim_set_by_pointer(st_mem_hugepages, rd_free, HugePages_Free);
+ rrddim_set_by_pointer(st_mem_hugepages, rd_rsvd, HugePages_Rsvd);
+ rrddim_set_by_pointer(st_mem_hugepages, rd_surp, HugePages_Surp);
+
+ rrdset_done(st_mem_hugepages);
+ }
+
+ // --------------------------------------------------------------------
+
+ if(do_transparent_hugepages == CONFIG_BOOLEAN_YES || (do_transparent_hugepages == CONFIG_BOOLEAN_AUTO && (AnonHugePages != 0 || ShmemHugePages != 0))) {
+ do_transparent_hugepages = CONFIG_BOOLEAN_YES;
+
+ static RRDSET *st_mem_transparent_hugepages = NULL;
+ static RRDDIM *rd_anonymous = NULL, *rd_shared = NULL;
+
+ if(unlikely(!st_mem_transparent_hugepages)) {
+ st_mem_transparent_hugepages = rrdset_create_localhost(
+ "mem"
+ , "transparent_hugepages"
+ , NULL
+ , "hugepages"
+ , NULL
+ , "Transparent HugePages Memory"
+ , "MB"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_MEMINFO_NAME
+ , NETDATA_CHART_PRIO_MEM_HUGEPAGES
+ , update_every
+ , RRDSET_TYPE_STACKED
+ );
+
+ rrdset_flag_set(st_mem_transparent_hugepages, RRDSET_FLAG_DETAIL);
+
+ rd_anonymous = rrddim_add(st_mem_transparent_hugepages, "anonymous", NULL, 1, 1024, RRD_ALGORITHM_ABSOLUTE);
+ rd_shared = rrddim_add(st_mem_transparent_hugepages, "shmem", NULL, 1, 1024, RRD_ALGORITHM_ABSOLUTE);
+ }
+ else rrdset_next(st_mem_transparent_hugepages);
+
+ rrddim_set_by_pointer(st_mem_transparent_hugepages, rd_anonymous, AnonHugePages);
+ rrddim_set_by_pointer(st_mem_transparent_hugepages, rd_shared, ShmemHugePages);
+
+ rrdset_done(st_mem_transparent_hugepages);
+ }
+
+ return 0;
+}
+
diff --git a/collectors/proc.plugin/proc_net_dev.c b/collectors/proc.plugin/proc_net_dev.c
new file mode 100644
index 000000000..97cbc060a
--- /dev/null
+++ b/collectors/proc.plugin/proc_net_dev.c
@@ -0,0 +1,912 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "plugin_proc.h"
+
+#define PLUGIN_PROC_MODULE_NETDEV_NAME "/proc/net/dev"
+#define CONFIG_SECTION_PLUGIN_PROC_NETDEV "plugin:" PLUGIN_PROC_CONFIG_NAME ":" PLUGIN_PROC_MODULE_NETDEV_NAME
+
+// ----------------------------------------------------------------------------
+// netdev list
+
+static struct netdev {
+ char *name;
+ uint32_t hash;
+ size_t len;
+
+ // flags
+ int virtual;
+ int configured;
+ int enabled;
+ int updated;
+
+ int do_bandwidth;
+ int do_packets;
+ int do_errors;
+ int do_drops;
+ int do_fifo;
+ int do_compressed;
+ int do_events;
+
+ const char *chart_type_net_bytes;
+ const char *chart_type_net_packets;
+ const char *chart_type_net_errors;
+ const char *chart_type_net_fifo;
+ const char *chart_type_net_events;
+ const char *chart_type_net_drops;
+ const char *chart_type_net_compressed;
+
+ const char *chart_id_net_bytes;
+ const char *chart_id_net_packets;
+ const char *chart_id_net_errors;
+ const char *chart_id_net_fifo;
+ const char *chart_id_net_events;
+ const char *chart_id_net_drops;
+ const char *chart_id_net_compressed;
+
+ const char *chart_family;
+
+ int flipped;
+ unsigned long priority;
+
+ // data collected
+ kernel_uint_t rbytes;
+ kernel_uint_t rpackets;
+ kernel_uint_t rerrors;
+ kernel_uint_t rdrops;
+ kernel_uint_t rfifo;
+ kernel_uint_t rframe;
+ kernel_uint_t rcompressed;
+ kernel_uint_t rmulticast;
+
+ kernel_uint_t tbytes;
+ kernel_uint_t tpackets;
+ kernel_uint_t terrors;
+ kernel_uint_t tdrops;
+ kernel_uint_t tfifo;
+ kernel_uint_t tcollisions;
+ kernel_uint_t tcarrier;
+ kernel_uint_t tcompressed;
+ kernel_uint_t speed_max;
+
+ // charts
+ RRDSET *st_bandwidth;
+ RRDSET *st_packets;
+ RRDSET *st_errors;
+ RRDSET *st_drops;
+ RRDSET *st_fifo;
+ RRDSET *st_compressed;
+ RRDSET *st_events;
+
+ // dimensions
+ RRDDIM *rd_rbytes;
+ RRDDIM *rd_rpackets;
+ RRDDIM *rd_rerrors;
+ RRDDIM *rd_rdrops;
+ RRDDIM *rd_rfifo;
+ RRDDIM *rd_rframe;
+ RRDDIM *rd_rcompressed;
+ RRDDIM *rd_rmulticast;
+
+ RRDDIM *rd_tbytes;
+ RRDDIM *rd_tpackets;
+ RRDDIM *rd_terrors;
+ RRDDIM *rd_tdrops;
+ RRDDIM *rd_tfifo;
+ RRDDIM *rd_tcollisions;
+ RRDDIM *rd_tcarrier;
+ RRDDIM *rd_tcompressed;
+
+ struct netdev *next;
+} *netdev_root = NULL, *netdev_last_used = NULL;
+
+static size_t netdev_added = 0, netdev_found = 0;
+
+// ----------------------------------------------------------------------------
+
+static void netdev_charts_release(struct netdev *d) {
+ if(d->st_bandwidth) rrdset_is_obsolete(d->st_bandwidth);
+ if(d->st_packets) rrdset_is_obsolete(d->st_packets);
+ if(d->st_errors) rrdset_is_obsolete(d->st_errors);
+ if(d->st_drops) rrdset_is_obsolete(d->st_drops);
+ if(d->st_fifo) rrdset_is_obsolete(d->st_fifo);
+ if(d->st_compressed) rrdset_is_obsolete(d->st_compressed);
+ if(d->st_events) rrdset_is_obsolete(d->st_events);
+
+ d->st_bandwidth = NULL;
+ d->st_compressed = NULL;
+ d->st_drops = NULL;
+ d->st_errors = NULL;
+ d->st_events = NULL;
+ d->st_fifo = NULL;
+ d->st_packets = NULL;
+
+ d->rd_rbytes = NULL;
+ d->rd_rpackets = NULL;
+ d->rd_rerrors = NULL;
+ d->rd_rdrops = NULL;
+ d->rd_rfifo = NULL;
+ d->rd_rframe = NULL;
+ d->rd_rcompressed = NULL;
+ d->rd_rmulticast = NULL;
+
+ d->rd_tbytes = NULL;
+ d->rd_tpackets = NULL;
+ d->rd_terrors = NULL;
+ d->rd_tdrops = NULL;
+ d->rd_tfifo = NULL;
+ d->rd_tcollisions = NULL;
+ d->rd_tcarrier = NULL;
+ d->rd_tcompressed = NULL;
+}
+
+static void netdev_free_strings(struct netdev *d) {
+ freez((void *)d->chart_type_net_bytes);
+ freez((void *)d->chart_type_net_compressed);
+ freez((void *)d->chart_type_net_drops);
+ freez((void *)d->chart_type_net_errors);
+ freez((void *)d->chart_type_net_events);
+ freez((void *)d->chart_type_net_fifo);
+ freez((void *)d->chart_type_net_packets);
+
+ freez((void *)d->chart_id_net_bytes);
+ freez((void *)d->chart_id_net_compressed);
+ freez((void *)d->chart_id_net_drops);
+ freez((void *)d->chart_id_net_errors);
+ freez((void *)d->chart_id_net_events);
+ freez((void *)d->chart_id_net_fifo);
+ freez((void *)d->chart_id_net_packets);
+
+ freez((void *)d->chart_family);
+}
+
+static void netdev_free(struct netdev *d) {
+ netdev_charts_release(d);
+ netdev_free_strings(d);
+
+ freez((void *)d->name);
+ freez((void *)d);
+ netdev_added--;
+}
+
+
+// ----------------------------------------------------------------------------
+// netdev renames
+
+static struct netdev_rename {
+ const char *host_device;
+ uint32_t hash;
+
+ const char *container_device;
+ const char *container_name;
+
+ int processed;
+
+ struct netdev_rename *next;
+} *netdev_rename_root = NULL;
+
+static int netdev_pending_renames = 0;
+static netdata_mutex_t netdev_rename_mutex = NETDATA_MUTEX_INITIALIZER;
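+// the rename list is filled by other threads (the cgroups collector) and consumed here, so all access goes through this mutex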
+
+static struct netdev_rename *netdev_rename_find(const char *host_device, uint32_t hash) {
+ struct netdev_rename *r;
+
+ for(r = netdev_rename_root; r ; r = r->next)
+ if(r->hash == hash && !strcmp(host_device, r->host_device))
+ return r;
+
+ return NULL;
+}
+
+// other threads can call this function to register a rename for a netdev
+void netdev_rename_device_add(const char *host_device, const char *container_device, const char *container_name) {
+ netdata_mutex_lock(&netdev_rename_mutex);
+
+ uint32_t hash = simple_hash(host_device);
+ struct netdev_rename *r = netdev_rename_find(host_device, hash);
+ if(!r) {
+ r = callocz(1, sizeof(struct netdev_rename));
+ r->host_device = strdupz(host_device);
+ r->container_device = strdupz(container_device);
+ r->container_name = strdupz(container_name);
+ r->hash = hash;
+ r->next = netdev_rename_root;
+ r->processed = 0;
+ netdev_rename_root = r;
+ netdev_pending_renames++;
+ info("CGROUP: registered network interface rename for '%s' as '%s' under '%s'", r->host_device, r->container_device, r->container_name);
+ }
+ else {
+ if(strcmp(r->container_device, container_device) != 0 || strcmp(r->container_name, container_name) != 0) {
+ freez((void *) r->container_device);
+ freez((void *) r->container_name);
+
+ r->container_device = strdupz(container_device);
+ r->container_name = strdupz(container_name);
+ r->processed = 0;
+ netdev_pending_renames++;
+ info("CGROUP: altered network interface rename for '%s' as '%s' under '%s'", r->host_device, r->container_device, r->container_name);
+ }
+ }
+
+ netdata_mutex_unlock(&netdev_rename_mutex);
+}
+
+// other threads can call this function to delete a rename for a netdev
+void netdev_rename_device_del(const char *host_device) {
+ netdata_mutex_lock(&netdev_rename_mutex);
+
+ struct netdev_rename *r, *last = NULL;
+
+ uint32_t hash = simple_hash(host_device);
+ for(r = netdev_rename_root; r ; last = r, r = r->next) {
+ if (r->hash == hash && !strcmp(host_device, r->host_device)) {
+ if (netdev_rename_root == r)
+ netdev_rename_root = r->next;
+ else if (last)
+ last->next = r->next;
+
+ if(!r->processed)
+ netdev_pending_renames--;
+
+ info("CGROUP: unregistered network interface rename for '%s' as '%s' under '%s'", r->host_device, r->container_device, r->container_name);
+
+ freez((void *) r->host_device);
+ freez((void *) r->container_name);
+ freez((void *) r->container_device);
+ freez((void *) r);
+ break;
+ }
+ }
+
+ netdata_mutex_unlock(&netdev_rename_mutex);
+}
+
+static inline void netdev_rename_cgroup(struct netdev *d, struct netdev_rename *r) {
+ info("CGROUP: renaming network interface '%s' as '%s' under '%s'", r->host_device, r->container_device, r->container_name);
+
+ netdev_charts_release(d);
+ netdev_free_strings(d);
+
+ char buffer[RRD_ID_LENGTH_MAX + 1];
+
+ snprintfz(buffer, RRD_ID_LENGTH_MAX, "cgroup_%s", r->container_name);
+ d->chart_type_net_bytes = strdupz(buffer);
+ d->chart_type_net_compressed = strdupz(buffer);
+ d->chart_type_net_drops = strdupz(buffer);
+ d->chart_type_net_errors = strdupz(buffer);
+ d->chart_type_net_events = strdupz(buffer);
+ d->chart_type_net_fifo = strdupz(buffer);
+ d->chart_type_net_packets = strdupz(buffer);
+
+ snprintfz(buffer, RRD_ID_LENGTH_MAX, "net_%s", r->container_device);
+ d->chart_id_net_bytes = strdupz(buffer);
+
+ snprintfz(buffer, RRD_ID_LENGTH_MAX, "net_compressed_%s", r->container_device);
+ d->chart_id_net_compressed = strdupz(buffer);
+
+ snprintfz(buffer, RRD_ID_LENGTH_MAX, "net_drops_%s", r->container_device);
+ d->chart_id_net_drops = strdupz(buffer);
+
+ snprintfz(buffer, RRD_ID_LENGTH_MAX, "net_errors_%s", r->container_device);
+ d->chart_id_net_errors = strdupz(buffer);
+
+ snprintfz(buffer, RRD_ID_LENGTH_MAX, "net_events_%s", r->container_device);
+ d->chart_id_net_events = strdupz(buffer);
+
+ snprintfz(buffer, RRD_ID_LENGTH_MAX, "net_fifo_%s", r->container_device);
+ d->chart_id_net_fifo = strdupz(buffer);
+
+ snprintfz(buffer, RRD_ID_LENGTH_MAX, "net_packets_%s", r->container_device);
+ d->chart_id_net_packets = strdupz(buffer);
+
+ snprintfz(buffer, RRD_ID_LENGTH_MAX, "net %s", r->container_device);
+ d->chart_family = strdupz(buffer);
+
+ d->priority = NETDATA_CHART_PRIO_CGROUP_NET_IFACE;
+ d->flipped = 1;
+}
+
+static inline void netdev_rename(struct netdev *d) {
+ struct netdev_rename *r = netdev_rename_find(d->name, d->hash);
+ if(unlikely(r && !r->processed)) {
+ netdev_rename_cgroup(d, r);
+ r->processed = 1;
+ netdev_pending_renames--;
+ }
+}
+
+static inline void netdev_rename_lock(struct netdev *d) {
+ netdata_mutex_lock(&netdev_rename_mutex);
+ netdev_rename(d);
+ netdata_mutex_unlock(&netdev_rename_mutex);
+}
+
+static inline void netdev_rename_all_lock(void) {
+ netdata_mutex_lock(&netdev_rename_mutex);
+
+ struct netdev *d;
+ for(d = netdev_root; d ; d = d->next)
+ netdev_rename(d);
+
+ netdev_pending_renames = 0;
+ netdata_mutex_unlock(&netdev_rename_mutex);
+}
+
+// ----------------------------------------------------------------------------
+// netdev data collection
+
+static void netdev_cleanup() {
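+ // remove interfaces that were not updated in this iteration (they no longer appear in /proc/net/dev)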
+ if(likely(netdev_found == netdev_added)) return;
+
+ netdev_added = 0;
+ struct netdev *d = netdev_root, *last = NULL;
+ while(d) {
+ if(unlikely(!d->updated)) {
+ // info("Removing network device '%s', linked after '%s'", d->name, last?last->name:"ROOT");
+
+ if(netdev_last_used == d)
+ netdev_last_used = last;
+
+ struct netdev *t = d;
+
+ if(d == netdev_root || !last)
+ netdev_root = d = d->next;
+
+ else
+ last->next = d = d->next;
+
+ t->next = NULL;
+ netdev_free(t);
+ }
+ else {
+ netdev_added++;
+ last = d;
+ d->updated = 0;
+ d = d->next;
+ }
+ }
+}
+
+static struct netdev *get_netdev(const char *name) {
+ struct netdev *d;
+
+ uint32_t hash = simple_hash(name);
+
+ // search it, from the last position to the end
+ for(d = netdev_last_used ; d ; d = d->next) {
+ if(unlikely(hash == d->hash && !strcmp(name, d->name))) {
+ netdev_last_used = d->next;
+ return d;
+ }
+ }
+
+ // search it from the beginning to the last position we used
+ for(d = netdev_root ; d != netdev_last_used ; d = d->next) {
+ if(unlikely(hash == d->hash && !strcmp(name, d->name))) {
+ netdev_last_used = d->next;
+ return d;
+ }
+ }
+
+ // create a new one
+ d = callocz(1, sizeof(struct netdev));
+ d->name = strdupz(name);
+ d->hash = simple_hash(d->name);
+ d->len = strlen(d->name);
+
+ d->chart_type_net_bytes = strdupz("net");
+ d->chart_type_net_compressed = strdupz("net_compressed");
+ d->chart_type_net_drops = strdupz("net_drops");
+ d->chart_type_net_errors = strdupz("net_errors");
+ d->chart_type_net_events = strdupz("net_events");
+ d->chart_type_net_fifo = strdupz("net_fifo");
+ d->chart_type_net_packets = strdupz("net_packets");
+
+ d->chart_id_net_bytes = strdupz(d->name);
+ d->chart_id_net_compressed = strdupz(d->name);
+ d->chart_id_net_drops = strdupz(d->name);
+ d->chart_id_net_errors = strdupz(d->name);
+ d->chart_id_net_events = strdupz(d->name);
+ d->chart_id_net_fifo = strdupz(d->name);
+ d->chart_id_net_packets = strdupz(d->name);
+
+ d->chart_family = strdupz(d->name);
+ d->priority = NETDATA_CHART_PRIO_FIRST_NET_IFACE;
+
+ netdev_rename_lock(d);
+
+ netdev_added++;
+
+ // link it to the end
+ if(netdev_root) {
+ struct netdev *e;
+ for(e = netdev_root; e->next ; e = e->next) ;
+ e->next = d;
+ }
+ else
+ netdev_root = d;
+
+ return d;
+}
+
+int do_proc_net_dev(int update_every, usec_t dt) {
+ (void)dt;
+ static SIMPLE_PATTERN *disabled_list = NULL;
+ static procfile *ff = NULL;
+ static int enable_new_interfaces = -1;
+ static int do_bandwidth = -1, do_packets = -1, do_errors = -1, do_drops = -1, do_fifo = -1, do_compressed = -1, do_events = -1;
+ static char *path_to_sys_devices_virtual_net = NULL;
+ static char *path_to_sys_net_speed = NULL;
+
+ if(unlikely(enable_new_interfaces == -1)) {
+ char filename[FILENAME_MAX + 1];
+
+ snprintfz(filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/sys/devices/virtual/net/%s");
+ path_to_sys_devices_virtual_net = config_get(CONFIG_SECTION_PLUGIN_PROC_NETDEV, "path to get virtual interfaces", filename);
+
+ enable_new_interfaces = config_get_boolean_ondemand(CONFIG_SECTION_PLUGIN_PROC_NETDEV, "enable new interfaces detected at runtime", CONFIG_BOOLEAN_AUTO);
+
+ do_bandwidth = config_get_boolean_ondemand(CONFIG_SECTION_PLUGIN_PROC_NETDEV, "bandwidth for all interfaces", CONFIG_BOOLEAN_AUTO);
+ do_packets = config_get_boolean_ondemand(CONFIG_SECTION_PLUGIN_PROC_NETDEV, "packets for all interfaces", CONFIG_BOOLEAN_AUTO);
+ do_errors = config_get_boolean_ondemand(CONFIG_SECTION_PLUGIN_PROC_NETDEV, "errors for all interfaces", CONFIG_BOOLEAN_AUTO);
+ do_drops = config_get_boolean_ondemand(CONFIG_SECTION_PLUGIN_PROC_NETDEV, "drops for all interfaces", CONFIG_BOOLEAN_AUTO);
+ do_fifo = config_get_boolean_ondemand(CONFIG_SECTION_PLUGIN_PROC_NETDEV, "fifo for all interfaces", CONFIG_BOOLEAN_AUTO);
+ do_compressed = config_get_boolean_ondemand(CONFIG_SECTION_PLUGIN_PROC_NETDEV, "compressed packets for all interfaces", CONFIG_BOOLEAN_AUTO);
+ do_events = config_get_boolean_ondemand(CONFIG_SECTION_PLUGIN_PROC_NETDEV, "frames, collisions, carrier counters for all interfaces", CONFIG_BOOLEAN_AUTO);
+
+ disabled_list = simple_pattern_create(config_get(CONFIG_SECTION_PLUGIN_PROC_NETDEV, "disable by default interfaces matching", "lo fireqos* *-ifb"), NULL, SIMPLE_PATTERN_EXACT);
+ }
+
+ if(unlikely(!ff)) {
+ char filename[FILENAME_MAX + 1];
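+ // with a host prefix configured (typically when netdata runs inside a container), read /proc/1/net/dev to get the host's network namespace counters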
+ snprintfz(filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, (*netdata_configured_host_prefix)?"/proc/1/net/dev":"/proc/net/dev");
+ ff = procfile_open(config_get(CONFIG_SECTION_PLUGIN_PROC_NETDEV, "filename to monitor", filename), " \t,:|", PROCFILE_FLAG_DEFAULT);
+ if(unlikely(!ff)) return 1;
+ }
+
+ ff = procfile_readall(ff);
+ if(unlikely(!ff)) return 0; // return 0, so that we retry reading it on the next iteration
+
+ // rename all the devices, if we have pending renames
+ if(unlikely(netdev_pending_renames))
+ netdev_rename_all_lock();
+
+ netdev_found = 0;
+
+ kernel_uint_t system_rbytes = 0;
+ kernel_uint_t system_tbytes = 0;
+
+ size_t lines = procfile_lines(ff), l;
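+ // the first two lines of /proc/net/dev are headers; interface data starts on the third line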
+ for(l = 2; l < lines ;l++) {
+ // require 17 words on each line
+ if(unlikely(procfile_linewords(ff, l) < 17)) continue;
+
+ struct netdev *d = get_netdev(procfile_lineword(ff, l, 0));
+ d->updated = 1;
+ netdev_found++;
+
+ if(unlikely(!d->configured)) {
+ // this is the first time we see this interface
+
+ // remember we configured it
+ d->configured = 1;
+
+ d->enabled = enable_new_interfaces;
+
+ if(d->enabled)
+ d->enabled = !simple_pattern_matches(disabled_list, d->name);
+
+ char buffer[FILENAME_MAX + 1];
+
+ snprintfz(buffer, FILENAME_MAX, path_to_sys_devices_virtual_net, d->name);
+ if(likely(access(buffer, R_OK) == 0)) {
+ d->virtual = 1;
+ }
+ else
+ d->virtual = 0;
+
+ // set nic speed if present
+ if(likely(!d->virtual)) {
+ snprintfz(buffer, FILENAME_MAX, "%s/sys/class/net/%s/speed", netdata_configured_host_prefix, d->name);
+ path_to_sys_net_speed = config_get(CONFIG_SECTION_PLUGIN_PROC_NETDEV, "path to get net device speed", buffer);
+ int ret = read_single_number_file(path_to_sys_net_speed, (unsigned long long*)&d->speed_max);
+ if(ret) error("Cannot read '%s'.", path_to_sys_net_speed);
+ }
+
+ snprintfz(buffer, FILENAME_MAX, "plugin:proc:/proc/net/dev:%s", d->name);
+ d->enabled = config_get_boolean_ondemand(buffer, "enabled", d->enabled);
+ d->virtual = config_get_boolean(buffer, "virtual", d->virtual);
+
+ if(d->enabled == CONFIG_BOOLEAN_NO)
+ continue;
+
+ d->do_bandwidth = config_get_boolean_ondemand(buffer, "bandwidth", do_bandwidth);
+ d->do_packets = config_get_boolean_ondemand(buffer, "packets", do_packets);
+ d->do_errors = config_get_boolean_ondemand(buffer, "errors", do_errors);
+ d->do_drops = config_get_boolean_ondemand(buffer, "drops", do_drops);
+ d->do_fifo = config_get_boolean_ondemand(buffer, "fifo", do_fifo);
+ d->do_compressed = config_get_boolean_ondemand(buffer, "compressed", do_compressed);
+ d->do_events = config_get_boolean_ondemand(buffer, "events", do_events);
+ }
+
+ if(unlikely(!d->enabled))
+ continue;
+
+ if(likely(d->do_bandwidth != CONFIG_BOOLEAN_NO || !d->virtual)) {
+ d->rbytes = str2kernel_uint_t(procfile_lineword(ff, l, 1));
+ d->tbytes = str2kernel_uint_t(procfile_lineword(ff, l, 9));
+
+ if(likely(!d->virtual)) {
+ system_rbytes += d->rbytes;
+ system_tbytes += d->tbytes;
+ }
+ }
+
+ if(likely(d->do_packets != CONFIG_BOOLEAN_NO)) {
+ d->rpackets = str2kernel_uint_t(procfile_lineword(ff, l, 2));
+ d->rmulticast = str2kernel_uint_t(procfile_lineword(ff, l, 8));
+ d->tpackets = str2kernel_uint_t(procfile_lineword(ff, l, 10));
+ }
+
+ if(likely(d->do_errors != CONFIG_BOOLEAN_NO)) {
+ d->rerrors = str2kernel_uint_t(procfile_lineword(ff, l, 3));
+ d->terrors = str2kernel_uint_t(procfile_lineword(ff, l, 11));
+ }
+
+ if(likely(d->do_drops != CONFIG_BOOLEAN_NO)) {
+ d->rdrops = str2kernel_uint_t(procfile_lineword(ff, l, 4));
+ d->tdrops = str2kernel_uint_t(procfile_lineword(ff, l, 12));
+ }
+
+ if(likely(d->do_fifo != CONFIG_BOOLEAN_NO)) {
+ d->rfifo = str2kernel_uint_t(procfile_lineword(ff, l, 5));
+ d->tfifo = str2kernel_uint_t(procfile_lineword(ff, l, 13));
+ }
+
+ if(likely(d->do_compressed != CONFIG_BOOLEAN_NO)) {
+ d->rcompressed = str2kernel_uint_t(procfile_lineword(ff, l, 7));
+ d->tcompressed = str2kernel_uint_t(procfile_lineword(ff, l, 16));
+ }
+
+ if(likely(d->do_events != CONFIG_BOOLEAN_NO)) {
+ d->rframe = str2kernel_uint_t(procfile_lineword(ff, l, 6));
+ d->tcollisions = str2kernel_uint_t(procfile_lineword(ff, l, 14));
+ d->tcarrier = str2kernel_uint_t(procfile_lineword(ff, l, 15));
+ }
+
+ // --------------------------------------------------------------------
+
+ if(unlikely((d->do_bandwidth == CONFIG_BOOLEAN_AUTO && (d->rbytes || d->tbytes))))
+ d->do_bandwidth = CONFIG_BOOLEAN_YES;
+
+ if(d->do_bandwidth == CONFIG_BOOLEAN_YES) {
+ if(unlikely(!d->st_bandwidth)) {
+
+ d->st_bandwidth = rrdset_create_localhost(
+ d->chart_type_net_bytes
+ , d->chart_id_net_bytes
+ , NULL
+ , d->chart_family
+ , "net.net"
+ , "Bandwidth"
+ , "kilobits/s"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_NETDEV_NAME
+ , d->priority
+ , update_every
+ , RRDSET_TYPE_AREA
+ );
+
+ RRDSETVAR *nic_speed_max = rrdsetvar_custom_chart_variable_create(d->st_bandwidth, "nic_speed_max");
+ if(nic_speed_max) rrdsetvar_custom_chart_variable_set(nic_speed_max, (calculated_number)d->speed_max);
+
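+ // received/sent counters are in bytes; the 8 / BITS_IN_A_KILOBIT scaling converts them to kilobits per second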
+ d->rd_rbytes = rrddim_add(d->st_bandwidth, "received", NULL, 8, BITS_IN_A_KILOBIT, RRD_ALGORITHM_INCREMENTAL);
+ d->rd_tbytes = rrddim_add(d->st_bandwidth, "sent", NULL, -8, BITS_IN_A_KILOBIT, RRD_ALGORITHM_INCREMENTAL);
+
+ if(d->flipped) {
+ // flip receive/transmit
+
+ RRDDIM *td = d->rd_rbytes;
+ d->rd_rbytes = d->rd_tbytes;
+ d->rd_tbytes = td;
+ }
+ }
+ else rrdset_next(d->st_bandwidth);
+
+ rrddim_set_by_pointer(d->st_bandwidth, d->rd_rbytes, (collected_number)d->rbytes);
+ rrddim_set_by_pointer(d->st_bandwidth, d->rd_tbytes, (collected_number)d->tbytes);
+ rrdset_done(d->st_bandwidth);
+ }
+
+ // --------------------------------------------------------------------
+
+ if(unlikely((d->do_packets == CONFIG_BOOLEAN_AUTO && (d->rpackets || d->tpackets || d->rmulticast))))
+ d->do_packets = CONFIG_BOOLEAN_YES;
+
+ if(d->do_packets == CONFIG_BOOLEAN_YES) {
+ if(unlikely(!d->st_packets)) {
+
+ d->st_packets = rrdset_create_localhost(
+ d->chart_type_net_packets
+ , d->chart_id_net_packets
+ , NULL
+ , d->chart_family
+ , "net.packets"
+ , "Packets"
+ , "packets/s"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_NETDEV_NAME
+ , d->priority + 1
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ rrdset_flag_set(d->st_packets, RRDSET_FLAG_DETAIL);
+
+ d->rd_rpackets = rrddim_add(d->st_packets, "received", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ d->rd_tpackets = rrddim_add(d->st_packets, "sent", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ d->rd_rmulticast = rrddim_add(d->st_packets, "multicast", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+
+ if(d->flipped) {
+ // flip receive/transmit
+
+ RRDDIM *td = d->rd_rpackets;
+ d->rd_rpackets = d->rd_tpackets;
+ d->rd_tpackets = td;
+ }
+ }
+ else rrdset_next(d->st_packets);
+
+ rrddim_set_by_pointer(d->st_packets, d->rd_rpackets, (collected_number)d->rpackets);
+ rrddim_set_by_pointer(d->st_packets, d->rd_tpackets, (collected_number)d->tpackets);
+ rrddim_set_by_pointer(d->st_packets, d->rd_rmulticast, (collected_number)d->rmulticast);
+ rrdset_done(d->st_packets);
+ }
+
+ // --------------------------------------------------------------------
+
+ if(unlikely((d->do_errors == CONFIG_BOOLEAN_AUTO && (d->rerrors || d->terrors))))
+ d->do_errors = CONFIG_BOOLEAN_YES;
+
+ if(d->do_errors == CONFIG_BOOLEAN_YES) {
+ if(unlikely(!d->st_errors)) {
+
+ d->st_errors = rrdset_create_localhost(
+ d->chart_type_net_errors
+ , d->chart_id_net_errors
+ , NULL
+ , d->chart_family
+ , "net.errors"
+ , "Interface Errors"
+ , "errors/s"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_NETDEV_NAME
+ , d->priority + 2
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ rrdset_flag_set(d->st_errors, RRDSET_FLAG_DETAIL);
+
+ d->rd_rerrors = rrddim_add(d->st_errors, "inbound", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ d->rd_terrors = rrddim_add(d->st_errors, "outbound", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+
+ if(d->flipped) {
+ // flip receive/transmit
+
+ RRDDIM *td = d->rd_rerrors;
+ d->rd_rerrors = d->rd_terrors;
+ d->rd_terrors = td;
+ }
+ }
+ else rrdset_next(d->st_errors);
+
+ rrddim_set_by_pointer(d->st_errors, d->rd_rerrors, (collected_number)d->rerrors);
+ rrddim_set_by_pointer(d->st_errors, d->rd_terrors, (collected_number)d->terrors);
+ rrdset_done(d->st_errors);
+ }
+
+ // --------------------------------------------------------------------
+
+ if(unlikely((d->do_drops == CONFIG_BOOLEAN_AUTO && (d->rdrops || d->tdrops))))
+ d->do_drops = CONFIG_BOOLEAN_YES;
+
+ if(d->do_drops == CONFIG_BOOLEAN_YES) {
+ if(unlikely(!d->st_drops)) {
+
+ d->st_drops = rrdset_create_localhost(
+ d->chart_type_net_drops
+ , d->chart_id_net_drops
+ , NULL
+ , d->chart_family
+ , "net.drops"
+ , "Interface Drops"
+ , "drops/s"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_NETDEV_NAME
+ , d->priority + 3
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ rrdset_flag_set(d->st_drops, RRDSET_FLAG_DETAIL);
+
+ d->rd_rdrops = rrddim_add(d->st_drops, "inbound", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ d->rd_tdrops = rrddim_add(d->st_drops, "outbound", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+
+ if(d->flipped) {
+ // flip receive/transmit
+
+ RRDDIM *td = d->rd_rdrops;
+ d->rd_rdrops = d->rd_tdrops;
+ d->rd_tdrops = td;
+ }
+ }
+ else rrdset_next(d->st_drops);
+
+ rrddim_set_by_pointer(d->st_drops, d->rd_rdrops, (collected_number)d->rdrops);
+ rrddim_set_by_pointer(d->st_drops, d->rd_tdrops, (collected_number)d->tdrops);
+ rrdset_done(d->st_drops);
+ }
+
+ // --------------------------------------------------------------------
+
+ if(unlikely((d->do_fifo == CONFIG_BOOLEAN_AUTO && (d->rfifo || d->tfifo))))
+ d->do_fifo = CONFIG_BOOLEAN_YES;
+
+ if(d->do_fifo == CONFIG_BOOLEAN_YES) {
+ if(unlikely(!d->st_fifo)) {
+
+ d->st_fifo = rrdset_create_localhost(
+ d->chart_type_net_fifo
+ , d->chart_id_net_fifo
+ , NULL
+ , d->chart_family
+ , "net.fifo"
+ , "Interface FIFO Buffer Errors"
+ , "errors"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_NETDEV_NAME
+ , d->priority + 4
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ rrdset_flag_set(d->st_fifo, RRDSET_FLAG_DETAIL);
+
+ d->rd_rfifo = rrddim_add(d->st_fifo, "receive", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ d->rd_tfifo = rrddim_add(d->st_fifo, "transmit", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+
+ if(d->flipped) {
+ // flip receive/transmit
+
+ RRDDIM *td = d->rd_rfifo;
+ d->rd_rfifo = d->rd_tfifo;
+ d->rd_tfifo = td;
+ }
+ }
+ else rrdset_next(d->st_fifo);
+
+ rrddim_set_by_pointer(d->st_fifo, d->rd_rfifo, (collected_number)d->rfifo);
+ rrddim_set_by_pointer(d->st_fifo, d->rd_tfifo, (collected_number)d->tfifo);
+ rrdset_done(d->st_fifo);
+ }
+
+ // --------------------------------------------------------------------
+
+ if(unlikely((d->do_compressed == CONFIG_BOOLEAN_AUTO && (d->rcompressed || d->tcompressed))))
+ d->do_compressed = CONFIG_BOOLEAN_YES;
+
+ if(d->do_compressed == CONFIG_BOOLEAN_YES) {
+ if(unlikely(!d->st_compressed)) {
+
+ d->st_compressed = rrdset_create_localhost(
+ d->chart_type_net_compressed
+ , d->chart_id_net_compressed
+ , NULL
+ , d->chart_family
+ , "net.compressed"
+ , "Compressed Packets"
+ , "packets/s"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_NETDEV_NAME
+ , d->priority + 5
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ rrdset_flag_set(d->st_compressed, RRDSET_FLAG_DETAIL);
+
+ d->rd_rcompressed = rrddim_add(d->st_compressed, "received", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ d->rd_tcompressed = rrddim_add(d->st_compressed, "sent", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+
+ if(d->flipped) {
+ // flip receive/transmit
+
+ RRDDIM *td = d->rd_rcompressed;
+ d->rd_rcompressed = d->rd_tcompressed;
+ d->rd_tcompressed = td;
+ }
+ }
+ else rrdset_next(d->st_compressed);
+
+ rrddim_set_by_pointer(d->st_compressed, d->rd_rcompressed, (collected_number)d->rcompressed);
+ rrddim_set_by_pointer(d->st_compressed, d->rd_tcompressed, (collected_number)d->tcompressed);
+ rrdset_done(d->st_compressed);
+ }
+
+ // --------------------------------------------------------------------
+
+ if(unlikely((d->do_events == CONFIG_BOOLEAN_AUTO && (d->rframe || d->tcollisions || d->tcarrier))))
+ d->do_events = CONFIG_BOOLEAN_YES;
+
+ if(d->do_events == CONFIG_BOOLEAN_YES) {
+ if(unlikely(!d->st_events)) {
+
+ d->st_events = rrdset_create_localhost(
+ d->chart_type_net_events
+ , d->chart_id_net_events
+ , NULL
+ , d->chart_family
+ , "net.events"
+ , "Network Interface Events"
+ , "events/s"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_NETDEV_NAME
+ , d->priority + 6
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ rrdset_flag_set(d->st_events, RRDSET_FLAG_DETAIL);
+
+ d->rd_rframe = rrddim_add(d->st_events, "frames", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ d->rd_tcollisions = rrddim_add(d->st_events, "collisions", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ d->rd_tcarrier = rrddim_add(d->st_events, "carrier", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else rrdset_next(d->st_events);
+
+ rrddim_set_by_pointer(d->st_events, d->rd_rframe, (collected_number)d->rframe);
+ rrddim_set_by_pointer(d->st_events, d->rd_tcollisions, (collected_number)d->tcollisions);
+ rrddim_set_by_pointer(d->st_events, d->rd_tcarrier, (collected_number)d->tcarrier);
+ rrdset_done(d->st_events);
+ }
+ }
+
+ if(do_bandwidth == CONFIG_BOOLEAN_YES || (do_bandwidth == CONFIG_BOOLEAN_AUTO && (system_rbytes || system_tbytes))) {
+ do_bandwidth = CONFIG_BOOLEAN_YES;
+ static RRDSET *st_system_net = NULL;
+ static RRDDIM *rd_in = NULL, *rd_out = NULL;
+
+ if(unlikely(!st_system_net)) {
+ st_system_net = rrdset_create_localhost(
+ "system"
+ , "net"
+ , NULL
+ , "network"
+ , NULL
+ , "Physical Network Interfaces Aggregated Bandwidth"
+ , "kilobits/s"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_NETDEV_NAME
+ , NETDATA_CHART_PRIO_SYSTEM_NET
+ , update_every
+ , RRDSET_TYPE_AREA
+ );
+
+ rd_in = rrddim_add(st_system_net, "InOctets", "received", 8, BITS_IN_A_KILOBIT, RRD_ALGORITHM_INCREMENTAL);
+ rd_out = rrddim_add(st_system_net, "OutOctets", "sent", -8, BITS_IN_A_KILOBIT, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else
+ rrdset_next(st_system_net);
+
+ rrddim_set_by_pointer(st_system_net, rd_in, (collected_number)system_rbytes);
+ rrddim_set_by_pointer(st_system_net, rd_out, (collected_number)system_tbytes);
+
+ rrdset_done(st_system_net);
+ }
+
+ netdev_cleanup();
+
+ return 0;
+}
diff --git a/collectors/proc.plugin/proc_net_ip_vs_stats.c b/collectors/proc.plugin/proc_net_ip_vs_stats.c
new file mode 100644
index 000000000..43dcf2a88
--- /dev/null
+++ b/collectors/proc.plugin/proc_net_ip_vs_stats.c
@@ -0,0 +1,133 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "plugin_proc.h"
+
+#define RRD_TYPE_NET_IPVS "ipvs"
+#define PLUGIN_PROC_MODULE_NET_IPVS_NAME "/proc/net/ip_vs_stats"
+#define CONFIG_SECTION_PLUGIN_PROC_NET_IPVS "plugin:" PLUGIN_PROC_CONFIG_NAME ":" PLUGIN_PROC_MODULE_NET_IPVS_NAME
+
+int do_proc_net_ip_vs_stats(int update_every, usec_t dt) {
+ (void)dt;
+ static int do_bandwidth = -1, do_sockets = -1, do_packets = -1;
+ static procfile *ff = NULL;
+
+ if(do_bandwidth == -1) do_bandwidth = config_get_boolean(CONFIG_SECTION_PLUGIN_PROC_NET_IPVS, "IPVS bandwidth", 1);
+ if(do_sockets == -1) do_sockets = config_get_boolean(CONFIG_SECTION_PLUGIN_PROC_NET_IPVS, "IPVS connections", 1);
+ if(do_packets == -1) do_packets = config_get_boolean(CONFIG_SECTION_PLUGIN_PROC_NET_IPVS, "IPVS packets", 1);
+
+ if(!ff) {
+ char filename[FILENAME_MAX + 1];
+ snprintfz(filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/proc/net/ip_vs_stats");
+ ff = procfile_open(config_get(CONFIG_SECTION_PLUGIN_PROC_NET_IPVS, "filename to monitor", filename), " \t,:|", PROCFILE_FLAG_DEFAULT);
+ }
+ if(!ff) return 1;
+
+ ff = procfile_readall(ff);
+ if(!ff) return 0; // return 0, so that we retry reading it on the next iteration
+
+ // make sure we have 3 lines
+ if(procfile_lines(ff) < 3) return 1;
+
+ // make sure we have 5 words on the 3rd line
+ if(procfile_linewords(ff, 2) < 5) return 1;
+
+ unsigned long long entries, InPackets, OutPackets, InBytes, OutBytes;
+
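+ // /proc/net/ip_vs_stats reports all values in hexadecimal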
+ entries = strtoull(procfile_lineword(ff, 2, 0), NULL, 16);
+ InPackets = strtoull(procfile_lineword(ff, 2, 1), NULL, 16);
+ OutPackets = strtoull(procfile_lineword(ff, 2, 2), NULL, 16);
+ InBytes = strtoull(procfile_lineword(ff, 2, 3), NULL, 16);
+ OutBytes = strtoull(procfile_lineword(ff, 2, 4), NULL, 16);
+
+
+ // --------------------------------------------------------------------
+
+ if(do_sockets) {
+ static RRDSET *st = NULL;
+
+ if(unlikely(!st)) {
+ st = rrdset_create_localhost(
+ RRD_TYPE_NET_IPVS
+ , "sockets"
+ , NULL
+ , RRD_TYPE_NET_IPVS
+ , NULL
+ , "IPVS New Connections"
+ , "connections/s"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_NET_IPVS_NAME
+ , NETDATA_CHART_PRIO_IPVS_SOCKETS
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ rrddim_add(st, "connections", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else rrdset_next(st);
+
+ rrddim_set(st, "connections", entries);
+ rrdset_done(st);
+ }
+
+ // --------------------------------------------------------------------
+
+ if(do_packets) {
+ static RRDSET *st = NULL;
+ if(unlikely(!st)) {
+ st = rrdset_create_localhost(
+ RRD_TYPE_NET_IPVS
+ , "packets"
+ , NULL
+ , RRD_TYPE_NET_IPVS
+ , NULL
+ , "IPVS Packets"
+ , "packets/s"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_NET_IPVS_NAME
+ , NETDATA_CHART_PRIO_IPVS_PACKETS
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ rrddim_add(st, "received", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rrddim_add(st, "sent", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else rrdset_next(st);
+
+ rrddim_set(st, "received", InPackets);
+ rrddim_set(st, "sent", OutPackets);
+ rrdset_done(st);
+ }
+
+ // --------------------------------------------------------------------
+
+ if(do_bandwidth) {
+ static RRDSET *st = NULL;
+ if(unlikely(!st)) {
+ st = rrdset_create_localhost(
+ RRD_TYPE_NET_IPVS
+ , "net"
+ , NULL
+ , RRD_TYPE_NET_IPVS
+ , NULL
+ , "IPVS Bandwidth"
+ , "kilobits/s"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_NET_IPVS_NAME
+ , NETDATA_CHART_PRIO_IPVS_NET
+ , update_every
+ , RRDSET_TYPE_AREA
+ );
+
+ rrddim_add(st, "received", NULL, 8, BITS_IN_A_KILOBIT, RRD_ALGORITHM_INCREMENTAL);
+ rrddim_add(st, "sent", NULL, -8, BITS_IN_A_KILOBIT, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else rrdset_next(st);
+
+ rrddim_set(st, "received", InBytes);
+ rrddim_set(st, "sent", OutBytes);
+ rrdset_done(st);
+ }
+
+ return 0;
+}
diff --git a/collectors/proc.plugin/proc_net_netstat.c b/collectors/proc.plugin/proc_net_netstat.c
new file mode 100644
index 000000000..2dc3c59c0
--- /dev/null
+++ b/collectors/proc.plugin/proc_net_netstat.c
@@ -0,0 +1,818 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "plugin_proc.h"
+
+#define RRD_TYPE_NET_NETSTAT "ip"
+#define PLUGIN_PROC_MODULE_NETSTAT_NAME "/proc/net/netstat"
+#define CONFIG_SECTION_PLUGIN_PROC_NETSTAT "plugin:" PLUGIN_PROC_CONFIG_NAME ":" PLUGIN_PROC_MODULE_NETSTAT_NAME
+
+unsigned long long tcpext_TCPSynRetrans = 0;
+
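+// each protocol in /proc/net/netstat appears as a header line of counter names followed by a matching line of values;
+// walk the two lines in parallel and hand each name/value pair to the ARL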
+static void parse_line_pair(procfile *ff, ARL_BASE *base, size_t header_line, size_t values_line) {
+ size_t hwords = procfile_linewords(ff, header_line);
+ size_t vwords = procfile_linewords(ff, values_line);
+ size_t w;
+
+ if(unlikely(vwords > hwords)) {
+ error("File /proc/net/netstat on header line %zu has %zu words, but on value line %zu has %zu words.", header_line, hwords, values_line, vwords);
+ vwords = hwords;
+ }
+
+ for(w = 1; w < vwords ;w++) {
+ if(unlikely(arl_check(base, procfile_lineword(ff, header_line, w), procfile_lineword(ff, values_line, w))))
+ break;
+ }
+}
+
+int do_proc_net_netstat(int update_every, usec_t dt) {
+ (void)dt;
+
+ static int do_bandwidth = -1, do_inerrors = -1, do_mcast = -1, do_bcast = -1, do_mcast_p = -1, do_bcast_p = -1, do_ecn = -1, \
+ do_tcpext_reorder = -1, do_tcpext_syscookies = -1, do_tcpext_ofo = -1, do_tcpext_connaborts = -1, do_tcpext_memory = -1,
+ do_tcpext_syn_queue = -1, do_tcpext_accept_queue = -1;
+
+ static uint32_t hash_ipext = 0, hash_tcpext = 0;
+ static procfile *ff = NULL;
+
+ static ARL_BASE *arl_tcpext = NULL;
+ static ARL_BASE *arl_ipext = NULL;
+
+ // --------------------------------------------------------------------
+ // IP
+
+ // IP bandwidth
+ static unsigned long long ipext_InOctets = 0;
+ static unsigned long long ipext_OutOctets = 0;
+
+ // IP input errors
+ static unsigned long long ipext_InNoRoutes = 0;
+ static unsigned long long ipext_InTruncatedPkts = 0;
+ static unsigned long long ipext_InCsumErrors = 0;
+
+ // IP multicast bandwidth
+ static unsigned long long ipext_InMcastOctets = 0;
+ static unsigned long long ipext_OutMcastOctets = 0;
+
+ // IP multicast packets
+ static unsigned long long ipext_InMcastPkts = 0;
+ static unsigned long long ipext_OutMcastPkts = 0;
+
+ // IP broadcast bandwidth
+ static unsigned long long ipext_InBcastOctets = 0;
+ static unsigned long long ipext_OutBcastOctets = 0;
+
+ // IP broadcast packets
+ static unsigned long long ipext_InBcastPkts = 0;
+ static unsigned long long ipext_OutBcastPkts = 0;
+
+ // IP ECN
+ static unsigned long long ipext_InNoECTPkts = 0;
+ static unsigned long long ipext_InECT1Pkts = 0;
+ static unsigned long long ipext_InECT0Pkts = 0;
+ static unsigned long long ipext_InCEPkts = 0;
+
+ // --------------------------------------------------------------------
+ // IP TCP
+
+ // IP TCP Reordering
+ static unsigned long long tcpext_TCPRenoReorder = 0;
+ static unsigned long long tcpext_TCPFACKReorder = 0;
+ static unsigned long long tcpext_TCPSACKReorder = 0;
+ static unsigned long long tcpext_TCPTSReorder = 0;
+
+ // IP TCP SYN Cookies
+ static unsigned long long tcpext_SyncookiesSent = 0;
+ static unsigned long long tcpext_SyncookiesRecv = 0;
+ static unsigned long long tcpext_SyncookiesFailed = 0;
+
+ // IP TCP Out Of Order Queue
+ // http://www.spinics.net/lists/netdev/msg204696.html
+ static unsigned long long tcpext_TCPOFOQueue = 0; // Number of packets queued in OFO queue
+ static unsigned long long tcpext_TCPOFODrop = 0; // Number of packets meant to be queued in OFO but dropped because socket rcvbuf limit hit.
+ static unsigned long long tcpext_TCPOFOMerge = 0; // Number of packets in OFO that were merged with other packets.
+ static unsigned long long tcpext_OfoPruned = 0; // packets dropped from out-of-order queue because of socket buffer overrun
+
+ // IP TCP connection resets
+ // https://github.com/ecki/net-tools/blob/bd8bceaed2311651710331a7f8990c3e31be9840/statistics.c
+ static unsigned long long tcpext_TCPAbortOnData = 0; // connections reset due to unexpected data
+ static unsigned long long tcpext_TCPAbortOnClose = 0; // connections reset due to early user close
+ static unsigned long long tcpext_TCPAbortOnMemory = 0; // connections aborted due to memory pressure
+ static unsigned long long tcpext_TCPAbortOnTimeout = 0; // connections aborted due to timeout
+ static unsigned long long tcpext_TCPAbortOnLinger = 0; // connections aborted after user close in linger timeout
+ static unsigned long long tcpext_TCPAbortFailed = 0; // times unable to send RST due to no memory
+
+ // https://perfchron.com/2015/12/26/investigating-linux-network-issues-with-netstat-and-nstat/
+ static unsigned long long tcpext_ListenOverflows = 0; // times the listen queue of a socket overflowed
+ static unsigned long long tcpext_ListenDrops = 0; // SYNs to LISTEN sockets ignored
+
+ // IP TCP memory pressures
+ static unsigned long long tcpext_TCPMemoryPressures = 0;
+
+ static unsigned long long tcpext_TCPReqQFullDrop = 0;
+ static unsigned long long tcpext_TCPReqQFullDoCookies = 0;
+
+ // shared: tcpext_TCPSynRetrans
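+    // tcpext_TCPSynRetrans is not declared in this function (hence "shared" above);
+    // it is still registered with the ARL below, so it gets updated when the
+    // TcpExt line is parsed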
+
+
+ if(unlikely(!arl_ipext)) {
+ hash_ipext = simple_hash("IpExt");
+ hash_tcpext = simple_hash("TcpExt");
+
+ do_bandwidth = config_get_boolean_ondemand(CONFIG_SECTION_PLUGIN_PROC_NETSTAT, "bandwidth", CONFIG_BOOLEAN_AUTO);
+ do_inerrors = config_get_boolean_ondemand(CONFIG_SECTION_PLUGIN_PROC_NETSTAT, "input errors", CONFIG_BOOLEAN_AUTO);
+ do_mcast = config_get_boolean_ondemand(CONFIG_SECTION_PLUGIN_PROC_NETSTAT, "multicast bandwidth", CONFIG_BOOLEAN_AUTO);
+ do_bcast = config_get_boolean_ondemand(CONFIG_SECTION_PLUGIN_PROC_NETSTAT, "broadcast bandwidth", CONFIG_BOOLEAN_AUTO);
+ do_mcast_p = config_get_boolean_ondemand(CONFIG_SECTION_PLUGIN_PROC_NETSTAT, "multicast packets", CONFIG_BOOLEAN_AUTO);
+ do_bcast_p = config_get_boolean_ondemand(CONFIG_SECTION_PLUGIN_PROC_NETSTAT, "broadcast packets", CONFIG_BOOLEAN_AUTO);
+ do_ecn = config_get_boolean_ondemand(CONFIG_SECTION_PLUGIN_PROC_NETSTAT, "ECN packets", CONFIG_BOOLEAN_AUTO);
+
+ do_tcpext_reorder = config_get_boolean_ondemand(CONFIG_SECTION_PLUGIN_PROC_NETSTAT, "TCP reorders", CONFIG_BOOLEAN_AUTO);
+ do_tcpext_syscookies = config_get_boolean_ondemand(CONFIG_SECTION_PLUGIN_PROC_NETSTAT, "TCP SYN cookies", CONFIG_BOOLEAN_AUTO);
+ do_tcpext_ofo = config_get_boolean_ondemand(CONFIG_SECTION_PLUGIN_PROC_NETSTAT, "TCP out-of-order queue", CONFIG_BOOLEAN_AUTO);
+ do_tcpext_connaborts = config_get_boolean_ondemand(CONFIG_SECTION_PLUGIN_PROC_NETSTAT, "TCP connection aborts", CONFIG_BOOLEAN_AUTO);
+ do_tcpext_memory = config_get_boolean_ondemand(CONFIG_SECTION_PLUGIN_PROC_NETSTAT, "TCP memory pressures", CONFIG_BOOLEAN_AUTO);
+
+ do_tcpext_syn_queue = config_get_boolean_ondemand(CONFIG_SECTION_PLUGIN_PROC_NETSTAT, "TCP SYN queue", CONFIG_BOOLEAN_AUTO);
+ do_tcpext_accept_queue = config_get_boolean_ondemand(CONFIG_SECTION_PLUGIN_PROC_NETSTAT, "TCP accept queue", CONFIG_BOOLEAN_AUTO);
+
+ arl_ipext = arl_create("netstat/ipext", NULL, 60);
+ arl_tcpext = arl_create("netstat/tcpext", NULL, 60);
+
+ // --------------------------------------------------------------------
+ // IP
+
+ if(do_bandwidth != CONFIG_BOOLEAN_NO) {
+ arl_expect(arl_ipext, "InOctets", &ipext_InOctets);
+ arl_expect(arl_ipext, "OutOctets", &ipext_OutOctets);
+ }
+
+ if(do_inerrors != CONFIG_BOOLEAN_NO) {
+ arl_expect(arl_ipext, "InNoRoutes", &ipext_InNoRoutes);
+ arl_expect(arl_ipext, "InTruncatedPkts", &ipext_InTruncatedPkts);
+ arl_expect(arl_ipext, "InCsumErrors", &ipext_InCsumErrors);
+ }
+
+ if(do_mcast != CONFIG_BOOLEAN_NO) {
+ arl_expect(arl_ipext, "InMcastOctets", &ipext_InMcastOctets);
+ arl_expect(arl_ipext, "OutMcastOctets", &ipext_OutMcastOctets);
+ }
+
+ if(do_mcast_p != CONFIG_BOOLEAN_NO) {
+ arl_expect(arl_ipext, "InMcastPkts", &ipext_InMcastPkts);
+ arl_expect(arl_ipext, "OutMcastPkts", &ipext_OutMcastPkts);
+ }
+
+        if(do_bcast != CONFIG_BOOLEAN_NO) {
+            arl_expect(arl_ipext, "InBcastOctets", &ipext_InBcastOctets);
+            arl_expect(arl_ipext, "OutBcastOctets", &ipext_OutBcastOctets);
+        }
+
+        if(do_bcast_p != CONFIG_BOOLEAN_NO) {
+            arl_expect(arl_ipext, "InBcastPkts", &ipext_InBcastPkts);
+            arl_expect(arl_ipext, "OutBcastPkts", &ipext_OutBcastPkts);
+        }
+
+ if(do_ecn != CONFIG_BOOLEAN_NO) {
+ arl_expect(arl_ipext, "InNoECTPkts", &ipext_InNoECTPkts);
+ arl_expect(arl_ipext, "InECT1Pkts", &ipext_InECT1Pkts);
+ arl_expect(arl_ipext, "InECT0Pkts", &ipext_InECT0Pkts);
+ arl_expect(arl_ipext, "InCEPkts", &ipext_InCEPkts);
+ }
+
+ // --------------------------------------------------------------------
+ // IP TCP
+
+ if(do_tcpext_reorder != CONFIG_BOOLEAN_NO) {
+ arl_expect(arl_tcpext, "TCPFACKReorder", &tcpext_TCPFACKReorder);
+ arl_expect(arl_tcpext, "TCPSACKReorder", &tcpext_TCPSACKReorder);
+ arl_expect(arl_tcpext, "TCPRenoReorder", &tcpext_TCPRenoReorder);
+ arl_expect(arl_tcpext, "TCPTSReorder", &tcpext_TCPTSReorder);
+ }
+
+ if(do_tcpext_syscookies != CONFIG_BOOLEAN_NO) {
+ arl_expect(arl_tcpext, "SyncookiesSent", &tcpext_SyncookiesSent);
+ arl_expect(arl_tcpext, "SyncookiesRecv", &tcpext_SyncookiesRecv);
+ arl_expect(arl_tcpext, "SyncookiesFailed", &tcpext_SyncookiesFailed);
+ }
+
+ if(do_tcpext_ofo != CONFIG_BOOLEAN_NO) {
+ arl_expect(arl_tcpext, "TCPOFOQueue", &tcpext_TCPOFOQueue);
+ arl_expect(arl_tcpext, "TCPOFODrop", &tcpext_TCPOFODrop);
+ arl_expect(arl_tcpext, "TCPOFOMerge", &tcpext_TCPOFOMerge);
+ arl_expect(arl_tcpext, "OfoPruned", &tcpext_OfoPruned);
+ }
+
+ if(do_tcpext_connaborts != CONFIG_BOOLEAN_NO) {
+ arl_expect(arl_tcpext, "TCPAbortOnData", &tcpext_TCPAbortOnData);
+ arl_expect(arl_tcpext, "TCPAbortOnClose", &tcpext_TCPAbortOnClose);
+ arl_expect(arl_tcpext, "TCPAbortOnMemory", &tcpext_TCPAbortOnMemory);
+ arl_expect(arl_tcpext, "TCPAbortOnTimeout", &tcpext_TCPAbortOnTimeout);
+ arl_expect(arl_tcpext, "TCPAbortOnLinger", &tcpext_TCPAbortOnLinger);
+ arl_expect(arl_tcpext, "TCPAbortFailed", &tcpext_TCPAbortFailed);
+ }
+
+ if(do_tcpext_memory != CONFIG_BOOLEAN_NO) {
+ arl_expect(arl_tcpext, "TCPMemoryPressures", &tcpext_TCPMemoryPressures);
+ }
+
+ if(do_tcpext_accept_queue != CONFIG_BOOLEAN_NO) {
+ arl_expect(arl_tcpext, "ListenOverflows", &tcpext_ListenOverflows);
+ arl_expect(arl_tcpext, "ListenDrops", &tcpext_ListenDrops);
+ }
+
+ if(do_tcpext_syn_queue != CONFIG_BOOLEAN_NO) {
+ arl_expect(arl_tcpext, "TCPReqQFullDrop", &tcpext_TCPReqQFullDrop);
+ arl_expect(arl_tcpext, "TCPReqQFullDoCookies", &tcpext_TCPReqQFullDoCookies);
+ }
+
+ // shared metrics
+ arl_expect(arl_tcpext, "TCPSynRetrans", &tcpext_TCPSynRetrans);
+ }
+
+ if(unlikely(!ff)) {
+ char filename[FILENAME_MAX + 1];
+ snprintfz(filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/proc/net/netstat");
+ ff = procfile_open(config_get(CONFIG_SECTION_PLUGIN_PROC_NETSTAT, "filename to monitor", filename), " \t:", PROCFILE_FLAG_DEFAULT);
+ if(unlikely(!ff)) return 1;
+ }
+
+ ff = procfile_readall(ff);
+ if(unlikely(!ff)) return 0; // we return 0, so that we will retry to open it next time
+
+ size_t lines = procfile_lines(ff), l;
+ size_t words;
+
+ arl_begin(arl_ipext);
+ arl_begin(arl_tcpext);
+
+ for(l = 0; l < lines ;l++) {
+ char *key = procfile_lineword(ff, l, 0);
+ uint32_t hash = simple_hash(key);
+
+ if(unlikely(hash == hash_ipext && strcmp(key, "IpExt") == 0)) {
+ size_t h = l++;
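+            // 'h' is the line carrying the field names; 'l' now points to the line
+            // carrying the values - parse_line_pair() below feeds each name/value
+            // pair to the ARL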
+
+ words = procfile_linewords(ff, l);
+ if(unlikely(words < 2)) {
+ error("Cannot read /proc/net/netstat IpExt line. Expected 2+ params, read %zu.", words);
+ continue;
+ }
+
+ parse_line_pair(ff, arl_ipext, h, l);
+
+ // --------------------------------------------------------------------
+
+ if(do_bandwidth == CONFIG_BOOLEAN_YES || (do_bandwidth == CONFIG_BOOLEAN_AUTO && (ipext_InOctets || ipext_OutOctets))) {
+ do_bandwidth = CONFIG_BOOLEAN_YES;
+ static RRDSET *st_system_ip = NULL;
+ static RRDDIM *rd_in = NULL, *rd_out = NULL;
+
+ if(unlikely(!st_system_ip)) {
+ st_system_ip = rrdset_create_localhost(
+ "system"
+ , RRD_TYPE_NET_NETSTAT
+ , NULL
+ , "network"
+ , NULL
+ , "IP Bandwidth"
+ , "kilobits/s"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_NETSTAT_NAME
+ , NETDATA_CHART_PRIO_SYSTEM_IP
+ , update_every
+ , RRDSET_TYPE_AREA
+ );
+
+ rd_in = rrddim_add(st_system_ip, "InOctets", "received", 8, BITS_IN_A_KILOBIT, RRD_ALGORITHM_INCREMENTAL);
+ rd_out = rrddim_add(st_system_ip, "OutOctets", "sent", -8, BITS_IN_A_KILOBIT, RRD_ALGORITHM_INCREMENTAL);
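+                    // octet counters are charted as kilobits/s: the multiplier 8
+                    // converts bytes to bits, the divisor scales bits to kilobits,
+                    // and the negative multiplier on "sent" draws it below zero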
+ }
+ else
+ rrdset_next(st_system_ip);
+
+ rrddim_set_by_pointer(st_system_ip, rd_in, ipext_InOctets);
+ rrddim_set_by_pointer(st_system_ip, rd_out, ipext_OutOctets);
+
+ rrdset_done(st_system_ip);
+ }
+
+ // --------------------------------------------------------------------
+
+ if(do_inerrors == CONFIG_BOOLEAN_YES || (do_inerrors == CONFIG_BOOLEAN_AUTO && (ipext_InNoRoutes || ipext_InTruncatedPkts))) {
+ do_inerrors = CONFIG_BOOLEAN_YES;
+ static RRDSET *st_ip_inerrors = NULL;
+ static RRDDIM *rd_noroutes = NULL, *rd_truncated = NULL, *rd_checksum = NULL;
+
+ if(unlikely(!st_ip_inerrors)) {
+ st_ip_inerrors = rrdset_create_localhost(
+ RRD_TYPE_NET_NETSTAT
+ , "inerrors"
+ , NULL
+ , "errors"
+ , NULL
+ , "IP Input Errors"
+ , "packets/s"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_NETSTAT_NAME
+ , NETDATA_CHART_PRIO_IP_ERRORS
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ rrdset_flag_set(st_ip_inerrors, RRDSET_FLAG_DETAIL);
+
+ rd_noroutes = rrddim_add(st_ip_inerrors, "InNoRoutes", "noroutes", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_truncated = rrddim_add(st_ip_inerrors, "InTruncatedPkts", "truncated", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_checksum = rrddim_add(st_ip_inerrors, "InCsumErrors", "checksum", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else
+ rrdset_next(st_ip_inerrors);
+
+ rrddim_set_by_pointer(st_ip_inerrors, rd_noroutes, ipext_InNoRoutes);
+ rrddim_set_by_pointer(st_ip_inerrors, rd_truncated, ipext_InTruncatedPkts);
+ rrddim_set_by_pointer(st_ip_inerrors, rd_checksum, ipext_InCsumErrors);
+
+ rrdset_done(st_ip_inerrors);
+ }
+
+ // --------------------------------------------------------------------
+
+ if(do_mcast == CONFIG_BOOLEAN_YES || (do_mcast == CONFIG_BOOLEAN_AUTO && (ipext_InMcastOctets || ipext_OutMcastOctets))) {
+ do_mcast = CONFIG_BOOLEAN_YES;
+ static RRDSET *st_ip_mcast = NULL;
+ static RRDDIM *rd_in = NULL, *rd_out = NULL;
+
+ if(unlikely(!st_ip_mcast)) {
+ st_ip_mcast = rrdset_create_localhost(
+ RRD_TYPE_NET_NETSTAT
+ , "mcast"
+ , NULL
+ , "multicast"
+ , NULL
+ , "IP Multicast Bandwidth"
+ , "kilobits/s"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_NETSTAT_NAME
+ , NETDATA_CHART_PRIO_IP_MCAST
+ , update_every
+ , RRDSET_TYPE_AREA
+ );
+
+ rrdset_flag_set(st_ip_mcast, RRDSET_FLAG_DETAIL);
+
+ rd_in = rrddim_add(st_ip_mcast, "InMcastOctets", "received", 8, BITS_IN_A_KILOBIT, RRD_ALGORITHM_INCREMENTAL);
+ rd_out = rrddim_add(st_ip_mcast, "OutMcastOctets", "sent", -8, BITS_IN_A_KILOBIT, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else
+ rrdset_next(st_ip_mcast);
+
+ rrddim_set_by_pointer(st_ip_mcast, rd_in, ipext_InMcastOctets);
+ rrddim_set_by_pointer(st_ip_mcast, rd_out, ipext_OutMcastOctets);
+
+ rrdset_done(st_ip_mcast);
+ }
+
+ // --------------------------------------------------------------------
+
+ if(do_bcast == CONFIG_BOOLEAN_YES || (do_bcast == CONFIG_BOOLEAN_AUTO && (ipext_InBcastOctets || ipext_OutBcastOctets))) {
+ do_bcast = CONFIG_BOOLEAN_YES;
+
+ static RRDSET *st_ip_bcast = NULL;
+ static RRDDIM *rd_in = NULL, *rd_out = NULL;
+
+ if(unlikely(!st_ip_bcast)) {
+ st_ip_bcast = rrdset_create_localhost(
+ RRD_TYPE_NET_NETSTAT
+ , "bcast"
+ , NULL
+ , "broadcast"
+ , NULL
+ , "IP Broadcast Bandwidth"
+ , "kilobits/s"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_NETSTAT_NAME
+ , NETDATA_CHART_PRIO_IP_BCAST
+ , update_every
+ , RRDSET_TYPE_AREA
+ );
+
+ rrdset_flag_set(st_ip_bcast, RRDSET_FLAG_DETAIL);
+
+ rd_in = rrddim_add(st_ip_bcast, "InBcastOctets", "received", 8, BITS_IN_A_KILOBIT, RRD_ALGORITHM_INCREMENTAL);
+ rd_out = rrddim_add(st_ip_bcast, "OutBcastOctets", "sent", -8, BITS_IN_A_KILOBIT, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else
+ rrdset_next(st_ip_bcast);
+
+ rrddim_set_by_pointer(st_ip_bcast, rd_in, ipext_InBcastOctets);
+ rrddim_set_by_pointer(st_ip_bcast, rd_out, ipext_OutBcastOctets);
+
+ rrdset_done(st_ip_bcast);
+ }
+
+ // --------------------------------------------------------------------
+
+ if(do_mcast_p == CONFIG_BOOLEAN_YES || (do_mcast_p == CONFIG_BOOLEAN_AUTO && (ipext_InMcastPkts || ipext_OutMcastPkts))) {
+ do_mcast_p = CONFIG_BOOLEAN_YES;
+
+ static RRDSET *st_ip_mcastpkts = NULL;
+ static RRDDIM *rd_in = NULL, *rd_out = NULL;
+
+ if(unlikely(!st_ip_mcastpkts)) {
+ st_ip_mcastpkts = rrdset_create_localhost(
+ RRD_TYPE_NET_NETSTAT
+ , "mcastpkts"
+ , NULL
+ , "multicast"
+ , NULL
+ , "IP Multicast Packets"
+ , "packets/s"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_NETSTAT_NAME
+ , NETDATA_CHART_PRIO_IP_MCAST_PACKETS
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ rrdset_flag_set(st_ip_mcastpkts, RRDSET_FLAG_DETAIL);
+
+ rd_in = rrddim_add(st_ip_mcastpkts, "InMcastPkts", "received", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_out = rrddim_add(st_ip_mcastpkts, "OutMcastPkts", "sent", -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else rrdset_next(st_ip_mcastpkts);
+
+ rrddim_set_by_pointer(st_ip_mcastpkts, rd_in, ipext_InMcastPkts);
+ rrddim_set_by_pointer(st_ip_mcastpkts, rd_out, ipext_OutMcastPkts);
+
+ rrdset_done(st_ip_mcastpkts);
+ }
+
+ // --------------------------------------------------------------------
+
+ if(do_bcast_p == CONFIG_BOOLEAN_YES || (do_bcast_p == CONFIG_BOOLEAN_AUTO && (ipext_InBcastPkts || ipext_OutBcastPkts))) {
+ do_bcast_p = CONFIG_BOOLEAN_YES;
+
+ static RRDSET *st_ip_bcastpkts = NULL;
+ static RRDDIM *rd_in = NULL, *rd_out = NULL;
+
+ if(unlikely(!st_ip_bcastpkts)) {
+ st_ip_bcastpkts = rrdset_create_localhost(
+ RRD_TYPE_NET_NETSTAT
+ , "bcastpkts"
+ , NULL
+ , "broadcast"
+ , NULL
+ , "IP Broadcast Packets"
+ , "packets/s"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_NETSTAT_NAME
+ , NETDATA_CHART_PRIO_IP_BCAST_PACKETS
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ rrdset_flag_set(st_ip_bcastpkts, RRDSET_FLAG_DETAIL);
+
+ rd_in = rrddim_add(st_ip_bcastpkts, "InBcastPkts", "received", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_out = rrddim_add(st_ip_bcastpkts, "OutBcastPkts", "sent", -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else
+ rrdset_next(st_ip_bcastpkts);
+
+ rrddim_set_by_pointer(st_ip_bcastpkts, rd_in, ipext_InBcastPkts);
+ rrddim_set_by_pointer(st_ip_bcastpkts, rd_out, ipext_OutBcastPkts);
+
+ rrdset_done(st_ip_bcastpkts);
+ }
+
+ // --------------------------------------------------------------------
+
+ if(do_ecn == CONFIG_BOOLEAN_YES || (do_ecn == CONFIG_BOOLEAN_AUTO && (ipext_InCEPkts || ipext_InECT0Pkts || ipext_InECT1Pkts || ipext_InNoECTPkts))) {
+ do_ecn = CONFIG_BOOLEAN_YES;
+
+ static RRDSET *st_ecnpkts = NULL;
+ static RRDDIM *rd_cep = NULL, *rd_noectp = NULL, *rd_ectp0 = NULL, *rd_ectp1 = NULL;
+
+ if(unlikely(!st_ecnpkts)) {
+ st_ecnpkts = rrdset_create_localhost(
+ RRD_TYPE_NET_NETSTAT
+ , "ecnpkts"
+ , NULL
+ , "ecn"
+ , NULL
+ , "IP ECN Statistics"
+ , "packets/s"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_NETSTAT_NAME
+ , NETDATA_CHART_PRIO_IP_ECN
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ rrdset_flag_set(st_ecnpkts, RRDSET_FLAG_DETAIL);
+
+ rd_cep = rrddim_add(st_ecnpkts, "InCEPkts", "CEP", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_noectp = rrddim_add(st_ecnpkts, "InNoECTPkts", "NoECTP", -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_ectp0 = rrddim_add(st_ecnpkts, "InECT0Pkts", "ECTP0", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_ectp1 = rrddim_add(st_ecnpkts, "InECT1Pkts", "ECTP1", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else rrdset_next(st_ecnpkts);
+
+ rrddim_set_by_pointer(st_ecnpkts, rd_cep, ipext_InCEPkts);
+ rrddim_set_by_pointer(st_ecnpkts, rd_noectp, ipext_InNoECTPkts);
+ rrddim_set_by_pointer(st_ecnpkts, rd_ectp0, ipext_InECT0Pkts);
+ rrddim_set_by_pointer(st_ecnpkts, rd_ectp1, ipext_InECT1Pkts);
+
+ rrdset_done(st_ecnpkts);
+ }
+ }
+ else if(unlikely(hash == hash_tcpext && strcmp(key, "TcpExt") == 0)) {
+ size_t h = l++;
+
+ words = procfile_linewords(ff, l);
+ if(unlikely(words < 2)) {
+ error("Cannot read /proc/net/netstat TcpExt line. Expected 2+ params, read %zu.", words);
+ continue;
+ }
+
+ parse_line_pair(ff, arl_tcpext, h, l);
+
+ // --------------------------------------------------------------------
+
+ if(do_tcpext_memory == CONFIG_BOOLEAN_YES || (do_tcpext_memory == CONFIG_BOOLEAN_AUTO && (tcpext_TCPMemoryPressures))) {
+ do_tcpext_memory = CONFIG_BOOLEAN_YES;
+
+ static RRDSET *st_tcpmemorypressures = NULL;
+ static RRDDIM *rd_pressures = NULL;
+
+ if(unlikely(!st_tcpmemorypressures)) {
+ st_tcpmemorypressures = rrdset_create_localhost(
+ RRD_TYPE_NET_NETSTAT
+ , "tcpmemorypressures"
+ , NULL
+ , "tcp"
+ , NULL
+ , "TCP Memory Pressures"
+ , "events/s"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_NETSTAT_NAME
+ , NETDATA_CHART_PRIO_IP_TCP_MEM
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ rd_pressures = rrddim_add(st_tcpmemorypressures, "TCPMemoryPressures", "pressures", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else
+ rrdset_next(st_tcpmemorypressures);
+
+ rrddim_set_by_pointer(st_tcpmemorypressures, rd_pressures, tcpext_TCPMemoryPressures);
+
+ rrdset_done(st_tcpmemorypressures);
+ }
+
+ // --------------------------------------------------------------------
+
+ if(do_tcpext_connaborts == CONFIG_BOOLEAN_YES || (do_tcpext_connaborts == CONFIG_BOOLEAN_AUTO && (tcpext_TCPAbortOnData || tcpext_TCPAbortOnClose || tcpext_TCPAbortOnMemory || tcpext_TCPAbortOnTimeout || tcpext_TCPAbortOnLinger || tcpext_TCPAbortFailed))) {
+ do_tcpext_connaborts = CONFIG_BOOLEAN_YES;
+
+ static RRDSET *st_tcpconnaborts = NULL;
+ static RRDDIM *rd_baddata = NULL, *rd_userclosed = NULL, *rd_nomemory = NULL, *rd_timeout = NULL, *rd_linger = NULL, *rd_failed = NULL;
+
+ if(unlikely(!st_tcpconnaborts)) {
+ st_tcpconnaborts = rrdset_create_localhost(
+ RRD_TYPE_NET_NETSTAT
+ , "tcpconnaborts"
+ , NULL
+ , "tcp"
+ , NULL
+ , "TCP Connection Aborts"
+ , "connections/s"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_NETSTAT_NAME
+ , NETDATA_CHART_PRIO_IP_TCP_CONNABORTS
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ rd_baddata = rrddim_add(st_tcpconnaborts, "TCPAbortOnData", "baddata", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_userclosed = rrddim_add(st_tcpconnaborts, "TCPAbortOnClose", "userclosed", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_nomemory = rrddim_add(st_tcpconnaborts, "TCPAbortOnMemory", "nomemory", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_timeout = rrddim_add(st_tcpconnaborts, "TCPAbortOnTimeout", "timeout", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_linger = rrddim_add(st_tcpconnaborts, "TCPAbortOnLinger", "linger", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_failed = rrddim_add(st_tcpconnaborts, "TCPAbortFailed", "failed", -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else
+ rrdset_next(st_tcpconnaborts);
+
+ rrddim_set_by_pointer(st_tcpconnaborts, rd_baddata, tcpext_TCPAbortOnData);
+ rrddim_set_by_pointer(st_tcpconnaborts, rd_userclosed, tcpext_TCPAbortOnClose);
+ rrddim_set_by_pointer(st_tcpconnaborts, rd_nomemory, tcpext_TCPAbortOnMemory);
+ rrddim_set_by_pointer(st_tcpconnaborts, rd_timeout, tcpext_TCPAbortOnTimeout);
+ rrddim_set_by_pointer(st_tcpconnaborts, rd_linger, tcpext_TCPAbortOnLinger);
+ rrddim_set_by_pointer(st_tcpconnaborts, rd_failed, tcpext_TCPAbortFailed);
+
+ rrdset_done(st_tcpconnaborts);
+ }
+
+ // --------------------------------------------------------------------
+
+ if(do_tcpext_reorder == CONFIG_BOOLEAN_YES || (do_tcpext_reorder == CONFIG_BOOLEAN_AUTO && (tcpext_TCPRenoReorder || tcpext_TCPFACKReorder || tcpext_TCPSACKReorder || tcpext_TCPTSReorder))) {
+ do_tcpext_reorder = CONFIG_BOOLEAN_YES;
+
+ static RRDSET *st_tcpreorders = NULL;
+ static RRDDIM *rd_timestamp = NULL, *rd_sack = NULL, *rd_fack = NULL, *rd_reno = NULL;
+
+ if(unlikely(!st_tcpreorders)) {
+ st_tcpreorders = rrdset_create_localhost(
+ RRD_TYPE_NET_NETSTAT
+ , "tcpreorders"
+ , NULL
+ , "tcp"
+ , NULL
+ , "TCP Reordered Packets by Detection Method"
+ , "packets/s"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_NETSTAT_NAME
+ , NETDATA_CHART_PRIO_IP_TCP_REORDERS
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ rd_timestamp = rrddim_add(st_tcpreorders, "TCPTSReorder", "timestamp", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_sack = rrddim_add(st_tcpreorders, "TCPSACKReorder", "sack", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_fack = rrddim_add(st_tcpreorders, "TCPFACKReorder", "fack", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_reno = rrddim_add(st_tcpreorders, "TCPRenoReorder", "reno", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else
+ rrdset_next(st_tcpreorders);
+
+ rrddim_set_by_pointer(st_tcpreorders, rd_timestamp, tcpext_TCPTSReorder);
+ rrddim_set_by_pointer(st_tcpreorders, rd_sack, tcpext_TCPSACKReorder);
+ rrddim_set_by_pointer(st_tcpreorders, rd_fack, tcpext_TCPFACKReorder);
+ rrddim_set_by_pointer(st_tcpreorders, rd_reno, tcpext_TCPRenoReorder);
+
+ rrdset_done(st_tcpreorders);
+ }
+
+ // --------------------------------------------------------------------
+
+ if(do_tcpext_ofo == CONFIG_BOOLEAN_YES || (do_tcpext_ofo == CONFIG_BOOLEAN_AUTO && (tcpext_TCPOFOQueue || tcpext_TCPOFODrop || tcpext_TCPOFOMerge))) {
+ do_tcpext_ofo = CONFIG_BOOLEAN_YES;
+
+ static RRDSET *st_ip_tcpofo = NULL;
+ static RRDDIM *rd_inqueue = NULL, *rd_dropped = NULL, *rd_merged = NULL, *rd_pruned = NULL;
+
+ if(unlikely(!st_ip_tcpofo)) {
+
+ st_ip_tcpofo = rrdset_create_localhost(
+ RRD_TYPE_NET_NETSTAT
+ , "tcpofo"
+ , NULL
+ , "tcp"
+ , NULL
+ , "TCP Out-Of-Order Queue"
+ , "packets/s"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_NETSTAT_NAME
+ , NETDATA_CHART_PRIO_IP_TCP_OFO
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ rd_inqueue = rrddim_add(st_ip_tcpofo, "TCPOFOQueue", "inqueue", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_dropped = rrddim_add(st_ip_tcpofo, "TCPOFODrop", "dropped", -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_merged = rrddim_add(st_ip_tcpofo, "TCPOFOMerge", "merged", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_pruned = rrddim_add(st_ip_tcpofo, "OfoPruned", "pruned", -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else
+ rrdset_next(st_ip_tcpofo);
+
+ rrddim_set_by_pointer(st_ip_tcpofo, rd_inqueue, tcpext_TCPOFOQueue);
+ rrddim_set_by_pointer(st_ip_tcpofo, rd_dropped, tcpext_TCPOFODrop);
+ rrddim_set_by_pointer(st_ip_tcpofo, rd_merged, tcpext_TCPOFOMerge);
+ rrddim_set_by_pointer(st_ip_tcpofo, rd_pruned, tcpext_OfoPruned);
+
+ rrdset_done(st_ip_tcpofo);
+ }
+
+ // --------------------------------------------------------------------
+
+ if(do_tcpext_syscookies == CONFIG_BOOLEAN_YES || (do_tcpext_syscookies == CONFIG_BOOLEAN_AUTO && (tcpext_SyncookiesSent || tcpext_SyncookiesRecv || tcpext_SyncookiesFailed))) {
+ do_tcpext_syscookies = CONFIG_BOOLEAN_YES;
+
+ static RRDSET *st_syncookies = NULL;
+ static RRDDIM *rd_received = NULL, *rd_sent = NULL, *rd_failed = NULL;
+
+ if(unlikely(!st_syncookies)) {
+
+ st_syncookies = rrdset_create_localhost(
+ RRD_TYPE_NET_NETSTAT
+ , "tcpsyncookies"
+ , NULL
+ , "tcp"
+ , NULL
+ , "TCP SYN Cookies"
+ , "packets/s"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_NETSTAT_NAME
+ , NETDATA_CHART_PRIO_IP_TCP_SYNCOOKIES
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ rd_received = rrddim_add(st_syncookies, "SyncookiesRecv", "received", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_sent = rrddim_add(st_syncookies, "SyncookiesSent", "sent", -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_failed = rrddim_add(st_syncookies, "SyncookiesFailed", "failed", -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else
+ rrdset_next(st_syncookies);
+
+ rrddim_set_by_pointer(st_syncookies, rd_received, tcpext_SyncookiesRecv);
+ rrddim_set_by_pointer(st_syncookies, rd_sent, tcpext_SyncookiesSent);
+ rrddim_set_by_pointer(st_syncookies, rd_failed, tcpext_SyncookiesFailed);
+
+ rrdset_done(st_syncookies);
+ }
+
+ // --------------------------------------------------------------------
+
+ if(do_tcpext_syn_queue == CONFIG_BOOLEAN_YES || (do_tcpext_syn_queue == CONFIG_BOOLEAN_AUTO && (tcpext_TCPReqQFullDrop || tcpext_TCPReqQFullDoCookies))) {
+ do_tcpext_syn_queue = CONFIG_BOOLEAN_YES;
+
+ static RRDSET *st_syn_queue = NULL;
+ static RRDDIM
+ *rd_TCPReqQFullDrop = NULL,
+ *rd_TCPReqQFullDoCookies = NULL;
+
+ if(unlikely(!st_syn_queue)) {
+
+ st_syn_queue = rrdset_create_localhost(
+ RRD_TYPE_NET_NETSTAT
+ , "tcp_syn_queue"
+ , NULL
+ , "tcp"
+ , NULL
+ , "TCP SYN Queue Issues"
+ , "packets/s"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_NETSTAT_NAME
+ , NETDATA_CHART_PRIO_IP_TCP_SYN_QUEUE
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ rd_TCPReqQFullDrop = rrddim_add(st_syn_queue, "TCPReqQFullDrop", "drops", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_TCPReqQFullDoCookies = rrddim_add(st_syn_queue, "TCPReqQFullDoCookies", "cookies", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else
+ rrdset_next(st_syn_queue);
+
+ rrddim_set_by_pointer(st_syn_queue, rd_TCPReqQFullDrop, tcpext_TCPReqQFullDrop);
+ rrddim_set_by_pointer(st_syn_queue, rd_TCPReqQFullDoCookies, tcpext_TCPReqQFullDoCookies);
+
+ rrdset_done(st_syn_queue);
+ }
+
+ // --------------------------------------------------------------------
+
+ if(do_tcpext_accept_queue == CONFIG_BOOLEAN_YES || (do_tcpext_accept_queue == CONFIG_BOOLEAN_AUTO && (tcpext_ListenOverflows || tcpext_ListenDrops))) {
+ do_tcpext_accept_queue = CONFIG_BOOLEAN_YES;
+
+ static RRDSET *st_accept_queue = NULL;
+ static RRDDIM *rd_overflows = NULL,
+ *rd_drops = NULL;
+
+ if(unlikely(!st_accept_queue)) {
+
+ st_accept_queue = rrdset_create_localhost(
+ RRD_TYPE_NET_NETSTAT
+ , "tcp_accept_queue"
+ , NULL
+ , "tcp"
+ , NULL
+ , "TCP Accept Queue Issues"
+ , "packets/s"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_NETSTAT_NAME
+ , NETDATA_CHART_PRIO_IP_TCP_ACCEPT_QUEUE
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ rd_overflows = rrddim_add(st_accept_queue, "ListenOverflows", "overflows", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_drops = rrddim_add(st_accept_queue, "ListenDrops", "drops", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else
+ rrdset_next(st_accept_queue);
+
+ rrddim_set_by_pointer(st_accept_queue, rd_overflows, tcpext_ListenOverflows);
+ rrddim_set_by_pointer(st_accept_queue, rd_drops, tcpext_ListenDrops);
+
+ rrdset_done(st_accept_queue);
+ }
+
+ }
+ }
+
+ return 0;
+}
diff --git a/collectors/proc.plugin/proc_net_rpc_nfs.c b/collectors/proc.plugin/proc_net_rpc_nfs.c
new file mode 100644
index 000000000..f5702859c
--- /dev/null
+++ b/collectors/proc.plugin/proc_net_rpc_nfs.c
@@ -0,0 +1,454 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "plugin_proc.h"
+
+#define PLUGIN_PROC_MODULE_NFS_NAME "/proc/net/rpc/nfs"
+#define CONFIG_SECTION_PLUGIN_PROC_NFS "plugin:" PLUGIN_PROC_CONFIG_NAME ":" PLUGIN_PROC_MODULE_NFS_NAME
+
+struct nfs_procs {
+    char name[30];              // procedure name, also used as the chart dimension id
+    unsigned long long value;   // last value read from /proc/net/rpc/nfs
+    int present;                // set once the procedure has been seen in the file
+    RRDDIM *rd;                 // the chart dimension, created on first use
+};
+
+struct nfs_procs nfs_proc2_values[] = {
+ { "null" , 0ULL, 0, NULL}
+ , {"getattr" , 0ULL, 0, NULL}
+ , {"setattr" , 0ULL, 0, NULL}
+ , {"root" , 0ULL, 0, NULL}
+ , {"lookup" , 0ULL, 0, NULL}
+ , {"readlink", 0ULL, 0, NULL}
+ , {"read" , 0ULL, 0, NULL}
+ , {"wrcache" , 0ULL, 0, NULL}
+ , {"write" , 0ULL, 0, NULL}
+ , {"create" , 0ULL, 0, NULL}
+ , {"remove" , 0ULL, 0, NULL}
+ , {"rename" , 0ULL, 0, NULL}
+ , {"link" , 0ULL, 0, NULL}
+ , {"symlink" , 0ULL, 0, NULL}
+ , {"mkdir" , 0ULL, 0, NULL}
+ , {"rmdir" , 0ULL, 0, NULL}
+ , {"readdir" , 0ULL, 0, NULL}
+ , {"fsstat" , 0ULL, 0, NULL}
+ ,
+
+ /* termination */
+ { "" , 0ULL, 0, NULL}
+};
+
+struct nfs_procs nfs_proc3_values[] = {
+ { "null" , 0ULL, 0, NULL}
+ , {"getattr" , 0ULL, 0, NULL}
+ , {"setattr" , 0ULL, 0, NULL}
+ , {"lookup" , 0ULL, 0, NULL}
+ , {"access" , 0ULL, 0, NULL}
+ , {"readlink" , 0ULL, 0, NULL}
+ , {"read" , 0ULL, 0, NULL}
+ , {"write" , 0ULL, 0, NULL}
+ , {"create" , 0ULL, 0, NULL}
+ , {"mkdir" , 0ULL, 0, NULL}
+ , {"symlink" , 0ULL, 0, NULL}
+ , {"mknod" , 0ULL, 0, NULL}
+ , {"remove" , 0ULL, 0, NULL}
+ , {"rmdir" , 0ULL, 0, NULL}
+ , {"rename" , 0ULL, 0, NULL}
+ , {"link" , 0ULL, 0, NULL}
+ , {"readdir" , 0ULL, 0, NULL}
+ , {"readdirplus", 0ULL, 0, NULL}
+ , {"fsstat" , 0ULL, 0, NULL}
+ , {"fsinfo" , 0ULL, 0, NULL}
+ , {"pathconf" , 0ULL, 0, NULL}
+ , {"commit" , 0ULL, 0, NULL}
+ ,
+
+ /* termination */
+ { "" , 0ULL, 0, NULL}
+};
+
+struct nfs_procs nfs_proc4_values[] = {
+ { "null" , 0ULL, 0, NULL}
+ , {"read" , 0ULL, 0, NULL}
+ , {"write" , 0ULL, 0, NULL}
+ , {"commit" , 0ULL, 0, NULL}
+ , {"open" , 0ULL, 0, NULL}
+ , {"open_conf" , 0ULL, 0, NULL}
+ , {"open_noat" , 0ULL, 0, NULL}
+ , {"open_dgrd" , 0ULL, 0, NULL}
+ , {"close" , 0ULL, 0, NULL}
+ , {"setattr" , 0ULL, 0, NULL}
+ , {"fsinfo" , 0ULL, 0, NULL}
+ , {"renew" , 0ULL, 0, NULL}
+ , {"setclntid" , 0ULL, 0, NULL}
+ , {"confirm" , 0ULL, 0, NULL}
+ , {"lock" , 0ULL, 0, NULL}
+ , {"lockt" , 0ULL, 0, NULL}
+ , {"locku" , 0ULL, 0, NULL}
+ , {"access" , 0ULL, 0, NULL}
+ , {"getattr" , 0ULL, 0, NULL}
+ , {"lookup" , 0ULL, 0, NULL}
+ , {"lookup_root" , 0ULL, 0, NULL}
+ , {"remove" , 0ULL, 0, NULL}
+ , {"rename" , 0ULL, 0, NULL}
+ , {"link" , 0ULL, 0, NULL}
+ , {"symlink" , 0ULL, 0, NULL}
+ , {"create" , 0ULL, 0, NULL}
+ , {"pathconf" , 0ULL, 0, NULL}
+ , {"statfs" , 0ULL, 0, NULL}
+ , {"readlink" , 0ULL, 0, NULL}
+ , {"readdir" , 0ULL, 0, NULL}
+ , {"server_caps" , 0ULL, 0, NULL}
+ , {"delegreturn" , 0ULL, 0, NULL}
+ , {"getacl" , 0ULL, 0, NULL}
+ , {"setacl" , 0ULL, 0, NULL}
+ , {"fs_locations" , 0ULL, 0, NULL}
+ , {"rel_lkowner" , 0ULL, 0, NULL}
+ , {"secinfo" , 0ULL, 0, NULL}
+ , {"fsid_present" , 0ULL, 0, NULL}
+ ,
+
+ /* nfsv4.1 client ops */
+ { "exchange_id" , 0ULL, 0, NULL}
+ , {"create_session" , 0ULL, 0, NULL}
+ , {"destroy_session" , 0ULL, 0, NULL}
+ , {"sequence" , 0ULL, 0, NULL}
+ , {"get_lease_time" , 0ULL, 0, NULL}
+ , {"reclaim_comp" , 0ULL, 0, NULL}
+ , {"layoutget" , 0ULL, 0, NULL}
+ , {"getdevinfo" , 0ULL, 0, NULL}
+ , {"layoutcommit" , 0ULL, 0, NULL}
+ , {"layoutreturn" , 0ULL, 0, NULL}
+ , {"secinfo_no" , 0ULL, 0, NULL}
+ , {"test_stateid" , 0ULL, 0, NULL}
+ , {"free_stateid" , 0ULL, 0, NULL}
+ , {"getdevicelist" , 0ULL, 0, NULL}
+ , {"bind_conn_to_ses", 0ULL, 0, NULL}
+ , {"destroy_clientid", 0ULL, 0, NULL}
+ ,
+
+ /* nfsv4.2 client ops */
+ { "seek" , 0ULL, 0, NULL}
+ , {"allocate" , 0ULL, 0, NULL}
+ , {"deallocate" , 0ULL, 0, NULL}
+ , {"layoutstats" , 0ULL, 0, NULL}
+ , {"clone" , 0ULL, 0, NULL}
+ ,
+
+ /* termination */
+ { "" , 0ULL, 0, NULL}
+};
+
+int do_proc_net_rpc_nfs(int update_every, usec_t dt) {
+ (void)dt;
+
+ static procfile *ff = NULL;
+ static int do_net = -1, do_rpc = -1, do_proc2 = -1, do_proc3 = -1, do_proc4 = -1;
+ static int proc2_warning = 0, proc3_warning = 0, proc4_warning = 0;
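+    // do_* values: -1 = (re)read the configuration on the next pass, 0 = disabled,
+    // 1 = enabled and looking for the line, 2 = line found with data, chart it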
+
+ if(!ff) {
+ char filename[FILENAME_MAX + 1];
+ snprintfz(filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/proc/net/rpc/nfs");
+ ff = procfile_open(config_get(CONFIG_SECTION_PLUGIN_PROC_NFS, "filename to monitor", filename), " \t", PROCFILE_FLAG_DEFAULT);
+ }
+ if(!ff) return 1;
+
+ ff = procfile_readall(ff);
+ if(!ff) return 0; // we return 0, so that we will retry to open it next time
+
+ if(do_net == -1) do_net = config_get_boolean(CONFIG_SECTION_PLUGIN_PROC_NFS, "network", 1);
+ if(do_rpc == -1) do_rpc = config_get_boolean(CONFIG_SECTION_PLUGIN_PROC_NFS, "rpc", 1);
+ if(do_proc2 == -1) do_proc2 = config_get_boolean(CONFIG_SECTION_PLUGIN_PROC_NFS, "NFS v2 procedures", 1);
+ if(do_proc3 == -1) do_proc3 = config_get_boolean(CONFIG_SECTION_PLUGIN_PROC_NFS, "NFS v3 procedures", 1);
+ if(do_proc4 == -1) do_proc4 = config_get_boolean(CONFIG_SECTION_PLUGIN_PROC_NFS, "NFS v4 procedures", 1);
+
+    // if they are enabled, reset them to 1;
+    // later we set them to 2, to avoid doing strcmp() on every line
+ if(do_net) do_net = 1;
+ if(do_rpc) do_rpc = 1;
+ if(do_proc2) do_proc2 = 1;
+ if(do_proc3) do_proc3 = 1;
+ if(do_proc4) do_proc4 = 1;
+
+ size_t lines = procfile_lines(ff), l;
+
+ char *type;
+ unsigned long long net_count = 0, net_udp_count = 0, net_tcp_count = 0, net_tcp_connections = 0;
+ unsigned long long rpc_calls = 0, rpc_retransmits = 0, rpc_auth_refresh = 0;
+
+ for(l = 0; l < lines ;l++) {
+ size_t words = procfile_linewords(ff, l);
+ if(!words) continue;
+
+ type = procfile_lineword(ff, l, 0);
+
+ if(do_net == 1 && strcmp(type, "net") == 0) {
+ if(words < 5) {
+ error("%s line of /proc/net/rpc/nfs has %zu words, expected %d", type, words, 5);
+ continue;
+ }
+
+ net_count = str2ull(procfile_lineword(ff, l, 1));
+ net_udp_count = str2ull(procfile_lineword(ff, l, 2));
+ net_tcp_count = str2ull(procfile_lineword(ff, l, 3));
+ net_tcp_connections = str2ull(procfile_lineword(ff, l, 4));
+
+ unsigned long long sum = net_count + net_udp_count + net_tcp_count + net_tcp_connections;
+ if(sum == 0ULL) do_net = -1;
+ else do_net = 2;
+ }
+ else if(do_rpc == 1 && strcmp(type, "rpc") == 0) {
+ if(words < 4) {
+ error("%s line of /proc/net/rpc/nfs has %zu words, expected %d", type, words, 6);
+ continue;
+ }
+
+ rpc_calls = str2ull(procfile_lineword(ff, l, 1));
+ rpc_retransmits = str2ull(procfile_lineword(ff, l, 2));
+ rpc_auth_refresh = str2ull(procfile_lineword(ff, l, 3));
+
+ unsigned long long sum = rpc_calls + rpc_retransmits + rpc_auth_refresh;
+ if(sum == 0ULL) do_rpc = -1;
+ else do_rpc = 2;
+ }
+ else if(do_proc2 == 1 && strcmp(type, "proc2") == 0) {
+            // the first number is the count of values present,
+            // so the values themselves start at word 2
+
+ unsigned long long sum = 0;
+ unsigned int i, j;
+ for(i = 0, j = 2; j < words && nfs_proc2_values[i].name[0] ; i++, j++) {
+ nfs_proc2_values[i].value = str2ull(procfile_lineword(ff, l, j));
+ nfs_proc2_values[i].present = 1;
+ sum += nfs_proc2_values[i].value;
+ }
+
+ if(sum == 0ULL) {
+ if(!proc2_warning) {
+ error("Disabling /proc/net/rpc/nfs v2 procedure calls chart. It seems unused on this machine. It will be enabled automatically when found with data in it.");
+ proc2_warning = 1;
+ }
+ do_proc2 = 0;
+ }
+ else do_proc2 = 2;
+ }
+ else if(do_proc3 == 1 && strcmp(type, "proc3") == 0) {
+            // the first number is the count of values present,
+            // so the values themselves start at word 2
+
+ unsigned long long sum = 0;
+ unsigned int i, j;
+ for(i = 0, j = 2; j < words && nfs_proc3_values[i].name[0] ; i++, j++) {
+ nfs_proc3_values[i].value = str2ull(procfile_lineword(ff, l, j));
+ nfs_proc3_values[i].present = 1;
+ sum += nfs_proc3_values[i].value;
+ }
+
+ if(sum == 0ULL) {
+ if(!proc3_warning) {
+ info("Disabling /proc/net/rpc/nfs v3 procedure calls chart. It seems unused on this machine. It will be enabled automatically when found with data in it.");
+ proc3_warning = 1;
+ }
+ do_proc3 = 0;
+ }
+ else do_proc3 = 2;
+ }
+ else if(do_proc4 == 1 && strcmp(type, "proc4") == 0) {
+            // the first number is the count of values present,
+            // so the values themselves start at word 2
+
+ unsigned long long sum = 0;
+ unsigned int i, j;
+ for(i = 0, j = 2; j < words && nfs_proc4_values[i].name[0] ; i++, j++) {
+ nfs_proc4_values[i].value = str2ull(procfile_lineword(ff, l, j));
+ nfs_proc4_values[i].present = 1;
+ sum += nfs_proc4_values[i].value;
+ }
+
+ if(sum == 0ULL) {
+ if(!proc4_warning) {
+ info("Disabling /proc/net/rpc/nfs v4 procedure calls chart. It seems unused on this machine. It will be enabled automatically when found with data in it.");
+ proc4_warning = 1;
+ }
+ do_proc4 = 0;
+ }
+ else do_proc4 = 2;
+ }
+ }
+
+ // --------------------------------------------------------------------
+
+ if(do_net == 2) {
+ static RRDSET *st = NULL;
+ static RRDDIM *rd_udp = NULL,
+ *rd_tcp = NULL;
+
+ if(unlikely(!st)) {
+ st = rrdset_create_localhost(
+ "nfs"
+ , "net"
+ , NULL
+ , "network"
+ , NULL
+ , "NFS Client Network"
+ , "operations/s"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_NFS_NAME
+ , NETDATA_CHART_PRIO_NFS_NET
+ , update_every
+ , RRDSET_TYPE_STACKED
+ );
+
+ rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
+
+ rd_udp = rrddim_add(st, "udp", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_tcp = rrddim_add(st, "tcp", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else rrdset_next(st);
+
+ // ignore net_count, net_tcp_connections
+ (void)net_count;
+ (void)net_tcp_connections;
+
+ rrddim_set_by_pointer(st, rd_udp, net_udp_count);
+ rrddim_set_by_pointer(st, rd_tcp, net_tcp_count);
+ rrdset_done(st);
+ }
+
+ // --------------------------------------------------------------------
+
+ if(do_rpc == 2) {
+ static RRDSET *st = NULL;
+ static RRDDIM *rd_calls = NULL,
+ *rd_retransmits = NULL,
+ *rd_auth_refresh = NULL;
+
+ if(unlikely(!st)) {
+ st = rrdset_create_localhost(
+ "nfs"
+ , "rpc"
+ , NULL
+ , "rpc"
+ , NULL
+ , "NFS Client Remote Procedure Calls Statistics"
+ , "calls/s"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_NFS_NAME
+ , NETDATA_CHART_PRIO_NFS_RPC
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+ rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
+
+ rd_calls = rrddim_add(st, "calls", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_retransmits = rrddim_add(st, "retransmits", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_auth_refresh = rrddim_add(st, "auth_refresh", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else rrdset_next(st);
+
+ rrddim_set_by_pointer(st, rd_calls, rpc_calls);
+ rrddim_set_by_pointer(st, rd_retransmits, rpc_retransmits);
+ rrddim_set_by_pointer(st, rd_auth_refresh, rpc_auth_refresh);
+ rrdset_done(st);
+ }
+
+ // --------------------------------------------------------------------
+
+ if(do_proc2 == 2) {
+ static RRDSET *st = NULL;
+ if(unlikely(!st)) {
+ st = rrdset_create_localhost(
+ "nfs"
+ , "proc2"
+ , NULL
+ , "nfsv2rpc"
+ , NULL
+ , "NFS v2 Client Remote Procedure Calls"
+ , "calls/s"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_NFS_NAME
+ , NETDATA_CHART_PRIO_NFS_PROC2
+ , update_every
+ , RRDSET_TYPE_STACKED
+ );
+ }
+ else rrdset_next(st);
+
+ size_t i;
+ for(i = 0; nfs_proc2_values[i].present ; i++) {
+ if(unlikely(!nfs_proc2_values[i].rd))
+ nfs_proc2_values[i].rd = rrddim_add(st, nfs_proc2_values[i].name, NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+
+ rrddim_set_by_pointer(st, nfs_proc2_values[i].rd, nfs_proc2_values[i].value);
+ }
+
+ rrdset_done(st);
+ }
+
+ // --------------------------------------------------------------------
+
+ if(do_proc3 == 2) {
+ static RRDSET *st = NULL;
+ if(unlikely(!st)) {
+ st = rrdset_create_localhost(
+ "nfs"
+ , "proc3"
+ , NULL
+ , "nfsv3rpc"
+ , NULL
+ , "NFS v3 Client Remote Procedure Calls"
+ , "calls/s"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_NFS_NAME
+ , NETDATA_CHART_PRIO_NFS_PROC3
+ , update_every
+ , RRDSET_TYPE_STACKED
+ );
+ }
+ else rrdset_next(st);
+
+ size_t i;
+ for(i = 0; nfs_proc3_values[i].present ; i++) {
+ if(unlikely(!nfs_proc3_values[i].rd))
+ nfs_proc3_values[i].rd = rrddim_add(st, nfs_proc3_values[i].name, NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+
+ rrddim_set_by_pointer(st, nfs_proc3_values[i].rd, nfs_proc3_values[i].value);
+ }
+
+ rrdset_done(st);
+ }
+
+ // --------------------------------------------------------------------
+
+ if(do_proc4 == 2) {
+ static RRDSET *st = NULL;
+ if(unlikely(!st)) {
+ st = rrdset_create_localhost(
+ "nfs"
+ , "proc4"
+ , NULL
+ , "nfsv4rpc"
+ , NULL
+ , "NFS v4 Client Remote Procedure Calls"
+ , "calls/s"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_NFS_NAME
+ , NETDATA_CHART_PRIO_NFS_PROC4
+ , update_every
+ , RRDSET_TYPE_STACKED
+ );
+ }
+ else rrdset_next(st);
+
+ size_t i;
+ for(i = 0; nfs_proc4_values[i].present ; i++) {
+ if(unlikely(!nfs_proc4_values[i].rd))
+ nfs_proc4_values[i].rd = rrddim_add(st, nfs_proc4_values[i].name, NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+
+ rrddim_set_by_pointer(st, nfs_proc4_values[i].rd, nfs_proc4_values[i].value);
+ }
+
+ rrdset_done(st);
+ }
+
+ return 0;
+}
diff --git a/collectors/proc.plugin/proc_net_rpc_nfsd.c b/collectors/proc.plugin/proc_net_rpc_nfsd.c
new file mode 100644
index 000000000..20b87e9dd
--- /dev/null
+++ b/collectors/proc.plugin/proc_net_rpc_nfsd.c
@@ -0,0 +1,1006 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "plugin_proc.h"
+
+#define PLUGIN_PROC_MODULE_NFSD_NAME "/proc/net/rpc/nfsd"
+
+struct nfsd_procs {
+ char name[30];
+ unsigned long long value;
+ int present;
+ RRDDIM *rd;
+};
+
+struct nfsd_procs nfsd_proc2_values[] = {
+ { "null" , 0ULL, 0, NULL}
+ , {"getattr" , 0ULL, 0, NULL}
+ , {"setattr" , 0ULL, 0, NULL}
+ , {"root" , 0ULL, 0, NULL}
+ , {"lookup" , 0ULL, 0, NULL}
+ , {"readlink", 0ULL, 0, NULL}
+ , {"read" , 0ULL, 0, NULL}
+ , {"wrcache" , 0ULL, 0, NULL}
+ , {"write" , 0ULL, 0, NULL}
+ , {"create" , 0ULL, 0, NULL}
+ , {"remove" , 0ULL, 0, NULL}
+ , {"rename" , 0ULL, 0, NULL}
+ , {"link" , 0ULL, 0, NULL}
+ , {"symlink" , 0ULL, 0, NULL}
+ , {"mkdir" , 0ULL, 0, NULL}
+ , {"rmdir" , 0ULL, 0, NULL}
+ , {"readdir" , 0ULL, 0, NULL}
+ , {"fsstat" , 0ULL, 0, NULL}
+ ,
+
+ /* termination */
+ { "" , 0ULL, 0, NULL}
+};
+
+struct nfsd_procs nfsd_proc3_values[] = {
+ { "null" , 0ULL, 0, NULL}
+ , {"getattr" , 0ULL, 0, NULL}
+ , {"setattr" , 0ULL, 0, NULL}
+ , {"lookup" , 0ULL, 0, NULL}
+ , {"access" , 0ULL, 0, NULL}
+ , {"readlink" , 0ULL, 0, NULL}
+ , {"read" , 0ULL, 0, NULL}
+ , {"write" , 0ULL, 0, NULL}
+ , {"create" , 0ULL, 0, NULL}
+ , {"mkdir" , 0ULL, 0, NULL}
+ , {"symlink" , 0ULL, 0, NULL}
+ , {"mknod" , 0ULL, 0, NULL}
+ , {"remove" , 0ULL, 0, NULL}
+ , {"rmdir" , 0ULL, 0, NULL}
+ , {"rename" , 0ULL, 0, NULL}
+ , {"link" , 0ULL, 0, NULL}
+ , {"readdir" , 0ULL, 0, NULL}
+ , {"readdirplus", 0ULL, 0, NULL}
+ , {"fsstat" , 0ULL, 0, NULL}
+ , {"fsinfo" , 0ULL, 0, NULL}
+ , {"pathconf" , 0ULL, 0, NULL}
+ , {"commit" , 0ULL, 0, NULL}
+ ,
+
+ /* termination */
+ { "" , 0ULL, 0, NULL}
+};
+
+struct nfsd_procs nfsd_proc4_values[] = {
+ { "null" , 0ULL, 0, NULL}
+ , {"read" , 0ULL, 0, NULL}
+ , {"write" , 0ULL, 0, NULL}
+ , {"commit" , 0ULL, 0, NULL}
+ , {"open" , 0ULL, 0, NULL}
+ , {"open_conf" , 0ULL, 0, NULL}
+ , {"open_noat" , 0ULL, 0, NULL}
+ , {"open_dgrd" , 0ULL, 0, NULL}
+ , {"close" , 0ULL, 0, NULL}
+ , {"setattr" , 0ULL, 0, NULL}
+ , {"fsinfo" , 0ULL, 0, NULL}
+ , {"renew" , 0ULL, 0, NULL}
+ , {"setclntid" , 0ULL, 0, NULL}
+ , {"confirm" , 0ULL, 0, NULL}
+ , {"lock" , 0ULL, 0, NULL}
+ , {"lockt" , 0ULL, 0, NULL}
+ , {"locku" , 0ULL, 0, NULL}
+ , {"access" , 0ULL, 0, NULL}
+ , {"getattr" , 0ULL, 0, NULL}
+ , {"lookup" , 0ULL, 0, NULL}
+ , {"lookup_root" , 0ULL, 0, NULL}
+ , {"remove" , 0ULL, 0, NULL}
+ , {"rename" , 0ULL, 0, NULL}
+ , {"link" , 0ULL, 0, NULL}
+ , {"symlink" , 0ULL, 0, NULL}
+ , {"create" , 0ULL, 0, NULL}
+ , {"pathconf" , 0ULL, 0, NULL}
+ , {"statfs" , 0ULL, 0, NULL}
+ , {"readlink" , 0ULL, 0, NULL}
+ , {"readdir" , 0ULL, 0, NULL}
+ , {"server_caps" , 0ULL, 0, NULL}
+ , {"delegreturn" , 0ULL, 0, NULL}
+ , {"getacl" , 0ULL, 0, NULL}
+ , {"setacl" , 0ULL, 0, NULL}
+ , {"fs_locations" , 0ULL, 0, NULL}
+ , {"rel_lkowner" , 0ULL, 0, NULL}
+ , {"secinfo" , 0ULL, 0, NULL}
+ , {"fsid_present" , 0ULL, 0, NULL}
+ ,
+
+ /* nfsv4.1 client ops */
+ { "exchange_id" , 0ULL, 0, NULL}
+ , {"create_session" , 0ULL, 0, NULL}
+ , {"destroy_session" , 0ULL, 0, NULL}
+ , {"sequence" , 0ULL, 0, NULL}
+ , {"get_lease_time" , 0ULL, 0, NULL}
+ , {"reclaim_comp" , 0ULL, 0, NULL}
+ , {"layoutget" , 0ULL, 0, NULL}
+ , {"getdevinfo" , 0ULL, 0, NULL}
+ , {"layoutcommit" , 0ULL, 0, NULL}
+ , {"layoutreturn" , 0ULL, 0, NULL}
+ , {"secinfo_no" , 0ULL, 0, NULL}
+ , {"test_stateid" , 0ULL, 0, NULL}
+ , {"free_stateid" , 0ULL, 0, NULL}
+ , {"getdevicelist" , 0ULL, 0, NULL}
+ , {"bind_conn_to_ses", 0ULL, 0, NULL}
+ , {"destroy_clientid", 0ULL, 0, NULL}
+ ,
+
+ /* nfsv4.2 client ops */
+ { "seek" , 0ULL, 0, NULL}
+ , {"allocate" , 0ULL, 0, NULL}
+ , {"deallocate" , 0ULL, 0, NULL}
+ , {"layoutstats" , 0ULL, 0, NULL}
+ , {"clone" , 0ULL, 0, NULL}
+ ,
+
+ /* termination */
+ { "" , 0ULL, 0, NULL}
+};
+
+struct nfsd_procs nfsd4_ops_values[] = {
+ { "unused_op0" , 0ULL, 0, NULL}
+ , {"unused_op1" , 0ULL, 0, NULL}
+ , {"future_op2" , 0ULL, 0, NULL}
+ , {"access" , 0ULL, 0, NULL}
+ , {"close" , 0ULL, 0, NULL}
+ , {"commit" , 0ULL, 0, NULL}
+ , {"create" , 0ULL, 0, NULL}
+ , {"delegpurge" , 0ULL, 0, NULL}
+ , {"delegreturn" , 0ULL, 0, NULL}
+ , {"getattr" , 0ULL, 0, NULL}
+ , {"getfh" , 0ULL, 0, NULL}
+ , {"link" , 0ULL, 0, NULL}
+ , {"lock" , 0ULL, 0, NULL}
+ , {"lockt" , 0ULL, 0, NULL}
+ , {"locku" , 0ULL, 0, NULL}
+ , {"lookup" , 0ULL, 0, NULL}
+ , {"lookup_root" , 0ULL, 0, NULL}
+ , {"nverify" , 0ULL, 0, NULL}
+ , {"open" , 0ULL, 0, NULL}
+ , {"openattr" , 0ULL, 0, NULL}
+ , {"open_confirm" , 0ULL, 0, NULL}
+ , {"open_downgrade" , 0ULL, 0, NULL}
+ , {"putfh" , 0ULL, 0, NULL}
+ , {"putpubfh" , 0ULL, 0, NULL}
+ , {"putrootfh" , 0ULL, 0, NULL}
+ , {"read" , 0ULL, 0, NULL}
+ , {"readdir" , 0ULL, 0, NULL}
+ , {"readlink" , 0ULL, 0, NULL}
+ , {"remove" , 0ULL, 0, NULL}
+ , {"rename" , 0ULL, 0, NULL}
+ , {"renew" , 0ULL, 0, NULL}
+ , {"restorefh" , 0ULL, 0, NULL}
+ , {"savefh" , 0ULL, 0, NULL}
+ , {"secinfo" , 0ULL, 0, NULL}
+ , {"setattr" , 0ULL, 0, NULL}
+ , {"setclientid" , 0ULL, 0, NULL}
+ , {"setclientid_confirm" , 0ULL, 0, NULL}
+ , {"verify" , 0ULL, 0, NULL}
+ , {"write" , 0ULL, 0, NULL}
+ , {"release_lockowner" , 0ULL, 0, NULL}
+ ,
+
+ /* nfs41 */
+ { "backchannel_ctl" , 0ULL, 0, NULL}
+ , {"bind_conn_to_session", 0ULL, 0, NULL}
+ , {"exchange_id" , 0ULL, 0, NULL}
+ , {"create_session" , 0ULL, 0, NULL}
+ , {"destroy_session" , 0ULL, 0, NULL}
+ , {"free_stateid" , 0ULL, 0, NULL}
+ , {"get_dir_delegation" , 0ULL, 0, NULL}
+ , {"getdeviceinfo" , 0ULL, 0, NULL}
+ , {"getdevicelist" , 0ULL, 0, NULL}
+ , {"layoutcommit" , 0ULL, 0, NULL}
+ , {"layoutget" , 0ULL, 0, NULL}
+ , {"layoutreturn" , 0ULL, 0, NULL}
+ , {"secinfo_no_name" , 0ULL, 0, NULL}
+ , {"sequence" , 0ULL, 0, NULL}
+ , {"set_ssv" , 0ULL, 0, NULL}
+ , {"test_stateid" , 0ULL, 0, NULL}
+ , {"want_delegation" , 0ULL, 0, NULL}
+ , {"destroy_clientid" , 0ULL, 0, NULL}
+ , {"reclaim_complete" , 0ULL, 0, NULL}
+ ,
+
+ /* nfs42 */
+ { "allocate" , 0ULL, 0, NULL}
+ , {"copy" , 0ULL, 0, NULL}
+ , {"copy_notify" , 0ULL, 0, NULL}
+ , {"deallocate" , 0ULL, 0, NULL}
+ , {"ioadvise" , 0ULL, 0, NULL}
+ , {"layouterror" , 0ULL, 0, NULL}
+ , {"layoutstats" , 0ULL, 0, NULL}
+ , {"offload_cancel" , 0ULL, 0, NULL}
+ , {"offload_status" , 0ULL, 0, NULL}
+ , {"read_plus" , 0ULL, 0, NULL}
+ , {"seek" , 0ULL, 0, NULL}
+ , {"write_same" , 0ULL, 0, NULL}
+ ,
+
+ /* termination */
+ { "" , 0ULL, 0, NULL}
+};
+
+
+int do_proc_net_rpc_nfsd(int update_every, usec_t dt) {
+ (void)dt;
+ static procfile *ff = NULL;
+ static int do_rc = -1, do_fh = -1, do_io = -1, do_th = -1, do_ra = -1, do_net = -1, do_rpc = -1, do_proc2 = -1, do_proc3 = -1, do_proc4 = -1, do_proc4ops = -1;
+ static int ra_warning = 0, th_warning = 0, proc2_warning = 0, proc3_warning = 0, proc4_warning = 0, proc4ops_warning = 0;
+
+ if(unlikely(!ff)) {
+ char filename[FILENAME_MAX + 1];
+ snprintfz(filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/proc/net/rpc/nfsd");
+ ff = procfile_open(config_get("plugin:proc:/proc/net/rpc/nfsd", "filename to monitor", filename), " \t", PROCFILE_FLAG_DEFAULT);
+ if(unlikely(!ff)) return 1;
+ }
+
+ ff = procfile_readall(ff);
+ if(unlikely(!ff)) return 0; // we return 0, so that we will retry to open it next time
+
+ if(unlikely(do_rc == -1)) {
+ do_rc = config_get_boolean("plugin:proc:/proc/net/rpc/nfsd", "read cache", 1);
+ do_fh = config_get_boolean("plugin:proc:/proc/net/rpc/nfsd", "file handles", 1);
+ do_io = config_get_boolean("plugin:proc:/proc/net/rpc/nfsd", "I/O", 1);
+ do_th = config_get_boolean("plugin:proc:/proc/net/rpc/nfsd", "threads", 1);
+ do_ra = config_get_boolean("plugin:proc:/proc/net/rpc/nfsd", "read ahead", 1);
+ do_net = config_get_boolean("plugin:proc:/proc/net/rpc/nfsd", "network", 1);
+ do_rpc = config_get_boolean("plugin:proc:/proc/net/rpc/nfsd", "rpc", 1);
+ do_proc2 = config_get_boolean("plugin:proc:/proc/net/rpc/nfsd", "NFS v2 procedures", 1);
+ do_proc3 = config_get_boolean("plugin:proc:/proc/net/rpc/nfsd", "NFS v3 procedures", 1);
+ do_proc4 = config_get_boolean("plugin:proc:/proc/net/rpc/nfsd", "NFS v4 procedures", 1);
+ do_proc4ops = config_get_boolean("plugin:proc:/proc/net/rpc/nfsd", "NFS v4 operations", 1);
+ }
+
+    // if they are enabled, reset them to 1;
+    // later we set them to 2, to avoid doing strcmp() on every line
+ if(do_rc) do_rc = 1;
+ if(do_fh) do_fh = 1;
+ if(do_io) do_io = 1;
+ if(do_th) do_th = 1;
+ if(do_ra) do_ra = 1;
+ if(do_net) do_net = 1;
+ if(do_rpc) do_rpc = 1;
+ if(do_proc2) do_proc2 = 1;
+ if(do_proc3) do_proc3 = 1;
+ if(do_proc4) do_proc4 = 1;
+ if(do_proc4ops) do_proc4ops = 1;
+
+ size_t lines = procfile_lines(ff), l;
+
+ char *type;
+ unsigned long long rc_hits = 0, rc_misses = 0, rc_nocache = 0;
+ unsigned long long fh_stale = 0, fh_total_lookups = 0, fh_anonymous_lookups = 0, fh_dir_not_in_dcache = 0, fh_non_dir_not_in_dcache = 0;
+ unsigned long long io_read = 0, io_write = 0;
+ unsigned long long th_threads = 0, th_fullcnt = 0, th_hist10 = 0, th_hist20 = 0, th_hist30 = 0, th_hist40 = 0, th_hist50 = 0, th_hist60 = 0, th_hist70 = 0, th_hist80 = 0, th_hist90 = 0, th_hist100 = 0;
+ unsigned long long ra_size = 0, ra_hist10 = 0, ra_hist20 = 0, ra_hist30 = 0, ra_hist40 = 0, ra_hist50 = 0, ra_hist60 = 0, ra_hist70 = 0, ra_hist80 = 0, ra_hist90 = 0, ra_hist100 = 0, ra_none = 0;
+ unsigned long long net_count = 0, net_udp_count = 0, net_tcp_count = 0, net_tcp_connections = 0;
+ unsigned long long rpc_calls = 0, rpc_bad_format = 0, rpc_bad_auth = 0, rpc_bad_client = 0;
+
+ for(l = 0; l < lines ;l++) {
+ size_t words = procfile_linewords(ff, l);
+ if(unlikely(!words)) continue;
+
+ type = procfile_lineword(ff, l, 0);
+
+ if(do_rc == 1 && strcmp(type, "rc") == 0) {
+ if(unlikely(words < 4)) {
+ error("%s line of /proc/net/rpc/nfsd has %zu words, expected %d", type, words, 4);
+ continue;
+ }
+
+ rc_hits = str2ull(procfile_lineword(ff, l, 1));
+ rc_misses = str2ull(procfile_lineword(ff, l, 2));
+ rc_nocache = str2ull(procfile_lineword(ff, l, 3));
+
+ unsigned long long sum = rc_hits + rc_misses + rc_nocache;
+ if(sum == 0ULL) do_rc = -1;
+ else do_rc = 2;
+ }
+ else if(do_fh == 1 && strcmp(type, "fh") == 0) {
+ if(unlikely(words < 6)) {
+ error("%s line of /proc/net/rpc/nfsd has %zu words, expected %d", type, words, 6);
+ continue;
+ }
+
+ fh_stale = str2ull(procfile_lineword(ff, l, 1));
+ fh_total_lookups = str2ull(procfile_lineword(ff, l, 2));
+ fh_anonymous_lookups = str2ull(procfile_lineword(ff, l, 3));
+ fh_dir_not_in_dcache = str2ull(procfile_lineword(ff, l, 4));
+ fh_non_dir_not_in_dcache = str2ull(procfile_lineword(ff, l, 5));
+
+ unsigned long long sum = fh_stale + fh_total_lookups + fh_anonymous_lookups + fh_dir_not_in_dcache + fh_non_dir_not_in_dcache;
+ if(sum == 0ULL) do_fh = -1;
+ else do_fh = 2;
+ }
+ else if(do_io == 1 && strcmp(type, "io") == 0) {
+ if(unlikely(words < 3)) {
+ error("%s line of /proc/net/rpc/nfsd has %zu words, expected %d", type, words, 3);
+ continue;
+ }
+
+ io_read = str2ull(procfile_lineword(ff, l, 1));
+ io_write = str2ull(procfile_lineword(ff, l, 2));
+
+ unsigned long long sum = io_read + io_write;
+ if(sum == 0ULL) do_io = -1;
+ else do_io = 2;
+ }
+ else if(do_th == 1 && strcmp(type, "th") == 0) {
+ if(unlikely(words < 13)) {
+ error("%s line of /proc/net/rpc/nfsd has %zu words, expected %d", type, words, 13);
+ continue;
+ }
+
+ th_threads = str2ull(procfile_lineword(ff, l, 1));
+ th_fullcnt = str2ull(procfile_lineword(ff, l, 2));
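+            // the histogram values are decimal numbers; they are scaled by 1000
+            // here so they can be kept in integer counters without losing the
+            // fractional part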
+ th_hist10 = (unsigned long long)(atof(procfile_lineword(ff, l, 3)) * 1000.0);
+ th_hist20 = (unsigned long long)(atof(procfile_lineword(ff, l, 4)) * 1000.0);
+ th_hist30 = (unsigned long long)(atof(procfile_lineword(ff, l, 5)) * 1000.0);
+ th_hist40 = (unsigned long long)(atof(procfile_lineword(ff, l, 6)) * 1000.0);
+ th_hist50 = (unsigned long long)(atof(procfile_lineword(ff, l, 7)) * 1000.0);
+ th_hist60 = (unsigned long long)(atof(procfile_lineword(ff, l, 8)) * 1000.0);
+ th_hist70 = (unsigned long long)(atof(procfile_lineword(ff, l, 9)) * 1000.0);
+ th_hist80 = (unsigned long long)(atof(procfile_lineword(ff, l, 10)) * 1000.0);
+ th_hist90 = (unsigned long long)(atof(procfile_lineword(ff, l, 11)) * 1000.0);
+ th_hist100 = (unsigned long long)(atof(procfile_lineword(ff, l, 12)) * 1000.0);
+
+ // threads histogram has been disabled on recent kernels
+ // http://permalink.gmane.org/gmane.linux.nfs/24528
+ unsigned long long sum = th_hist10 + th_hist20 + th_hist30 + th_hist40 + th_hist50 + th_hist60 + th_hist70 + th_hist80 + th_hist90 + th_hist100;
+ if(sum == 0ULL) {
+ if(!th_warning) {
+ info("Disabling /proc/net/rpc/nfsd threads histogram. It seems unused on this machine. It will be enabled automatically when found with data in it.");
+ th_warning = 1;
+ }
+ do_th = -1;
+ }
+ else do_th = 2;
+ }
+ else if(do_ra == 1 && strcmp(type, "ra") == 0) {
+ if(unlikely(words < 13)) {
+ error("%s line of /proc/net/rpc/nfsd has %zu words, expected %d", type, words, 13);
+ continue;
+ }
+
+ ra_size = str2ull(procfile_lineword(ff, l, 1));
+ ra_hist10 = str2ull(procfile_lineword(ff, l, 2));
+ ra_hist20 = str2ull(procfile_lineword(ff, l, 3));
+ ra_hist30 = str2ull(procfile_lineword(ff, l, 4));
+ ra_hist40 = str2ull(procfile_lineword(ff, l, 5));
+ ra_hist50 = str2ull(procfile_lineword(ff, l, 6));
+ ra_hist60 = str2ull(procfile_lineword(ff, l, 7));
+ ra_hist70 = str2ull(procfile_lineword(ff, l, 8));
+ ra_hist80 = str2ull(procfile_lineword(ff, l, 9));
+ ra_hist90 = str2ull(procfile_lineword(ff, l, 10));
+ ra_hist100 = str2ull(procfile_lineword(ff, l, 11));
+ ra_none = str2ull(procfile_lineword(ff, l, 12));
+
+ unsigned long long sum = ra_hist10 + ra_hist20 + ra_hist30 + ra_hist40 + ra_hist50 + ra_hist60 + ra_hist70 + ra_hist80 + ra_hist90 + ra_hist100 + ra_none;
+ if(sum == 0ULL) {
+ if(!ra_warning) {
+ info("Disabling /proc/net/rpc/nfsd read ahead histogram. It seems unused on this machine. It will be enabled automatically when found with data in it.");
+ ra_warning = 1;
+ }
+ do_ra = -1;
+ }
+ else do_ra = 2;
+ }
+ else if(do_net == 1 && strcmp(type, "net") == 0) {
+ if(unlikely(words < 5)) {
+ error("%s line of /proc/net/rpc/nfsd has %zu words, expected %d", type, words, 5);
+ continue;
+ }
+
+ net_count = str2ull(procfile_lineword(ff, l, 1));
+ net_udp_count = str2ull(procfile_lineword(ff, l, 2));
+ net_tcp_count = str2ull(procfile_lineword(ff, l, 3));
+ net_tcp_connections = str2ull(procfile_lineword(ff, l, 4));
+
+ unsigned long long sum = net_count + net_udp_count + net_tcp_count + net_tcp_connections;
+ if(sum == 0ULL) do_net = -1;
+ else do_net = 2;
+ }
+ else if(do_rpc == 1 && strcmp(type, "rpc") == 0) {
+ if(unlikely(words < 6)) {
+ error("%s line of /proc/net/rpc/nfsd has %zu words, expected %d", type, words, 6);
+ continue;
+ }
+
+ rpc_calls = str2ull(procfile_lineword(ff, l, 1));
+ rpc_bad_format = str2ull(procfile_lineword(ff, l, 2));
+ rpc_bad_auth = str2ull(procfile_lineword(ff, l, 3));
+ rpc_bad_client = str2ull(procfile_lineword(ff, l, 4));
+
+ unsigned long long sum = rpc_calls + rpc_bad_format + rpc_bad_auth + rpc_bad_client;
+ if(sum == 0ULL) do_rpc = -1;
+ else do_rpc = 2;
+ }
+ else if(do_proc2 == 1 && strcmp(type, "proc2") == 0) {
+            // the first number is the count of values present,
+            // so the values themselves start at word 2
+
+ unsigned long long sum = 0;
+ unsigned int i, j;
+ for(i = 0, j = 2; j < words && nfsd_proc2_values[i].name[0] ; i++, j++) {
+ nfsd_proc2_values[i].value = str2ull(procfile_lineword(ff, l, j));
+ nfsd_proc2_values[i].present = 1;
+ sum += nfsd_proc2_values[i].value;
+ }
+
+ if(sum == 0ULL) {
+ if(!proc2_warning) {
+ error("Disabling /proc/net/rpc/nfsd v2 procedure calls chart. It seems unused on this machine. It will be enabled automatically when found with data in it.");
+ proc2_warning = 1;
+ }
+ do_proc2 = 0;
+ }
+ else do_proc2 = 2;
+ }
+ else if(do_proc3 == 1 && strcmp(type, "proc3") == 0) {
+            // the first number is the count of values present,
+            // so the values themselves start at word 2
+
+ unsigned long long sum = 0;
+ unsigned int i, j;
+ for(i = 0, j = 2; j < words && nfsd_proc3_values[i].name[0] ; i++, j++) {
+ nfsd_proc3_values[i].value = str2ull(procfile_lineword(ff, l, j));
+ nfsd_proc3_values[i].present = 1;
+ sum += nfsd_proc3_values[i].value;
+ }
+
+ if(sum == 0ULL) {
+ if(!proc3_warning) {
+ info("Disabling /proc/net/rpc/nfsd v3 procedure calls chart. It seems unused on this machine. It will be enabled automatically when found with data in it.");
+ proc3_warning = 1;
+ }
+ do_proc3 = 0;
+ }
+ else do_proc3 = 2;
+ }
+ else if(do_proc4 == 1 && strcmp(type, "proc4") == 0) {
+            // the first number is the count of values present,
+            // so the values themselves start at word 2
+
+ unsigned long long sum = 0;
+ unsigned int i, j;
+ for(i = 0, j = 2; j < words && nfsd_proc4_values[i].name[0] ; i++, j++) {
+ nfsd_proc4_values[i].value = str2ull(procfile_lineword(ff, l, j));
+ nfsd_proc4_values[i].present = 1;
+ sum += nfsd_proc4_values[i].value;
+ }
+
+ if(sum == 0ULL) {
+ if(!proc4_warning) {
+ info("Disabling /proc/net/rpc/nfsd v4 procedure calls chart. It seems unused on this machine. It will be enabled automatically when found with data in it.");
+ proc4_warning = 1;
+ }
+ do_proc4 = 0;
+ }
+ else do_proc4 = 2;
+ }
+ else if(do_proc4ops == 1 && strcmp(type, "proc4ops") == 0) {
+            // the first number is the count of values present,
+            // so the values themselves start at word 2
+
+ unsigned long long sum = 0;
+ unsigned int i, j;
+ for(i = 0, j = 2; j < words && nfsd4_ops_values[i].name[0] ; i++, j++) {
+ nfsd4_ops_values[i].value = str2ull(procfile_lineword(ff, l, j));
+ nfsd4_ops_values[i].present = 1;
+ sum += nfsd4_ops_values[i].value;
+ }
+
+ if(sum == 0ULL) {
+ if(!proc4ops_warning) {
+ info("Disabling /proc/net/rpc/nfsd v4 operations chart. It seems unused on this machine. It will be enabled automatically when found with data in it.");
+ proc4ops_warning = 1;
+ }
+ do_proc4ops = 0;
+ }
+ else do_proc4ops = 2;
+ }
+ }
+
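+ // parsing is done; below, each chart is rendered only when its section of
+ // /proc/net/rpc/nfsd was found with data (do_* == 2)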
+ // --------------------------------------------------------------------
+
+ if(do_rc == 2) {
+ static RRDSET *st = NULL;
+ static RRDDIM *rd_hits = NULL,
+ *rd_misses = NULL,
+ *rd_nocache = NULL;
+
+ if(unlikely(!st)) {
+ st = rrdset_create_localhost(
+ "nfsd"
+ , "readcache"
+ , NULL
+ , "cache"
+ , NULL
+ , "NFS Server Read Cache"
+ , "reads/s"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_NFSD_NAME
+ , NETDATA_CHART_PRIO_NFSD_READCACHE
+ , update_every
+ , RRDSET_TYPE_STACKED
+ );
+
+ rd_hits = rrddim_add(st, "hits", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_misses = rrddim_add(st, "misses", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_nocache = rrddim_add(st, "nocache", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else rrdset_next(st);
+
+ rrddim_set_by_pointer(st, rd_hits, rc_hits);
+ rrddim_set_by_pointer(st, rd_misses, rc_misses);
+ rrddim_set_by_pointer(st, rd_nocache, rc_nocache);
+ rrdset_done(st);
+ }
+
+ // --------------------------------------------------------------------
+
+ if(do_fh == 2) {
+ static RRDSET *st = NULL;
+ static RRDDIM *rd_stale = NULL,
+ *rd_total_lookups = NULL,
+ *rd_anonymous_lookups = NULL,
+ *rd_dir_not_in_dcache = NULL,
+ *rd_non_dir_not_in_dcache = NULL;
+
+ if(unlikely(!st)) {
+ st = rrdset_create_localhost(
+ "nfsd"
+ , "filehandles"
+ , NULL
+ , "filehandles"
+ , NULL
+ , "NFS Server File Handles"
+ , "handles/s"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_NFSD_NAME
+ , NETDATA_CHART_PRIO_NFSD_FILEHANDLES
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+ rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
+
+ rd_stale = rrddim_add(st, "stale", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ rd_total_lookups = rrddim_add(st, "total_lookups", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_anonymous_lookups = rrddim_add(st, "anonymous_lookups", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_dir_not_in_dcache = rrddim_add(st, "dir_not_in_dcache", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_non_dir_not_in_dcache = rrddim_add(st, "non_dir_not_in_dcache", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else rrdset_next(st);
+
+ rrddim_set_by_pointer(st, rd_stale, fh_stale);
+ rrddim_set_by_pointer(st, rd_total_lookups, fh_total_lookups);
+ rrddim_set_by_pointer(st, rd_anonymous_lookups, fh_anonymous_lookups);
+ rrddim_set_by_pointer(st, rd_dir_not_in_dcache, fh_dir_not_in_dcache);
+ rrddim_set_by_pointer(st, rd_non_dir_not_in_dcache, fh_non_dir_not_in_dcache);
+ rrdset_done(st);
+ }
+
+ // --------------------------------------------------------------------
+
+ if(do_io == 2) {
+ static RRDSET *st = NULL;
+ static RRDDIM *rd_read = NULL,
+ *rd_write = NULL;
+
+ if(unlikely(!st)) {
+ st = rrdset_create_localhost(
+ "nfsd"
+ , "io"
+ , NULL
+ , "io"
+ , NULL
+ , "NFS Server I/O"
+ , "kilobytes/s"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_NFSD_NAME
+ , NETDATA_CHART_PRIO_NFSD_IO
+ , update_every
+ , RRDSET_TYPE_AREA
+ );
+
+ rd_read = rrddim_add(st, "read", NULL, 1, 1000, RRD_ALGORITHM_INCREMENTAL);
+ rd_write = rrddim_add(st, "write", NULL, -1, 1000, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else rrdset_next(st);
+
+ rrddim_set_by_pointer(st, rd_read, io_read);
+ rrddim_set_by_pointer(st, rd_write, io_write);
+ rrdset_done(st);
+ }
+
+ // --------------------------------------------------------------------
+
+ if(do_th == 2) {
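+ // the 'th' line feeds the three charts below: the current thread count
+ // (th_threads), full-count events (th_fullcnt) and the thread usage
+ // histogram (th_hist10 .. th_hist100)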
+ {
+ static RRDSET *st = NULL;
+ static RRDDIM *rd_threads = NULL;
+
+ if(unlikely(!st)) {
+ st = rrdset_create_localhost(
+ "nfsd"
+ , "threads"
+ , NULL
+ , "threads"
+ , NULL
+ , "NFS Server Threads"
+ , "threads"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_NFSD_NAME
+ , NETDATA_CHART_PRIO_NFSD_THREADS
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ rd_threads = rrddim_add(st, "threads", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ }
+ else rrdset_next(st);
+
+ rrddim_set_by_pointer(st, rd_threads, th_threads);
+ rrdset_done(st);
+ }
+
+ {
+ static RRDSET *st = NULL;
+ static RRDDIM *rd_full_count = NULL;
+
+ if(unlikely(!st)) {
+ st = rrdset_create_localhost(
+ "nfsd"
+ , "threads_fullcnt"
+ , NULL
+ , "threads"
+ , NULL
+ , "NFS Server Threads Full Count"
+ , "ops/s"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_NFSD_NAME
+ , NETDATA_CHART_PRIO_NFSD_THREADS_FULLCNT
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ rd_full_count = rrddim_add(st, "full_count", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else rrdset_next(st);
+
+ rrddim_set_by_pointer(st, rd_full_count, th_fullcnt);
+ rrdset_done(st);
+ }
+
+ {
+ static RRDSET *st = NULL;
+ static RRDDIM *rd_th_hist10 = NULL,
+ *rd_th_hist20 = NULL,
+ *rd_th_hist30 = NULL,
+ *rd_th_hist40 = NULL,
+ *rd_th_hist50 = NULL,
+ *rd_th_hist60 = NULL,
+ *rd_th_hist70 = NULL,
+ *rd_th_hist80 = NULL,
+ *rd_th_hist90 = NULL,
+ *rd_th_hist100 = NULL;
+
+ if(unlikely(!st)) {
+ st = rrdset_create_localhost(
+ "nfsd"
+ , "threads_histogram"
+ , NULL
+ , "threads"
+ , NULL
+ , "NFS Server Threads Usage Histogram"
+ , "percentage"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_NFSD_NAME
+ , NETDATA_CHART_PRIO_NFSD_THREADS_HISTOGRAM
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ rd_th_hist10 = rrddim_add(st, "0%-10%", NULL, 1, 1000, RRD_ALGORITHM_ABSOLUTE);
+ rd_th_hist20 = rrddim_add(st, "10%-20%", NULL, 1, 1000, RRD_ALGORITHM_ABSOLUTE);
+ rd_th_hist30 = rrddim_add(st, "20%-30%", NULL, 1, 1000, RRD_ALGORITHM_ABSOLUTE);
+ rd_th_hist40 = rrddim_add(st, "30%-40%", NULL, 1, 1000, RRD_ALGORITHM_ABSOLUTE);
+ rd_th_hist50 = rrddim_add(st, "40%-50%", NULL, 1, 1000, RRD_ALGORITHM_ABSOLUTE);
+ rd_th_hist60 = rrddim_add(st, "50%-60%", NULL, 1, 1000, RRD_ALGORITHM_ABSOLUTE);
+ rd_th_hist70 = rrddim_add(st, "60%-70%", NULL, 1, 1000, RRD_ALGORITHM_ABSOLUTE);
+ rd_th_hist80 = rrddim_add(st, "70%-80%", NULL, 1, 1000, RRD_ALGORITHM_ABSOLUTE);
+ rd_th_hist90 = rrddim_add(st, "80%-90%", NULL, 1, 1000, RRD_ALGORITHM_ABSOLUTE);
+ rd_th_hist100 = rrddim_add(st, "90%-100%", NULL, 1, 1000, RRD_ALGORITHM_ABSOLUTE);
+ }
+ else rrdset_next(st);
+
+ rrddim_set_by_pointer(st, rd_th_hist10, th_hist10);
+ rrddim_set_by_pointer(st, rd_th_hist20, th_hist20);
+ rrddim_set_by_pointer(st, rd_th_hist30, th_hist30);
+ rrddim_set_by_pointer(st, rd_th_hist40, th_hist40);
+ rrddim_set_by_pointer(st, rd_th_hist50, th_hist50);
+ rrddim_set_by_pointer(st, rd_th_hist60, th_hist60);
+ rrddim_set_by_pointer(st, rd_th_hist70, th_hist70);
+ rrddim_set_by_pointer(st, rd_th_hist80, th_hist80);
+ rrddim_set_by_pointer(st, rd_th_hist90, th_hist90);
+ rrddim_set_by_pointer(st, rd_th_hist100, th_hist100);
+ rrdset_done(st);
+ }
+ }
+
+ // --------------------------------------------------------------------
+
+ if(do_ra == 2) {
+ static RRDSET *st = NULL;
+ static RRDDIM *rd_ra_hist10 = NULL,
+ *rd_ra_hist20 = NULL,
+ *rd_ra_hist30 = NULL,
+ *rd_ra_hist40 = NULL,
+ *rd_ra_hist50 = NULL,
+ *rd_ra_hist60 = NULL,
+ *rd_ra_hist70 = NULL,
+ *rd_ra_hist80 = NULL,
+ *rd_ra_hist90 = NULL,
+ *rd_ra_hist100 = NULL,
+ *rd_ra_none = NULL;
+
+ if(unlikely(!st)) {
+ st = rrdset_create_localhost(
+ "nfsd"
+ , "readahead"
+ , NULL
+ , "readahead"
+ , NULL
+ , "NFS Server Read Ahead Depth"
+ , "percentage"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_NFSD_NAME
+ , NETDATA_CHART_PRIO_NFSD_READAHEAD
+ , update_every
+ , RRDSET_TYPE_STACKED
+ );
+
+ rd_ra_hist10 = rrddim_add(st, "10%", NULL, 1, 1, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL);
+ rd_ra_hist20 = rrddim_add(st, "20%", NULL, 1, 1, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL);
+ rd_ra_hist30 = rrddim_add(st, "30%", NULL, 1, 1, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL);
+ rd_ra_hist40 = rrddim_add(st, "40%", NULL, 1, 1, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL);
+ rd_ra_hist50 = rrddim_add(st, "50%", NULL, 1, 1, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL);
+ rd_ra_hist60 = rrddim_add(st, "60%", NULL, 1, 1, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL);
+ rd_ra_hist70 = rrddim_add(st, "70%", NULL, 1, 1, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL);
+ rd_ra_hist80 = rrddim_add(st, "80%", NULL, 1, 1, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL);
+ rd_ra_hist90 = rrddim_add(st, "90%", NULL, 1, 1, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL);
+ rd_ra_hist100 = rrddim_add(st, "100%", NULL, 1, 1, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL);
+ rd_ra_none = rrddim_add(st, "misses", NULL, 1, 1, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL);
+ }
+ else rrdset_next(st);
+
+ // ignore ra_size
+ (void)ra_size;
+
+ rrddim_set_by_pointer(st, rd_ra_hist10, ra_hist10);
+ rrddim_set_by_pointer(st, rd_ra_hist20, ra_hist20);
+ rrddim_set_by_pointer(st, rd_ra_hist30, ra_hist30);
+ rrddim_set_by_pointer(st, rd_ra_hist40, ra_hist40);
+ rrddim_set_by_pointer(st, rd_ra_hist50, ra_hist50);
+ rrddim_set_by_pointer(st, rd_ra_hist60, ra_hist60);
+ rrddim_set_by_pointer(st, rd_ra_hist70, ra_hist70);
+ rrddim_set_by_pointer(st, rd_ra_hist80, ra_hist80);
+ rrddim_set_by_pointer(st, rd_ra_hist90, ra_hist90);
+ rrddim_set_by_pointer(st, rd_ra_hist100, ra_hist100);
+ rrddim_set_by_pointer(st, rd_ra_none, ra_none);
+ rrdset_done(st);
+ }
+
+ // --------------------------------------------------------------------
+
+ if(do_net == 2) {
+ static RRDSET *st = NULL;
+ static RRDDIM *rd_udp = NULL,
+ *rd_tcp = NULL;
+
+ if(unlikely(!st)) {
+ st = rrdset_create_localhost(
+ "nfsd"
+ , "net"
+ , NULL
+ , "network"
+ , NULL
+ , "NFS Server Network Statistics"
+ , "packets/s"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_NFSD_NAME
+ , NETDATA_CHART_PRIO_NFSD_NET
+ , update_every
+ , RRDSET_TYPE_STACKED
+ );
+ rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
+
+ rd_udp = rrddim_add(st, "udp", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_tcp = rrddim_add(st, "tcp", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else rrdset_next(st);
+
+ // ignore net_count, net_tcp_connections
+ (void)net_count;
+ (void)net_tcp_connections;
+
+ rrddim_set_by_pointer(st, rd_udp, net_udp_count);
+ rrddim_set_by_pointer(st, rd_tcp, net_tcp_count);
+ rrdset_done(st);
+ }
+
+ // --------------------------------------------------------------------
+
+ if(do_rpc == 2) {
+ static RRDSET *st = NULL;
+ static RRDDIM *rd_calls = NULL,
+ *rd_bad_format = NULL,
+ *rd_bad_auth = NULL;
+
+ if(unlikely(!st)) {
+ st = rrdset_create_localhost(
+ "nfsd"
+ , "rpc"
+ , NULL
+ , "rpc"
+ , NULL
+ , "NFS Server Remote Procedure Calls Statistics"
+ , "calls/s"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_NFSD_NAME
+ , NETDATA_CHART_PRIO_NFSD_RPC
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+ rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
+
+ rd_calls = rrddim_add(st, "calls", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_bad_format = rrddim_add(st, "bad_format", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_bad_auth = rrddim_add(st, "bad_auth", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else rrdset_next(st);
+
+ // ignore rpc_bad_client
+ (void)rpc_bad_client;
+
+ rrddim_set_by_pointer(st, rd_calls, rpc_calls);
+ rrddim_set_by_pointer(st, rd_bad_format, rpc_bad_format);
+ rrddim_set_by_pointer(st, rd_bad_auth, rpc_bad_auth);
+ rrdset_done(st);
+ }
+
+ // --------------------------------------------------------------------
+
+ if(do_proc2 == 2) {
+ static RRDSET *st = NULL;
+ if(unlikely(!st)) {
+ st = rrdset_create_localhost(
+ "nfsd"
+ , "proc2"
+ , NULL
+ , "nfsv2rpc"
+ , NULL
+ , "NFS v2 Server Remote Procedure Calls"
+ , "calls/s"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_NFSD_NAME
+ , NETDATA_CHART_PRIO_NFSD_PROC2
+ , update_every
+ , RRDSET_TYPE_STACKED
+ );
+ }
+ else rrdset_next(st);
+
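+ // dimensions are added lazily, the first time each procedure appears
+ // in /proc/net/rpc/nfsd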
+ size_t i;
+ for(i = 0; nfsd_proc2_values[i].present ; i++) {
+ if(unlikely(!nfsd_proc2_values[i].rd))
+ nfsd_proc2_values[i].rd = rrddim_add(st, nfsd_proc2_values[i].name, NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+
+ rrddim_set_by_pointer(st, nfsd_proc2_values[i].rd, nfsd_proc2_values[i].value);
+ }
+
+ rrdset_done(st);
+ }
+
+ // --------------------------------------------------------------------
+
+ if(do_proc3 == 2) {
+ static RRDSET *st = NULL;
+ if(unlikely(!st)) {
+ st = rrdset_create_localhost(
+ "nfsd"
+ , "proc3"
+ , NULL
+ , "nfsv3rpc"
+ , NULL
+ , "NFS v3 Server Remote Procedure Calls"
+ , "calls/s"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_NFSD_NAME
+ , NETDATA_CHART_PRIO_NFSD_PROC3
+ , update_every
+ , RRDSET_TYPE_STACKED
+ );
+ }
+ else rrdset_next(st);
+
+ size_t i;
+ for(i = 0; nfsd_proc3_values[i].present ; i++) {
+ if(unlikely(!nfsd_proc3_values[i].rd))
+ nfsd_proc3_values[i].rd = rrddim_add(st, nfsd_proc3_values[i].name, NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+
+ rrddim_set_by_pointer(st, nfsd_proc3_values[i].rd, nfsd_proc3_values[i].value);
+ }
+
+ rrdset_done(st);
+ }
+
+ // --------------------------------------------------------------------
+
+ if(do_proc4 == 2) {
+ static RRDSET *st = NULL;
+ if(unlikely(!st)) {
+ st = rrdset_create_localhost(
+ "nfsd"
+ , "proc4"
+ , NULL
+ , "nfsv4rpc"
+ , NULL
+ , "NFS v4 Server Remote Procedure Calls"
+ , "calls/s"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_NFSD_NAME
+ , NETDATA_CHART_PRIO_NFSD_PROC4
+ , update_every
+ , RRDSET_TYPE_STACKED
+ );
+ }
+ else rrdset_next(st);
+
+ size_t i;
+ for(i = 0; nfsd_proc4_values[i].present ; i++) {
+ if(unlikely(!nfsd_proc4_values[i].rd))
+ nfsd_proc4_values[i].rd = rrddim_add(st, nfsd_proc4_values[i].name, NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+
+ rrddim_set_by_pointer(st, nfsd_proc4_values[i].rd, nfsd_proc4_values[i].value);
+ }
+
+ rrdset_done(st);
+ }
+
+ // --------------------------------------------------------------------
+
+ if(do_proc4ops == 2) {
+ static RRDSET *st = NULL;
+ if(unlikely(!st)) {
+ st = rrdset_create_localhost(
+ "nfsd"
+ , "proc4ops"
+ , NULL
+ , "nfsv2ops"
+ , NULL
+ , "NFS v4 Server Operations"
+ , "operations/s"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_NFSD_NAME
+ , NETDATA_CHART_PRIO_NFSD_PROC4OPS
+ , update_every
+ , RRDSET_TYPE_STACKED
+ );
+ }
+ else rrdset_next(st);
+
+ size_t i;
+ for(i = 0; nfsd4_ops_values[i].present ; i++) {
+ if(unlikely(!nfsd4_ops_values[i].rd))
+ nfsd4_ops_values[i].rd = rrddim_add(st, nfsd4_ops_values[i].name, NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+
+ rrddim_set_by_pointer(st, nfsd4_ops_values[i].rd, nfsd4_ops_values[i].value);
+ }
+
+ rrdset_done(st);
+ }
+
+ return 0;
+}
diff --git a/collectors/proc.plugin/proc_net_sctp_snmp.c b/collectors/proc.plugin/proc_net_sctp_snmp.c
new file mode 100644
index 000000000..bd1062e98
--- /dev/null
+++ b/collectors/proc.plugin/proc_net_sctp_snmp.c
@@ -0,0 +1,352 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "plugin_proc.h"
+#define PLUGIN_PROC_MODULE_NET_SCTP_SNMP_NAME "/proc/net/sctp/snmp"
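+
+// /proc/net/sctp/snmp is a flat list of "Name value" lines; the ARL base
+// below maps each expected counter name to a static variable that the
+// charts further down read directly.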
+
+int do_proc_net_sctp_snmp(int update_every, usec_t dt) {
+ (void)dt;
+
+ static procfile *ff = NULL;
+
+ static int
+ do_associations = -1,
+ do_transitions = -1,
+ do_packet_errors = -1,
+ do_packets = -1,
+ do_fragmentation = -1,
+ do_chunk_types = -1;
+
+ static ARL_BASE *arl_base = NULL;
+
+ static unsigned long long SctpCurrEstab = 0ULL;
+ static unsigned long long SctpActiveEstabs = 0ULL;
+ static unsigned long long SctpPassiveEstabs = 0ULL;
+ static unsigned long long SctpAborteds = 0ULL;
+ static unsigned long long SctpShutdowns = 0ULL;
+ static unsigned long long SctpOutOfBlues = 0ULL;
+ static unsigned long long SctpChecksumErrors = 0ULL;
+ static unsigned long long SctpOutCtrlChunks = 0ULL;
+ static unsigned long long SctpOutOrderChunks = 0ULL;
+ static unsigned long long SctpOutUnorderChunks = 0ULL;
+ static unsigned long long SctpInCtrlChunks = 0ULL;
+ static unsigned long long SctpInOrderChunks = 0ULL;
+ static unsigned long long SctpInUnorderChunks = 0ULL;
+ static unsigned long long SctpFragUsrMsgs = 0ULL;
+ static unsigned long long SctpReasmUsrMsgs = 0ULL;
+ static unsigned long long SctpOutSCTPPacks = 0ULL;
+ static unsigned long long SctpInSCTPPacks = 0ULL;
+ static unsigned long long SctpT1InitExpireds = 0ULL;
+ static unsigned long long SctpT1CookieExpireds = 0ULL;
+ static unsigned long long SctpT2ShutdownExpireds = 0ULL;
+ static unsigned long long SctpT3RtxExpireds = 0ULL;
+ static unsigned long long SctpT4RtoExpireds = 0ULL;
+ static unsigned long long SctpT5ShutdownGuardExpireds = 0ULL;
+ static unsigned long long SctpDelaySackExpireds = 0ULL;
+ static unsigned long long SctpAutocloseExpireds = 0ULL;
+ static unsigned long long SctpT3Retransmits = 0ULL;
+ static unsigned long long SctpPmtudRetransmits = 0ULL;
+ static unsigned long long SctpFastRetransmits = 0ULL;
+ static unsigned long long SctpInPktSoftirq = 0ULL;
+ static unsigned long long SctpInPktBacklog = 0ULL;
+ static unsigned long long SctpInPktDiscards = 0ULL;
+ static unsigned long long SctpInDataChunkDiscards = 0ULL;
+
+ if(unlikely(!arl_base)) {
+ do_associations = config_get_boolean_ondemand("plugin:proc:/proc/net/sctp/snmp", "established associations", CONFIG_BOOLEAN_AUTO);
+ do_transitions = config_get_boolean_ondemand("plugin:proc:/proc/net/sctp/snmp", "association transitions", CONFIG_BOOLEAN_AUTO);
+ do_fragmentation = config_get_boolean_ondemand("plugin:proc:/proc/net/sctp/snmp", "fragmentation", CONFIG_BOOLEAN_AUTO);
+ do_packets = config_get_boolean_ondemand("plugin:proc:/proc/net/sctp/snmp", "packets", CONFIG_BOOLEAN_AUTO);
+ do_packet_errors = config_get_boolean_ondemand("plugin:proc:/proc/net/sctp/snmp", "packet errors", CONFIG_BOOLEAN_AUTO);
+ do_chunk_types = config_get_boolean_ondemand("plugin:proc:/proc/net/sctp/snmp", "chunk types", CONFIG_BOOLEAN_AUTO);
+
+ arl_base = arl_create("sctp", NULL, 60);
+ arl_expect(arl_base, "SctpCurrEstab", &SctpCurrEstab);
+ arl_expect(arl_base, "SctpActiveEstabs", &SctpActiveEstabs);
+ arl_expect(arl_base, "SctpPassiveEstabs", &SctpPassiveEstabs);
+ arl_expect(arl_base, "SctpAborteds", &SctpAborteds);
+ arl_expect(arl_base, "SctpShutdowns", &SctpShutdowns);
+ arl_expect(arl_base, "SctpOutOfBlues", &SctpOutOfBlues);
+ arl_expect(arl_base, "SctpChecksumErrors", &SctpChecksumErrors);
+ arl_expect(arl_base, "SctpOutCtrlChunks", &SctpOutCtrlChunks);
+ arl_expect(arl_base, "SctpOutOrderChunks", &SctpOutOrderChunks);
+ arl_expect(arl_base, "SctpOutUnorderChunks", &SctpOutUnorderChunks);
+ arl_expect(arl_base, "SctpInCtrlChunks", &SctpInCtrlChunks);
+ arl_expect(arl_base, "SctpInOrderChunks", &SctpInOrderChunks);
+ arl_expect(arl_base, "SctpInUnorderChunks", &SctpInUnorderChunks);
+ arl_expect(arl_base, "SctpFragUsrMsgs", &SctpFragUsrMsgs);
+ arl_expect(arl_base, "SctpReasmUsrMsgs", &SctpReasmUsrMsgs);
+ arl_expect(arl_base, "SctpOutSCTPPacks", &SctpOutSCTPPacks);
+ arl_expect(arl_base, "SctpInSCTPPacks", &SctpInSCTPPacks);
+ arl_expect(arl_base, "SctpT1InitExpireds", &SctpT1InitExpireds);
+ arl_expect(arl_base, "SctpT1CookieExpireds", &SctpT1CookieExpireds);
+ arl_expect(arl_base, "SctpT2ShutdownExpireds", &SctpT2ShutdownExpireds);
+ arl_expect(arl_base, "SctpT3RtxExpireds", &SctpT3RtxExpireds);
+ arl_expect(arl_base, "SctpT4RtoExpireds", &SctpT4RtoExpireds);
+ arl_expect(arl_base, "SctpT5ShutdownGuardExpireds", &SctpT5ShutdownGuardExpireds);
+ arl_expect(arl_base, "SctpDelaySackExpireds", &SctpDelaySackExpireds);
+ arl_expect(arl_base, "SctpAutocloseExpireds", &SctpAutocloseExpireds);
+ arl_expect(arl_base, "SctpT3Retransmits", &SctpT3Retransmits);
+ arl_expect(arl_base, "SctpPmtudRetransmits", &SctpPmtudRetransmits);
+ arl_expect(arl_base, "SctpFastRetransmits", &SctpFastRetransmits);
+ arl_expect(arl_base, "SctpInPktSoftirq", &SctpInPktSoftirq);
+ arl_expect(arl_base, "SctpInPktBacklog", &SctpInPktBacklog);
+ arl_expect(arl_base, "SctpInPktDiscards", &SctpInPktDiscards);
+ arl_expect(arl_base, "SctpInDataChunkDiscards", &SctpInDataChunkDiscards);
+ }
+
+ if(unlikely(!ff)) {
+ char filename[FILENAME_MAX + 1];
+ snprintfz(filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/proc/net/sctp/snmp");
+ ff = procfile_open(config_get("plugin:proc:/proc/net/sctp/snmp", "filename to monitor", filename), " \t:", PROCFILE_FLAG_DEFAULT);
+ if(unlikely(!ff))
+ return 1;
+ }
+
+ ff = procfile_readall(ff);
+ if(unlikely(!ff))
+ return 0; // we return 0, so that we will retry opening it next time
+
+ size_t lines = procfile_lines(ff), l;
+
+ arl_begin(arl_base);
+
+ for(l = 0; l < lines ;l++) {
+ size_t words = procfile_linewords(ff, l);
+ if(unlikely(words < 2)) {
+ if(unlikely(words)) error("Cannot read /proc/net/sctp/snmp line %zu. Expected 2 params, read %zu.", l, words);
+ continue;
+ }
+
+ if(unlikely(arl_check(arl_base,
+ procfile_lineword(ff, l, 0),
+ procfile_lineword(ff, l, 1)))) break;
+ }
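+
+ // all expected counters are now refreshed; render whichever charts are
+ // enabled (or auto-enabled because they have data)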
+
+ // --------------------------------------------------------------------
+
+ if(do_associations == CONFIG_BOOLEAN_YES || (do_associations == CONFIG_BOOLEAN_AUTO && SctpCurrEstab)) {
+ do_associations = CONFIG_BOOLEAN_YES;
+ static RRDSET *st = NULL;
+ static RRDDIM *rd_established = NULL;
+
+ if(unlikely(!st)) {
+ st = rrdset_create_localhost(
+ "sctp"
+ , "established"
+ , NULL
+ , "associations"
+ , NULL
+ , "SCTP current total number of established associations"
+ , "associations"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_NET_SCTP_SNMP_NAME
+ , NETDATA_CHART_PRIO_SCTP
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ rd_established = rrddim_add(st, "SctpCurrEstab", "established", 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ }
+ else rrdset_next(st);
+
+ rrddim_set_by_pointer(st, rd_established, SctpCurrEstab);
+ rrdset_done(st);
+ }
+
+ // --------------------------------------------------------------------
+
+ if(do_transitions == CONFIG_BOOLEAN_YES || (do_transitions == CONFIG_BOOLEAN_AUTO && (SctpActiveEstabs || SctpPassiveEstabs || SctpAborteds || SctpShutdowns))) {
+ do_transitions = CONFIG_BOOLEAN_YES;
+ static RRDSET *st = NULL;
+ static RRDDIM *rd_active = NULL,
+ *rd_passive = NULL,
+ *rd_aborted = NULL,
+ *rd_shutdown = NULL;
+
+ if(unlikely(!st)) {
+ st = rrdset_create_localhost(
+ "sctp"
+ , "transitions"
+ , NULL
+ , "transitions"
+ , NULL
+ , "SCTP Association Transitions"
+ , "transitions/s"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_NET_SCTP_SNMP_NAME
+ , NETDATA_CHART_PRIO_SCTP + 10
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ rd_active = rrddim_add(st, "SctpActiveEstabs", "active", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_passive = rrddim_add(st, "SctpPassiveEstabs", "passive", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_aborted = rrddim_add(st, "SctpAborteds", "aborted", -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_shutdown = rrddim_add(st, "SctpShutdowns", "shutdown", -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else rrdset_next(st);
+
+ rrddim_set_by_pointer(st, rd_active, SctpActiveEstabs);
+ rrddim_set_by_pointer(st, rd_passive, SctpPassiveEstabs);
+ rrddim_set_by_pointer(st, rd_aborted, SctpAborteds);
+ rrddim_set_by_pointer(st, rd_shutdown, SctpShutdowns);
+ rrdset_done(st);
+ }
+
+ // --------------------------------------------------------------------
+
+ if(do_packets == CONFIG_BOOLEAN_YES || (do_packets == CONFIG_BOOLEAN_AUTO && (SctpInSCTPPacks || SctpOutSCTPPacks))) {
+ do_packets = CONFIG_BOOLEAN_YES;
+ static RRDSET *st = NULL;
+ static RRDDIM *rd_received = NULL,
+ *rd_sent = NULL;
+
+ if(unlikely(!st)) {
+ st = rrdset_create_localhost(
+ "sctp"
+ , "packets"
+ , NULL
+ , "packets"
+ , NULL
+ , "SCTP Packets"
+ , "packets/s"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_NET_SCTP_SNMP_NAME
+ , NETDATA_CHART_PRIO_SCTP + 20
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+ rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
+
+ rd_received = rrddim_add(st, "SctpInSCTPPacks", "received", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_sent = rrddim_add(st, "SctpOutSCTPPacks", "sent", -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else rrdset_next(st);
+
+ rrddim_set_by_pointer(st, rd_received, SctpInSCTPPacks);
+ rrddim_set_by_pointer(st, rd_sent, SctpOutSCTPPacks);
+ rrdset_done(st);
+ }
+
+ // --------------------------------------------------------------------
+
+ if(do_packet_errors == CONFIG_BOOLEAN_YES || (do_packet_errors == CONFIG_BOOLEAN_AUTO && (SctpOutOfBlues || SctpChecksumErrors))) {
+ do_packet_errors = CONFIG_BOOLEAN_YES;
+ static RRDSET *st = NULL;
+ static RRDDIM *rd_invalid = NULL,
+ *rd_csum = NULL;
+
+ if(unlikely(!st)) {
+ st = rrdset_create_localhost(
+ "sctp"
+ , "packet_errors"
+ , NULL
+ , "packets"
+ , NULL
+ , "SCTP Packet Errors"
+ , "packets/s"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_NET_SCTP_SNMP_NAME
+ , NETDATA_CHART_PRIO_SCTP + 30
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+ rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
+
+ rd_invalid = rrddim_add(st, "SctpOutOfBlues", "invalid", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_csum = rrddim_add(st, "SctpChecksumErrors", "checksum", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else rrdset_next(st);
+
+ rrddim_set_by_pointer(st, rd_invalid, SctpOutOfBlues);
+ rrddim_set_by_pointer(st, rd_csum, SctpChecksumErrors);
+ rrdset_done(st);
+ }
+
+ // --------------------------------------------------------------------
+
+ if(do_fragmentation == CONFIG_BOOLEAN_YES || (do_fragmentation == CONFIG_BOOLEAN_AUTO && (SctpFragUsrMsgs || SctpReasmUsrMsgs))) {
+ do_fragmentation = CONFIG_BOOLEAN_YES;
+
+ static RRDSET *st = NULL;
+ static RRDDIM *rd_fragmented = NULL,
+ *rd_reassembled = NULL;
+
+ if(unlikely(!st)) {
+ st = rrdset_create_localhost(
+ "sctp"
+ , "fragmentation"
+ , NULL
+ , "fragmentation"
+ , NULL
+ , "SCTP Fragmentation"
+ , "packets/s"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_NET_SCTP_SNMP_NAME
+ , NETDATA_CHART_PRIO_SCTP + 40
+ , update_every
+ , RRDSET_TYPE_LINE);
+ rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
+
+ rd_reassembled = rrddim_add(st, "SctpReasmUsrMsgs", "reassembled", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_fragmented = rrddim_add(st, "SctpFragUsrMsgs", "fragmented", -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else rrdset_next(st);
+
+ rrddim_set_by_pointer(st, rd_reassembled, SctpReasmUsrMsgs);
+ rrddim_set_by_pointer(st, rd_fragmented, SctpFragUsrMsgs);
+ rrdset_done(st);
+ }
+
+ // --------------------------------------------------------------------
+
+ if(do_chunk_types == CONFIG_BOOLEAN_YES || (do_chunk_types == CONFIG_BOOLEAN_AUTO
+ && (SctpInCtrlChunks || SctpInOrderChunks || SctpInUnorderChunks || SctpOutCtrlChunks || SctpOutOrderChunks || SctpOutUnorderChunks))) {
+ do_chunk_types = CONFIG_BOOLEAN_YES;
+ static RRDSET *st = NULL;
+ static RRDDIM
+ *rd_InCtrl = NULL,
+ *rd_InOrder = NULL,
+ *rd_InUnorder = NULL,
+ *rd_OutCtrl = NULL,
+ *rd_OutOrder = NULL,
+ *rd_OutUnorder = NULL;
+
+ if(unlikely(!st)) {
+ st = rrdset_create_localhost(
+ "sctp"
+ , "chunks"
+ , NULL
+ , "chunks"
+ , NULL
+ , "SCTP Chunk Types"
+ , "chunks/s"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_NET_SCTP_SNMP_NAME
+ , NETDATA_CHART_PRIO_SCTP + 50
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+ rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
+
+ rd_InCtrl = rrddim_add(st, "SctpInCtrlChunks", "InCtrl", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_InOrder = rrddim_add(st, "SctpInOrderChunks", "InOrder", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_InUnorder = rrddim_add(st, "SctpInUnorderChunks", "InUnorder", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_OutCtrl = rrddim_add(st, "SctpOutCtrlChunks", "OutCtrl", -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_OutOrder = rrddim_add(st, "SctpOutOrderChunks", "OutOrder", -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_OutUnorder = rrddim_add(st, "SctpOutUnorderChunks", "OutUnorder", -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else rrdset_next(st);
+
+ rrddim_set_by_pointer(st, rd_InCtrl, SctpInCtrlChunks);
+ rrddim_set_by_pointer(st, rd_InOrder, SctpInOrderChunks);
+ rrddim_set_by_pointer(st, rd_InUnorder, SctpInUnorderChunks);
+ rrddim_set_by_pointer(st, rd_OutCtrl, SctpOutCtrlChunks);
+ rrddim_set_by_pointer(st, rd_OutOrder, SctpOutOrderChunks);
+ rrddim_set_by_pointer(st, rd_OutUnorder, SctpOutUnorderChunks);
+ rrdset_done(st);
+ }
+
+ return 0;
+}
+
diff --git a/collectors/proc.plugin/proc_net_snmp.c b/collectors/proc.plugin/proc_net_snmp.c
new file mode 100644
index 000000000..ffd368f6e
--- /dev/null
+++ b/collectors/proc.plugin/proc_net_snmp.c
@@ -0,0 +1,1085 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "plugin_proc.h"
+#define PLUGIN_PROC_MODULE_NET_SNMP_NAME "/proc/net/snmp"
+
+#define RRD_TYPE_NET_SNMP "ipv4"
+
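+// /proc/net/snmp groups its counters per protocol in pairs of lines: a
+// header line with the field names ("Ip: Forwarding DefaultTTL ...")
+// followed by a line with the corresponding values. The struct below holds
+// the parsed counters; the loop in do_proc_net_snmp() matches a header line
+// by hash, then feeds the name/value columns of the following line into the
+// protocol's ARL.
+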
+static struct proc_net_snmp {
+ // kernel_uint_t ip_Forwarding;
+ kernel_uint_t ip_DefaultTTL;
+ kernel_uint_t ip_InReceives;
+ kernel_uint_t ip_InHdrErrors;
+ kernel_uint_t ip_InAddrErrors;
+ kernel_uint_t ip_ForwDatagrams;
+ kernel_uint_t ip_InUnknownProtos;
+ kernel_uint_t ip_InDiscards;
+ kernel_uint_t ip_InDelivers;
+ kernel_uint_t ip_OutRequests;
+ kernel_uint_t ip_OutDiscards;
+ kernel_uint_t ip_OutNoRoutes;
+ kernel_uint_t ip_ReasmTimeout;
+ kernel_uint_t ip_ReasmReqds;
+ kernel_uint_t ip_ReasmOKs;
+ kernel_uint_t ip_ReasmFails;
+ kernel_uint_t ip_FragOKs;
+ kernel_uint_t ip_FragFails;
+ kernel_uint_t ip_FragCreates;
+
+ kernel_uint_t icmp_InMsgs;
+ kernel_uint_t icmp_OutMsgs;
+ kernel_uint_t icmp_InErrors;
+ kernel_uint_t icmp_OutErrors;
+ kernel_uint_t icmp_InCsumErrors;
+
+ kernel_uint_t icmpmsg_InEchoReps;
+ kernel_uint_t icmpmsg_OutEchoReps;
+ kernel_uint_t icmpmsg_InDestUnreachs;
+ kernel_uint_t icmpmsg_OutDestUnreachs;
+ kernel_uint_t icmpmsg_InRedirects;
+ kernel_uint_t icmpmsg_OutRedirects;
+ kernel_uint_t icmpmsg_InEchos;
+ kernel_uint_t icmpmsg_OutEchos;
+ kernel_uint_t icmpmsg_InRouterAdvert;
+ kernel_uint_t icmpmsg_OutRouterAdvert;
+ kernel_uint_t icmpmsg_InRouterSelect;
+ kernel_uint_t icmpmsg_OutRouterSelect;
+ kernel_uint_t icmpmsg_InTimeExcds;
+ kernel_uint_t icmpmsg_OutTimeExcds;
+ kernel_uint_t icmpmsg_InParmProbs;
+ kernel_uint_t icmpmsg_OutParmProbs;
+ kernel_uint_t icmpmsg_InTimestamps;
+ kernel_uint_t icmpmsg_OutTimestamps;
+ kernel_uint_t icmpmsg_InTimestampReps;
+ kernel_uint_t icmpmsg_OutTimestampReps;
+
+ //kernel_uint_t tcp_RtoAlgorithm;
+ //kernel_uint_t tcp_RtoMin;
+ //kernel_uint_t tcp_RtoMax;
+ ssize_t tcp_MaxConn;
+ kernel_uint_t tcp_ActiveOpens;
+ kernel_uint_t tcp_PassiveOpens;
+ kernel_uint_t tcp_AttemptFails;
+ kernel_uint_t tcp_EstabResets;
+ kernel_uint_t tcp_CurrEstab;
+ kernel_uint_t tcp_InSegs;
+ kernel_uint_t tcp_OutSegs;
+ kernel_uint_t tcp_RetransSegs;
+ kernel_uint_t tcp_InErrs;
+ kernel_uint_t tcp_OutRsts;
+ kernel_uint_t tcp_InCsumErrors;
+
+ kernel_uint_t udp_InDatagrams;
+ kernel_uint_t udp_NoPorts;
+ kernel_uint_t udp_InErrors;
+ kernel_uint_t udp_OutDatagrams;
+ kernel_uint_t udp_RcvbufErrors;
+ kernel_uint_t udp_SndbufErrors;
+ kernel_uint_t udp_InCsumErrors;
+ kernel_uint_t udp_IgnoredMulti;
+
+ kernel_uint_t udplite_InDatagrams;
+ kernel_uint_t udplite_NoPorts;
+ kernel_uint_t udplite_InErrors;
+ kernel_uint_t udplite_OutDatagrams;
+ kernel_uint_t udplite_RcvbufErrors;
+ kernel_uint_t udplite_SndbufErrors;
+ kernel_uint_t udplite_InCsumErrors;
+ kernel_uint_t udplite_IgnoredMulti;
+} snmp_root = { 0 };
+
+int do_proc_net_snmp(int update_every, usec_t dt) {
+ (void)dt;
+
+ static procfile *ff = NULL;
+ static int do_ip_packets = -1, do_ip_fragsout = -1, do_ip_fragsin = -1, do_ip_errors = -1,
+ do_tcp_sockets = -1, do_tcp_packets = -1, do_tcp_errors = -1, do_tcp_handshake = -1, do_tcp_opens = -1,
+ do_udp_packets = -1, do_udp_errors = -1, do_icmp_packets = -1, do_icmpmsg = -1, do_udplite_packets = -1;
+ static uint32_t hash_ip = 0, hash_icmp = 0, hash_tcp = 0, hash_udp = 0, hash_icmpmsg = 0, hash_udplite = 0;
+
+ static ARL_BASE *arl_ip = NULL,
+ *arl_icmp = NULL,
+ *arl_icmpmsg = NULL,
+ *arl_tcp = NULL,
+ *arl_udp = NULL,
+ *arl_udplite = NULL;
+
+ static RRDVAR *tcp_max_connections_var = NULL;
+
+ if(unlikely(!arl_ip)) {
+ do_ip_packets = config_get_boolean_ondemand("plugin:proc:/proc/net/snmp", "ipv4 packets", CONFIG_BOOLEAN_AUTO);
+ do_ip_fragsout = config_get_boolean_ondemand("plugin:proc:/proc/net/snmp", "ipv4 fragments sent", CONFIG_BOOLEAN_AUTO);
+ do_ip_fragsin = config_get_boolean_ondemand("plugin:proc:/proc/net/snmp", "ipv4 fragments assembly", CONFIG_BOOLEAN_AUTO);
+ do_ip_errors = config_get_boolean_ondemand("plugin:proc:/proc/net/snmp", "ipv4 errors", CONFIG_BOOLEAN_AUTO);
+ do_tcp_sockets = config_get_boolean_ondemand("plugin:proc:/proc/net/snmp", "ipv4 TCP connections", CONFIG_BOOLEAN_AUTO);
+ do_tcp_packets = config_get_boolean_ondemand("plugin:proc:/proc/net/snmp", "ipv4 TCP packets", CONFIG_BOOLEAN_AUTO);
+ do_tcp_errors = config_get_boolean_ondemand("plugin:proc:/proc/net/snmp", "ipv4 TCP errors", CONFIG_BOOLEAN_AUTO);
+ do_tcp_opens = config_get_boolean_ondemand("plugin:proc:/proc/net/snmp", "ipv4 TCP opens", CONFIG_BOOLEAN_AUTO);
+ do_tcp_handshake = config_get_boolean_ondemand("plugin:proc:/proc/net/snmp", "ipv4 TCP handshake issues", CONFIG_BOOLEAN_AUTO);
+ do_udp_packets = config_get_boolean_ondemand("plugin:proc:/proc/net/snmp", "ipv4 UDP packets", CONFIG_BOOLEAN_AUTO);
+ do_udp_errors = config_get_boolean_ondemand("plugin:proc:/proc/net/snmp", "ipv4 UDP errors", CONFIG_BOOLEAN_AUTO);
+ do_icmp_packets = config_get_boolean_ondemand("plugin:proc:/proc/net/snmp", "ipv4 ICMP packets", CONFIG_BOOLEAN_AUTO);
+ do_icmpmsg = config_get_boolean_ondemand("plugin:proc:/proc/net/snmp", "ipv4 ICMP messages", CONFIG_BOOLEAN_AUTO);
+ do_udplite_packets = config_get_boolean_ondemand("plugin:proc:/proc/net/snmp", "ipv4 UDPLite packets", CONFIG_BOOLEAN_AUTO);
+
+ hash_ip = simple_hash("Ip");
+ hash_tcp = simple_hash("Tcp");
+ hash_udp = simple_hash("Udp");
+ hash_icmp = simple_hash("Icmp");
+ hash_icmpmsg = simple_hash("IcmpMsg");
+ hash_udplite = simple_hash("UdpLite");
+
+ arl_ip = arl_create("snmp/Ip", arl_callback_str2kernel_uint_t, 60);
+ // arl_expect(arl_ip, "Forwarding", &snmp_root.ip_Forwarding);
+ arl_expect(arl_ip, "DefaultTTL", &snmp_root.ip_DefaultTTL);
+ arl_expect(arl_ip, "InReceives", &snmp_root.ip_InReceives);
+ arl_expect(arl_ip, "InHdrErrors", &snmp_root.ip_InHdrErrors);
+ arl_expect(arl_ip, "InAddrErrors", &snmp_root.ip_InAddrErrors);
+ arl_expect(arl_ip, "ForwDatagrams", &snmp_root.ip_ForwDatagrams);
+ arl_expect(arl_ip, "InUnknownProtos", &snmp_root.ip_InUnknownProtos);
+ arl_expect(arl_ip, "InDiscards", &snmp_root.ip_InDiscards);
+ arl_expect(arl_ip, "InDelivers", &snmp_root.ip_InDelivers);
+ arl_expect(arl_ip, "OutRequests", &snmp_root.ip_OutRequests);
+ arl_expect(arl_ip, "OutDiscards", &snmp_root.ip_OutDiscards);
+ arl_expect(arl_ip, "OutNoRoutes", &snmp_root.ip_OutNoRoutes);
+ arl_expect(arl_ip, "ReasmTimeout", &snmp_root.ip_ReasmTimeout);
+ arl_expect(arl_ip, "ReasmReqds", &snmp_root.ip_ReasmReqds);
+ arl_expect(arl_ip, "ReasmOKs", &snmp_root.ip_ReasmOKs);
+ arl_expect(arl_ip, "ReasmFails", &snmp_root.ip_ReasmFails);
+ arl_expect(arl_ip, "FragOKs", &snmp_root.ip_FragOKs);
+ arl_expect(arl_ip, "FragFails", &snmp_root.ip_FragFails);
+ arl_expect(arl_ip, "FragCreates", &snmp_root.ip_FragCreates);
+
+ arl_icmp = arl_create("snmp/Icmp", arl_callback_str2kernel_uint_t, 60);
+ arl_expect(arl_icmp, "InMsgs", &snmp_root.icmp_InMsgs);
+ arl_expect(arl_icmp, "OutMsgs", &snmp_root.icmp_OutMsgs);
+ arl_expect(arl_icmp, "InErrors", &snmp_root.icmp_InErrors);
+ arl_expect(arl_icmp, "OutErrors", &snmp_root.icmp_OutErrors);
+ arl_expect(arl_icmp, "InCsumErrors", &snmp_root.icmp_InCsumErrors);
+
+ arl_icmpmsg = arl_create("snmp/Icmpmsg", arl_callback_str2kernel_uint_t, 60);
+ arl_expect(arl_icmpmsg, "InType0", &snmp_root.icmpmsg_InEchoReps);
+ arl_expect(arl_icmpmsg, "OutType0", &snmp_root.icmpmsg_OutEchoReps);
+ arl_expect(arl_icmpmsg, "InType3", &snmp_root.icmpmsg_InDestUnreachs);
+ arl_expect(arl_icmpmsg, "OutType3", &snmp_root.icmpmsg_OutDestUnreachs);
+ arl_expect(arl_icmpmsg, "InType5", &snmp_root.icmpmsg_InRedirects);
+ arl_expect(arl_icmpmsg, "OutType5", &snmp_root.icmpmsg_OutRedirects);
+ arl_expect(arl_icmpmsg, "InType8", &snmp_root.icmpmsg_InEchos);
+ arl_expect(arl_icmpmsg, "OutType8", &snmp_root.icmpmsg_OutEchos);
+ arl_expect(arl_icmpmsg, "InType9", &snmp_root.icmpmsg_InRouterAdvert);
+ arl_expect(arl_icmpmsg, "OutType9", &snmp_root.icmpmsg_OutRouterAdvert);
+ arl_expect(arl_icmpmsg, "InType10", &snmp_root.icmpmsg_InRouterSelect);
+ arl_expect(arl_icmpmsg, "OutType10", &snmp_root.icmpmsg_OutRouterSelect);
+ arl_expect(arl_icmpmsg, "InType11", &snmp_root.icmpmsg_InTimeExcds);
+ arl_expect(arl_icmpmsg, "OutType11", &snmp_root.icmpmsg_OutTimeExcds);
+ arl_expect(arl_icmpmsg, "InType12", &snmp_root.icmpmsg_InParmProbs);
+ arl_expect(arl_icmpmsg, "OutType12", &snmp_root.icmpmsg_OutParmProbs);
+ arl_expect(arl_icmpmsg, "InType13", &snmp_root.icmpmsg_InTimestamps);
+ arl_expect(arl_icmpmsg, "OutType13", &snmp_root.icmpmsg_OutTimestamps);
+ arl_expect(arl_icmpmsg, "InType14", &snmp_root.icmpmsg_InTimestampReps);
+ arl_expect(arl_icmpmsg, "OutType14", &snmp_root.icmpmsg_OutTimestampReps);
+
+ arl_tcp = arl_create("snmp/Tcp", arl_callback_str2kernel_uint_t, 60);
+ // arl_expect(arl_tcp, "RtoAlgorithm", &snmp_root.tcp_RtoAlgorithm);
+ // arl_expect(arl_tcp, "RtoMin", &snmp_root.tcp_RtoMin);
+ // arl_expect(arl_tcp, "RtoMax", &snmp_root.tcp_RtoMax);
+ arl_expect_custom(arl_tcp, "MaxConn", arl_callback_ssize_t, &snmp_root.tcp_MaxConn);
+ arl_expect(arl_tcp, "ActiveOpens", &snmp_root.tcp_ActiveOpens);
+ arl_expect(arl_tcp, "PassiveOpens", &snmp_root.tcp_PassiveOpens);
+ arl_expect(arl_tcp, "AttemptFails", &snmp_root.tcp_AttemptFails);
+ arl_expect(arl_tcp, "EstabResets", &snmp_root.tcp_EstabResets);
+ arl_expect(arl_tcp, "CurrEstab", &snmp_root.tcp_CurrEstab);
+ arl_expect(arl_tcp, "InSegs", &snmp_root.tcp_InSegs);
+ arl_expect(arl_tcp, "OutSegs", &snmp_root.tcp_OutSegs);
+ arl_expect(arl_tcp, "RetransSegs", &snmp_root.tcp_RetransSegs);
+ arl_expect(arl_tcp, "InErrs", &snmp_root.tcp_InErrs);
+ arl_expect(arl_tcp, "OutRsts", &snmp_root.tcp_OutRsts);
+ arl_expect(arl_tcp, "InCsumErrors", &snmp_root.tcp_InCsumErrors);
+
+ arl_udp = arl_create("snmp/Udp", arl_callback_str2kernel_uint_t, 60);
+ arl_expect(arl_udp, "InDatagrams", &snmp_root.udp_InDatagrams);
+ arl_expect(arl_udp, "NoPorts", &snmp_root.udp_NoPorts);
+ arl_expect(arl_udp, "InErrors", &snmp_root.udp_InErrors);
+ arl_expect(arl_udp, "OutDatagrams", &snmp_root.udp_OutDatagrams);
+ arl_expect(arl_udp, "RcvbufErrors", &snmp_root.udp_RcvbufErrors);
+ arl_expect(arl_udp, "SndbufErrors", &snmp_root.udp_SndbufErrors);
+ arl_expect(arl_udp, "InCsumErrors", &snmp_root.udp_InCsumErrors);
+ arl_expect(arl_udp, "IgnoredMulti", &snmp_root.udp_IgnoredMulti);
+
+ arl_udplite = arl_create("snmp/Udplite", arl_callback_str2kernel_uint_t, 60);
+ arl_expect(arl_udplite, "InDatagrams", &snmp_root.udplite_InDatagrams);
+ arl_expect(arl_udplite, "NoPorts", &snmp_root.udplite_NoPorts);
+ arl_expect(arl_udplite, "InErrors", &snmp_root.udplite_InErrors);
+ arl_expect(arl_udplite, "OutDatagrams", &snmp_root.udplite_OutDatagrams);
+ arl_expect(arl_udplite, "RcvbufErrors", &snmp_root.udplite_RcvbufErrors);
+ arl_expect(arl_udplite, "SndbufErrors", &snmp_root.udplite_SndbufErrors);
+ arl_expect(arl_udplite, "InCsumErrors", &snmp_root.udplite_InCsumErrors);
+ arl_expect(arl_udplite, "IgnoredMulti", &snmp_root.udplite_IgnoredMulti);
+
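+ // custom host variable for the kernel's TCP MaxConn limit; its value is
+ // set further down, once the Tcp section has been parsed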
+ tcp_max_connections_var = rrdvar_custom_host_variable_create(localhost, "tcp_max_connections");
+ }
+
+ if(unlikely(!ff)) {
+ char filename[FILENAME_MAX + 1];
+ snprintfz(filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/proc/net/snmp");
+ ff = procfile_open(config_get("plugin:proc:/proc/net/snmp", "filename to monitor", filename), " \t:", PROCFILE_FLAG_DEFAULT);
+ if(unlikely(!ff)) return 1;
+ }
+
+ ff = procfile_readall(ff);
+ if(unlikely(!ff)) return 0; // we return 0, so that we will retry opening it next time
+
+ size_t lines = procfile_lines(ff), l;
+ size_t words, w;
+
+ for(l = 0; l < lines ;l++) {
+ char *key = procfile_lineword(ff, l, 0);
+ uint32_t hash = simple_hash(key);
+
+ if(unlikely(hash == hash_ip && strcmp(key, "Ip") == 0)) {
+ size_t h = l++;
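+ // 'h' keeps the header line (the field names); 'l' now points to the
+ // values line that immediately follows it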
+
+ if(strcmp(procfile_lineword(ff, l, 0), "Ip") != 0) {
+ error("Cannot read Ip line from /proc/net/snmp.");
+ break;
+ }
+
+ words = procfile_linewords(ff, l);
+ if(words < 3) {
+ error("Cannot read /proc/net/snmp Ip line. Expected 3+ params, read %zu.", words);
+ continue;
+ }
+
+ arl_begin(arl_ip);
+ for(w = 1; w < words ; w++) {
+ if (unlikely(arl_check(arl_ip, procfile_lineword(ff, h, w), procfile_lineword(ff, l, w)) != 0))
+ break;
+ }
+
+ // --------------------------------------------------------------------
+
+ if(do_ip_packets == CONFIG_BOOLEAN_YES || (do_ip_packets == CONFIG_BOOLEAN_AUTO && (snmp_root.ip_OutRequests || snmp_root.ip_InReceives || snmp_root.ip_ForwDatagrams || snmp_root.ip_InDelivers))) {
+ do_ip_packets = CONFIG_BOOLEAN_YES;
+
+ static RRDSET *st = NULL;
+ static RRDDIM *rd_InReceives = NULL,
+ *rd_OutRequests = NULL,
+ *rd_ForwDatagrams = NULL,
+ *rd_InDelivers = NULL;
+
+ if(unlikely(!st)) {
+ st = rrdset_create_localhost(
+ RRD_TYPE_NET_SNMP
+ , "packets"
+ , NULL
+ , "packets"
+ , NULL
+ , "IPv4 Packets"
+ , "packets/s"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_NET_SNMP_NAME
+ , NETDATA_CHART_PRIO_IPV4_PACKETS
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ rd_InReceives = rrddim_add(st, "InReceives", "received", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_OutRequests = rrddim_add(st, "OutRequests", "sent", -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_ForwDatagrams = rrddim_add(st, "ForwDatagrams", "forwarded", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_InDelivers = rrddim_add(st, "InDelivers", "delivered", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else rrdset_next(st);
+
+ rrddim_set_by_pointer(st, rd_OutRequests, (collected_number)snmp_root.ip_OutRequests);
+ rrddim_set_by_pointer(st, rd_InReceives, (collected_number)snmp_root.ip_InReceives);
+ rrddim_set_by_pointer(st, rd_ForwDatagrams, (collected_number)snmp_root.ip_ForwDatagrams);
+ rrddim_set_by_pointer(st, rd_InDelivers, (collected_number)snmp_root.ip_InDelivers);
+ rrdset_done(st);
+ }
+
+ // --------------------------------------------------------------------
+
+ if(do_ip_fragsout == CONFIG_BOOLEAN_YES || (do_ip_fragsout == CONFIG_BOOLEAN_AUTO && (snmp_root.ip_FragOKs || snmp_root.ip_FragFails || snmp_root.ip_FragCreates))) {
+ do_ip_fragsout = CONFIG_BOOLEAN_YES;
+
+ static RRDSET *st = NULL;
+ static RRDDIM *rd_FragOKs = NULL,
+ *rd_FragFails = NULL,
+ *rd_FragCreates = NULL;
+
+ if(unlikely(!st)) {
+ st = rrdset_create_localhost(
+ RRD_TYPE_NET_SNMP
+ , "fragsout"
+ , NULL
+ , "fragments"
+ , NULL
+ , "IPv4 Fragments Sent"
+ , "packets/s"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_NET_SNMP_NAME
+ , NETDATA_CHART_PRIO_IPV4_FRAGMENTS
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+ rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
+
+ rd_FragOKs = rrddim_add(st, "FragOKs", "ok", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_FragFails = rrddim_add(st, "FragFails", "failed", -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_FragCreates = rrddim_add(st, "FragCreates", "created", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else rrdset_next(st);
+
+ rrddim_set_by_pointer(st, rd_FragOKs, (collected_number)snmp_root.ip_FragOKs);
+ rrddim_set_by_pointer(st, rd_FragFails, (collected_number)snmp_root.ip_FragFails);
+ rrddim_set_by_pointer(st, rd_FragCreates, (collected_number)snmp_root.ip_FragCreates);
+ rrdset_done(st);
+ }
+
+ // --------------------------------------------------------------------
+
+ if(do_ip_fragsin == CONFIG_BOOLEAN_YES || (do_ip_fragsin == CONFIG_BOOLEAN_AUTO && (snmp_root.ip_ReasmOKs || snmp_root.ip_ReasmFails || snmp_root.ip_ReasmReqds))) {
+ do_ip_fragsin = CONFIG_BOOLEAN_YES;
+
+ static RRDSET *st = NULL;
+ static RRDDIM *rd_ReasmOKs = NULL,
+ *rd_ReasmFails = NULL,
+ *rd_ReasmReqds = NULL;
+
+ if(unlikely(!st)) {
+ st = rrdset_create_localhost(
+ RRD_TYPE_NET_SNMP
+ , "fragsin"
+ , NULL
+ , "fragments"
+ , NULL
+ , "IPv4 Fragments Reassembly"
+ , "packets/s"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_NET_SNMP_NAME
+ , NETDATA_CHART_PRIO_IPV4_FRAGMENTS + 1
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+ rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
+
+ rd_ReasmOKs = rrddim_add(st, "ReasmOKs", "ok", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_ReasmFails = rrddim_add(st, "ReasmFails", "failed", -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_ReasmReqds = rrddim_add(st, "ReasmReqds", "all", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else rrdset_next(st);
+
+ rrddim_set_by_pointer(st, rd_ReasmOKs, (collected_number)snmp_root.ip_ReasmOKs);
+ rrddim_set_by_pointer(st, rd_ReasmFails, (collected_number)snmp_root.ip_ReasmFails);
+ rrddim_set_by_pointer(st, rd_ReasmReqds, (collected_number)snmp_root.ip_ReasmReqds);
+ rrdset_done(st);
+ }
+
+ // --------------------------------------------------------------------
+
+ if(do_ip_errors == CONFIG_BOOLEAN_YES || (do_ip_errors == CONFIG_BOOLEAN_AUTO && (snmp_root.ip_InDiscards || snmp_root.ip_OutDiscards || snmp_root.ip_InHdrErrors || snmp_root.ip_InAddrErrors || snmp_root.ip_InUnknownProtos || snmp_root.ip_OutNoRoutes))) {
+ do_ip_errors = CONFIG_BOOLEAN_YES;
+
+ static RRDSET *st = NULL;
+ static RRDDIM *rd_InDiscards = NULL,
+ *rd_OutDiscards = NULL,
+ *rd_InHdrErrors = NULL,
+ *rd_OutNoRoutes = NULL,
+ *rd_InAddrErrors = NULL,
+ *rd_InUnknownProtos = NULL;
+
+ if(unlikely(!st)) {
+ st = rrdset_create_localhost(
+ RRD_TYPE_NET_SNMP
+ , "errors"
+ , NULL
+ , "errors"
+ , NULL
+ , "IPv4 Errors"
+ , "packets/s"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_NET_SNMP_NAME
+ , NETDATA_CHART_PRIO_IPV4_ERRORS
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+ rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
+
+ rd_InDiscards = rrddim_add(st, "InDiscards", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_OutDiscards = rrddim_add(st, "OutDiscards", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+
+ rd_InHdrErrors = rrddim_add(st, "InHdrErrors", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_OutNoRoutes = rrddim_add(st, "OutNoRoutes", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+
+ rd_InAddrErrors = rrddim_add(st, "InAddrErrors", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_InUnknownProtos = rrddim_add(st, "InUnknownProtos", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else rrdset_next(st);
+
+ rrddim_set_by_pointer(st, rd_InDiscards, (collected_number)snmp_root.ip_InDiscards);
+ rrddim_set_by_pointer(st, rd_OutDiscards, (collected_number)snmp_root.ip_OutDiscards);
+ rrddim_set_by_pointer(st, rd_InHdrErrors, (collected_number)snmp_root.ip_InHdrErrors);
+ rrddim_set_by_pointer(st, rd_InAddrErrors, (collected_number)snmp_root.ip_InAddrErrors);
+ rrddim_set_by_pointer(st, rd_InUnknownProtos, (collected_number)snmp_root.ip_InUnknownProtos);
+ rrddim_set_by_pointer(st, rd_OutNoRoutes, (collected_number)snmp_root.ip_OutNoRoutes);
+ rrdset_done(st);
+ }
+ }
+ else if(unlikely(hash == hash_icmp && strcmp(key, "Icmp") == 0)) {
+ size_t h = l++;
+
+ if(strcmp(procfile_lineword(ff, l, 0), "Icmp") != 0) {
+ error("Cannot read Icmp line from /proc/net/snmp.");
+ break;
+ }
+
+ words = procfile_linewords(ff, l);
+ if(words < 3) {
+ error("Cannot read /proc/net/snmp Icmp line. Expected 3+ params, read %zu.", words);
+ continue;
+ }
+
+ arl_begin(arl_icmp);
+ for(w = 1; w < words ; w++) {
+ if (unlikely(arl_check(arl_icmp, procfile_lineword(ff, h, w), procfile_lineword(ff, l, w)) != 0))
+ break;
+ }
+
+ // --------------------------------------------------------------------
+
+ if(do_icmp_packets == CONFIG_BOOLEAN_YES || (do_icmp_packets == CONFIG_BOOLEAN_AUTO && (snmp_root.icmp_InMsgs || snmp_root.icmp_OutMsgs || snmp_root.icmp_InErrors || snmp_root.icmp_OutErrors || snmp_root.icmp_InCsumErrors))) {
+ do_icmp_packets = CONFIG_BOOLEAN_YES;
+
+ {
+ static RRDSET *st_packets = NULL;
+ static RRDDIM *rd_InMsgs = NULL,
+ *rd_OutMsgs = NULL;
+
+ if(unlikely(!st_packets)) {
+ st_packets = rrdset_create_localhost(
+ RRD_TYPE_NET_SNMP
+ , "icmp"
+ , NULL
+ , "icmp"
+ , NULL
+ , "IPv4 ICMP Packets"
+ , "packets/s"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_NET_SNMP_NAME
+ , NETDATA_CHART_PRIO_IPV4_ICMP
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ rd_InMsgs = rrddim_add(st_packets, "InMsgs", "received", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_OutMsgs = rrddim_add(st_packets, "OutMsgs", "sent", -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else rrdset_next(st_packets);
+
+ rrddim_set_by_pointer(st_packets, rd_InMsgs, (collected_number)snmp_root.icmp_InMsgs);
+ rrddim_set_by_pointer(st_packets, rd_OutMsgs, (collected_number)snmp_root.icmp_OutMsgs);
+
+ rrdset_done(st_packets);
+ }
+
+ {
+ static RRDSET *st_errors = NULL;
+ static RRDDIM *rd_InErrors = NULL,
+ *rd_OutErrors = NULL,
+ *rd_InCsumErrors = NULL;
+
+ if(unlikely(!st_errors)) {
+ st_errors = rrdset_create_localhost(
+ RRD_TYPE_NET_SNMP
+ , "icmp_errors"
+ , NULL
+ , "icmp"
+ , NULL
+ , "IPv4 ICMP Errors"
+ , "packets/s"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_NET_SNMP_NAME
+ , NETDATA_CHART_PRIO_IPV4_ICMP + 1
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ rd_InErrors = rrddim_add(st_errors, "InErrors", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_OutErrors = rrddim_add(st_errors, "OutErrors", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_InCsumErrors = rrddim_add(st_errors, "InCsumErrors", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else rrdset_next(st_errors);
+
+ rrddim_set_by_pointer(st_errors, rd_InErrors, (collected_number)snmp_root.icmp_InErrors);
+ rrddim_set_by_pointer(st_errors, rd_OutErrors, (collected_number)snmp_root.icmp_OutErrors);
+ rrddim_set_by_pointer(st_errors, rd_InCsumErrors, (collected_number)snmp_root.icmp_InCsumErrors);
+
+ rrdset_done(st_errors);
+ }
+ }
+ }
+ else if(unlikely(hash == hash_icmpmsg && strcmp(key, "IcmpMsg") == 0)) {
+ size_t h = l++;
+
+ if(strcmp(procfile_lineword(ff, l, 0), "IcmpMsg") != 0) {
+ error("Cannot read IcmpMsg line from /proc/net/snmp.");
+ break;
+ }
+
+ words = procfile_linewords(ff, l);
+ if(words < 2) {
+ error("Cannot read /proc/net/snmp IcmpMsg line. Expected 2+ params, read %zu.", words);
+ continue;
+ }
+
+ arl_begin(arl_icmpmsg);
+ for(w = 1; w < words ; w++) {
+ if (unlikely(arl_check(arl_icmpmsg, procfile_lineword(ff, h, w), procfile_lineword(ff, l, w)) != 0))
+ break;
+ }
+
+ // --------------------------------------------------------------------
+
+ if(do_icmpmsg == CONFIG_BOOLEAN_YES || (do_icmpmsg == CONFIG_BOOLEAN_AUTO && (
+ snmp_root.icmpmsg_InEchoReps
+ || snmp_root.icmpmsg_OutEchoReps
+ || snmp_root.icmpmsg_InDestUnreachs
+ || snmp_root.icmpmsg_OutDestUnreachs
+ || snmp_root.icmpmsg_InRedirects
+ || snmp_root.icmpmsg_OutRedirects
+ || snmp_root.icmpmsg_InEchos
+ || snmp_root.icmpmsg_OutEchos
+ || snmp_root.icmpmsg_InRouterAdvert
+ || snmp_root.icmpmsg_OutRouterAdvert
+ || snmp_root.icmpmsg_InRouterSelect
+ || snmp_root.icmpmsg_OutRouterSelect
+ || snmp_root.icmpmsg_InTimeExcds
+ || snmp_root.icmpmsg_OutTimeExcds
+ || snmp_root.icmpmsg_InParmProbs
+ || snmp_root.icmpmsg_OutParmProbs
+ || snmp_root.icmpmsg_InTimestamps
+ || snmp_root.icmpmsg_OutTimestamps
+ || snmp_root.icmpmsg_InTimestampReps
+ || snmp_root.icmpmsg_OutTimestampReps
+ ))) {
+ do_icmpmsg = CONFIG_BOOLEAN_YES;
+
+ static RRDSET *st = NULL;
+ static RRDDIM *rd_InEchoReps = NULL,
+ *rd_OutEchoReps = NULL,
+ *rd_InDestUnreachs = NULL,
+ *rd_OutDestUnreachs = NULL,
+ *rd_InRedirects = NULL,
+ *rd_OutRedirects = NULL,
+ *rd_InEchos = NULL,
+ *rd_OutEchos = NULL,
+ *rd_InRouterAdvert = NULL,
+ *rd_OutRouterAdvert = NULL,
+ *rd_InRouterSelect = NULL,
+ *rd_OutRouterSelect = NULL,
+ *rd_InTimeExcds = NULL,
+ *rd_OutTimeExcds = NULL,
+ *rd_InParmProbs = NULL,
+ *rd_OutParmProbs = NULL,
+ *rd_InTimestamps = NULL,
+ *rd_OutTimestamps = NULL,
+ *rd_InTimestampReps = NULL,
+ *rd_OutTimestampReps = NULL;
+
+ if(unlikely(!st)) {
+ st = rrdset_create_localhost(
+ RRD_TYPE_NET_SNMP
+ , "icmpmsg"
+ , NULL
+ , "icmp"
+ , NULL
+ , "IPv4 ICMP Messages"
+ , "packets/s"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_NET_SNMP_NAME
+ , NETDATA_CHART_PRIO_IPV4_ICMP + 2
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ rd_InEchoReps = rrddim_add(st, "InType0", "InEchoReps", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_OutEchoReps = rrddim_add(st, "OutType0", "OutEchoReps", -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_InDestUnreachs = rrddim_add(st, "InType3", "InDestUnreachs", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_OutDestUnreachs = rrddim_add(st, "OutType3", "OutDestUnreachs", -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_InRedirects = rrddim_add(st, "InType5", "InRedirects", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_OutRedirects = rrddim_add(st, "OutType5", "OutRedirects", -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_InEchos = rrddim_add(st, "InType8", "InEchos", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_OutEchos = rrddim_add(st, "OutType8", "OutEchos", -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_InRouterAdvert = rrddim_add(st, "InType9", "InRouterAdvert", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_OutRouterAdvert = rrddim_add(st, "OutType9", "OutRouterAdvert", -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_InRouterSelect = rrddim_add(st, "InType10", "InRouterSelect", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_OutRouterSelect = rrddim_add(st, "OutType10", "OutRouterSelect", -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_InTimeExcds = rrddim_add(st, "InType11", "InTimeExcds", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_OutTimeExcds = rrddim_add(st, "OutType11", "OutTimeExcds", -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_InParmProbs = rrddim_add(st, "InType12", "InParmProbs", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_OutParmProbs = rrddim_add(st, "OutType12", "OutParmProbs", -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_InTimestamps = rrddim_add(st, "InType13", "InTimestamps", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_OutTimestamps = rrddim_add(st, "OutType13", "OutTimestamps", -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_InTimestampReps = rrddim_add(st, "InType14", "InTimestampReps", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_OutTimestampReps = rrddim_add(st, "OutType14", "OutTimestampReps", -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else rrdset_next(st);
+
+ rrddim_set_by_pointer(st, rd_InEchoReps, (collected_number)snmp_root.icmpmsg_InEchoReps);
+ rrddim_set_by_pointer(st, rd_OutEchoReps, (collected_number)snmp_root.icmpmsg_OutEchoReps);
+ rrddim_set_by_pointer(st, rd_InDestUnreachs, (collected_number)snmp_root.icmpmsg_InDestUnreachs);
+ rrddim_set_by_pointer(st, rd_OutDestUnreachs, (collected_number)snmp_root.icmpmsg_OutDestUnreachs);
+ rrddim_set_by_pointer(st, rd_InRedirects, (collected_number)snmp_root.icmpmsg_InRedirects);
+ rrddim_set_by_pointer(st, rd_OutRedirects, (collected_number)snmp_root.icmpmsg_OutRedirects);
+ rrddim_set_by_pointer(st, rd_InEchos, (collected_number)snmp_root.icmpmsg_InEchos);
+ rrddim_set_by_pointer(st, rd_OutEchos, (collected_number)snmp_root.icmpmsg_OutEchos);
+ rrddim_set_by_pointer(st, rd_InRouterAdvert, (collected_number)snmp_root.icmpmsg_InRouterAdvert);
+ rrddim_set_by_pointer(st, rd_OutRouterAdvert, (collected_number)snmp_root.icmpmsg_OutRouterAdvert);
+ rrddim_set_by_pointer(st, rd_InRouterSelect, (collected_number)snmp_root.icmpmsg_InRouterSelect);
+ rrddim_set_by_pointer(st, rd_OutRouterSelect, (collected_number)snmp_root.icmpmsg_OutRouterSelect);
+ rrddim_set_by_pointer(st, rd_InTimeExcds, (collected_number)snmp_root.icmpmsg_InTimeExcds);
+ rrddim_set_by_pointer(st, rd_OutTimeExcds, (collected_number)snmp_root.icmpmsg_OutTimeExcds);
+ rrddim_set_by_pointer(st, rd_InParmProbs, (collected_number)snmp_root.icmpmsg_InParmProbs);
+ rrddim_set_by_pointer(st, rd_OutParmProbs, (collected_number)snmp_root.icmpmsg_OutParmProbs);
+ rrddim_set_by_pointer(st, rd_InTimestamps, (collected_number)snmp_root.icmpmsg_InTimestamps);
+ rrddim_set_by_pointer(st, rd_OutTimestamps, (collected_number)snmp_root.icmpmsg_OutTimestamps);
+ rrddim_set_by_pointer(st, rd_InTimestampReps, (collected_number)snmp_root.icmpmsg_InTimestampReps);
+ rrddim_set_by_pointer(st, rd_OutTimestampReps, (collected_number)snmp_root.icmpmsg_OutTimestampReps);
+
+ rrdset_done(st);
+ }
+ }
+ else if(unlikely(hash == hash_tcp && strcmp(key, "Tcp") == 0)) {
+ size_t h = l++;
+
+ if(strcmp(procfile_lineword(ff, l, 0), "Tcp") != 0) {
+ error("Cannot read Tcp line from /proc/net/snmp.");
+ break;
+ }
+
+ words = procfile_linewords(ff, l);
+ if(words < 3) {
+ error("Cannot read /proc/net/snmp Tcp line. Expected 3+ params, read %zu.", words);
+ continue;
+ }
+
+ arl_begin(arl_tcp);
+ for(w = 1; w < words ; w++) {
+ if (unlikely(arl_check(arl_tcp, procfile_lineword(ff, h, w), procfile_lineword(ff, l, w)) != 0))
+ break;
+ }
+
+ // --------------------------------------------------------------------
+
+ // this is smart enough to update the variable only when its value has changed
+ rrdvar_custom_host_variable_set(localhost, tcp_max_connections_var, snmp_root.tcp_MaxConn);
+
+ // --------------------------------------------------------------------
+
+ // see http://net-snmp.sourceforge.net/docs/mibs/tcp.html
+ if(do_tcp_sockets == CONFIG_BOOLEAN_YES || (do_tcp_sockets == CONFIG_BOOLEAN_AUTO && snmp_root.tcp_CurrEstab)) {
+ do_tcp_sockets = CONFIG_BOOLEAN_YES;
+
+ static RRDSET *st = NULL;
+ static RRDDIM *rd_CurrEstab = NULL;
+
+ if(unlikely(!st)) {
+ st = rrdset_create_localhost(
+ RRD_TYPE_NET_SNMP
+ , "tcpsock"
+ , NULL
+ , "tcp"
+ , NULL
+ , "IPv4 TCP Connections"
+ , "active connections"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_NET_SNMP_NAME
+ , NETDATA_CHART_PRIO_IPV4_TCP
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ rd_CurrEstab = rrddim_add(st, "CurrEstab", "connections", 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ }
+ else rrdset_next(st);
+
+ rrddim_set_by_pointer(st, rd_CurrEstab, (collected_number)snmp_root.tcp_CurrEstab);
+ rrdset_done(st);
+ }
+
+ // --------------------------------------------------------------------
+
+ if(do_tcp_packets == CONFIG_BOOLEAN_YES || (do_tcp_packets == CONFIG_BOOLEAN_AUTO && (snmp_root.tcp_InSegs || snmp_root.tcp_OutSegs))) {
+ do_tcp_packets = CONFIG_BOOLEAN_YES;
+
+ static RRDSET *st = NULL;
+ static RRDDIM *rd_InSegs = NULL,
+ *rd_OutSegs = NULL;
+
+ if(unlikely(!st)) {
+ st = rrdset_create_localhost(
+ RRD_TYPE_NET_SNMP
+ , "tcppackets"
+ , NULL
+ , "tcp"
+ , NULL
+ , "IPv4 TCP Packets"
+ , "packets/s"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_NET_SNMP_NAME
+ , NETDATA_CHART_PRIO_IPV4_TCP + 4
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ rd_InSegs = rrddim_add(st, "InSegs", "received", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_OutSegs = rrddim_add(st, "OutSegs", "sent", -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else rrdset_next(st);
+
+ rrddim_set_by_pointer(st, rd_InSegs, (collected_number)snmp_root.tcp_InSegs);
+ rrddim_set_by_pointer(st, rd_OutSegs, (collected_number)snmp_root.tcp_OutSegs);
+ rrdset_done(st);
+ }
+
+ // --------------------------------------------------------------------
+
+ if(do_tcp_errors == CONFIG_BOOLEAN_YES || (do_tcp_errors == CONFIG_BOOLEAN_AUTO && (snmp_root.tcp_InErrs || snmp_root.tcp_InCsumErrors || snmp_root.tcp_RetransSegs))) {
+ do_tcp_errors = CONFIG_BOOLEAN_YES;
+
+ static RRDSET *st = NULL;
+ static RRDDIM *rd_InErrs = NULL,
+ *rd_InCsumErrors = NULL,
+ *rd_RetransSegs = NULL;
+
+ if(unlikely(!st)) {
+ st = rrdset_create_localhost(
+ RRD_TYPE_NET_SNMP
+ , "tcperrors"
+ , NULL
+ , "tcp"
+ , NULL
+ , "IPv4 TCP Errors"
+ , "packets/s"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_NET_SNMP_NAME
+ , NETDATA_CHART_PRIO_IPV4_TCP + 20
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+ rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
+
+ rd_InErrs = rrddim_add(st, "InErrs", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_InCsumErrors = rrddim_add(st, "InCsumErrors", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_RetransSegs = rrddim_add(st, "RetransSegs", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else rrdset_next(st);
+
+ rrddim_set_by_pointer(st, rd_InErrs, (collected_number)snmp_root.tcp_InErrs);
+ rrddim_set_by_pointer(st, rd_InCsumErrors, (collected_number)snmp_root.tcp_InCsumErrors);
+ rrddim_set_by_pointer(st, rd_RetransSegs, (collected_number)snmp_root.tcp_RetransSegs);
+ rrdset_done(st);
+ }
+
+ // --------------------------------------------------------------------
+
+ if(do_tcp_opens == CONFIG_BOOLEAN_YES || (do_tcp_opens == CONFIG_BOOLEAN_AUTO && (snmp_root.tcp_ActiveOpens || snmp_root.tcp_PassiveOpens))) {
+ do_tcp_opens = CONFIG_BOOLEAN_YES;
+
+ static RRDSET *st = NULL;
+ static RRDDIM *rd_ActiveOpens = NULL,
+ *rd_PassiveOpens = NULL;
+
+ if(unlikely(!st)) {
+ st = rrdset_create_localhost(
+ RRD_TYPE_NET_SNMP
+ , "tcpopens"
+ , NULL
+ , "tcp"
+ , NULL
+ , "IPv4 TCP Opens"
+ , "connections/s"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_NET_SNMP_NAME
+ , NETDATA_CHART_PRIO_IPV4_TCP + 5
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+ rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
+
+ rd_ActiveOpens = rrddim_add(st, "ActiveOpens", "active", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_PassiveOpens = rrddim_add(st, "PassiveOpens", "passive", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else rrdset_next(st);
+
+ rrddim_set_by_pointer(st, rd_ActiveOpens, (collected_number)snmp_root.tcp_ActiveOpens);
+ rrddim_set_by_pointer(st, rd_PassiveOpens, (collected_number)snmp_root.tcp_PassiveOpens);
+ rrdset_done(st);
+ }
+
+ // --------------------------------------------------------------------
+
+ if(do_tcp_handshake == CONFIG_BOOLEAN_YES || (do_tcp_handshake == CONFIG_BOOLEAN_AUTO && (snmp_root.tcp_EstabResets || snmp_root.tcp_OutRsts || snmp_root.tcp_AttemptFails))) {
+ do_tcp_handshake = CONFIG_BOOLEAN_YES;
+
+ static RRDSET *st = NULL;
+ static RRDDIM *rd_EstabResets = NULL,
+ *rd_OutRsts = NULL,
+ *rd_AttemptFails = NULL,
+ *rd_TCPSynRetrans = NULL;
+
+ if(unlikely(!st)) {
+ st = rrdset_create_localhost(
+ RRD_TYPE_NET_SNMP
+ , "tcphandshake"
+ , NULL
+ , "tcp"
+ , NULL
+ , "IPv4 TCP Handshake Issues"
+ , "events/s"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_NET_SNMP_NAME
+ , NETDATA_CHART_PRIO_IPV4_TCP + 30
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+ rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
+
+ rd_EstabResets = rrddim_add(st, "EstabResets", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_OutRsts = rrddim_add(st, "OutRsts", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_AttemptFails = rrddim_add(st, "AttemptFails", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_TCPSynRetrans = rrddim_add(st, "TCPSynRetrans", "SynRetrans", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else rrdset_next(st);
+
+ rrddim_set_by_pointer(st, rd_EstabResets, (collected_number)snmp_root.tcp_EstabResets);
+ rrddim_set_by_pointer(st, rd_OutRsts, (collected_number)snmp_root.tcp_OutRsts);
+ rrddim_set_by_pointer(st, rd_AttemptFails, (collected_number)snmp_root.tcp_AttemptFails);
+ rrddim_set_by_pointer(st, rd_TCPSynRetrans, tcpext_TCPSynRetrans);
+ rrdset_done(st);
+ }
+ }
+ else if(unlikely(hash == hash_udp && strcmp(key, "Udp") == 0)) {
+ size_t h = l++;
+
+ if(strcmp(procfile_lineword(ff, l, 0), "Udp") != 0) {
+ error("Cannot read Udp line from /proc/net/snmp.");
+ break;
+ }
+
+ words = procfile_linewords(ff, l);
+ if(words < 3) {
+ error("Cannot read /proc/net/snmp Udp line. Expected 3+ params, read %zu.", words);
+ continue;
+ }
+
+ arl_begin(arl_udp);
+ for(w = 1; w < words ; w++) {
+ if (unlikely(arl_check(arl_udp, procfile_lineword(ff, h, w), procfile_lineword(ff, l, w)) != 0))
+ break;
+ }
+
+ // --------------------------------------------------------------------
+
+ // see http://net-snmp.sourceforge.net/docs/mibs/udp.html
+ if(do_udp_packets == CONFIG_BOOLEAN_YES || (do_udp_packets == CONFIG_BOOLEAN_AUTO && (snmp_root.udp_InDatagrams || snmp_root.udp_OutDatagrams))) {
+ do_udp_packets = CONFIG_BOOLEAN_YES;
+
+ static RRDSET *st = NULL;
+ static RRDDIM *rd_InDatagrams = NULL,
+ *rd_OutDatagrams = NULL;
+
+ if(unlikely(!st)) {
+ st = rrdset_create_localhost(
+ RRD_TYPE_NET_SNMP
+ , "udppackets"
+ , NULL
+ , "udp"
+ , NULL
+ , "IPv4 UDP Packets"
+ , "packets/s"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_NET_SNMP_NAME
+ , NETDATA_CHART_PRIO_IPV4_UDP
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ rd_InDatagrams = rrddim_add(st, "InDatagrams", "received", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_OutDatagrams = rrddim_add(st, "OutDatagrams", "sent", -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else rrdset_next(st);
+
+ rrddim_set_by_pointer(st, rd_InDatagrams, (collected_number)snmp_root.udp_InDatagrams);
+ rrddim_set_by_pointer(st, rd_OutDatagrams, (collected_number)snmp_root.udp_OutDatagrams);
+ rrdset_done(st);
+ }
+
+ // --------------------------------------------------------------------
+
+ if(do_udp_errors == CONFIG_BOOLEAN_YES || (do_udp_errors == CONFIG_BOOLEAN_AUTO && (
+ snmp_root.udp_InErrors
+ || snmp_root.udp_NoPorts
+ || snmp_root.udp_RcvbufErrors
+ || snmp_root.udp_SndbufErrors
+ || snmp_root.udp_InCsumErrors
+ || snmp_root.udp_IgnoredMulti
+ ))) {
+ do_udp_errors = CONFIG_BOOLEAN_YES;
+
+ static RRDSET *st = NULL;
+ static RRDDIM *rd_RcvbufErrors = NULL,
+ *rd_SndbufErrors = NULL,
+ *rd_InErrors = NULL,
+ *rd_NoPorts = NULL,
+ *rd_InCsumErrors = NULL,
+ *rd_IgnoredMulti = NULL;
+
+ if(unlikely(!st)) {
+ st = rrdset_create_localhost(
+ RRD_TYPE_NET_SNMP
+ , "udperrors"
+ , NULL
+ , "udp"
+ , NULL
+ , "IPv4 UDP Errors"
+ , "events/s"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_NET_SNMP_NAME
+ , NETDATA_CHART_PRIO_IPV4_UDP + 10
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+ rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
+
+ rd_RcvbufErrors = rrddim_add(st, "RcvbufErrors", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_SndbufErrors = rrddim_add(st, "SndbufErrors", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_InErrors = rrddim_add(st, "InErrors", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_NoPorts = rrddim_add(st, "NoPorts", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_InCsumErrors = rrddim_add(st, "InCsumErrors", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_IgnoredMulti = rrddim_add(st, "IgnoredMulti", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else rrdset_next(st);
+
+ rrddim_set_by_pointer(st, rd_InErrors, (collected_number)snmp_root.udp_InErrors);
+ rrddim_set_by_pointer(st, rd_NoPorts, (collected_number)snmp_root.udp_NoPorts);
+ rrddim_set_by_pointer(st, rd_RcvbufErrors, (collected_number)snmp_root.udp_RcvbufErrors);
+ rrddim_set_by_pointer(st, rd_SndbufErrors, (collected_number)snmp_root.udp_SndbufErrors);
+ rrddim_set_by_pointer(st, rd_InCsumErrors, (collected_number)snmp_root.udp_InCsumErrors);
+ rrddim_set_by_pointer(st, rd_IgnoredMulti, (collected_number)snmp_root.udp_IgnoredMulti);
+ rrdset_done(st);
+ }
+ }
+ else if(unlikely(hash == hash_udplite && strcmp(key, "UdpLite") == 0)) {
+ size_t h = l++;
+
+ if(strcmp(procfile_lineword(ff, l, 0), "UdpLite") != 0) {
+ error("Cannot read UdpLite line from /proc/net/snmp.");
+ break;
+ }
+
+ words = procfile_linewords(ff, l);
+ if(words < 3) {
+ error("Cannot read /proc/net/snmp UdpLite line. Expected 3+ params, read %zu.", words);
+ continue;
+ }
+
+ arl_begin(arl_udplite);
+ for(w = 1; w < words ; w++) {
+ if (unlikely(arl_check(arl_udplite, procfile_lineword(ff, h, w), procfile_lineword(ff, l, w)) != 0))
+ break;
+ }
+
+ // --------------------------------------------------------------------
+
+ if(do_udplite_packets == CONFIG_BOOLEAN_YES || (do_udplite_packets == CONFIG_BOOLEAN_AUTO && (
+ snmp_root.udplite_InDatagrams
+ || snmp_root.udplite_OutDatagrams
+ || snmp_root.udplite_NoPorts
+ || snmp_root.udplite_InErrors
+ || snmp_root.udplite_InCsumErrors
+ || snmp_root.udplite_RcvbufErrors
+ || snmp_root.udplite_SndbufErrors
+ || snmp_root.udplite_IgnoredMulti
+ ))) {
+ do_udplite_packets = CONFIG_BOOLEAN_YES;
+
+ {
+ static RRDSET *st = NULL;
+ static RRDDIM *rd_InDatagrams = NULL,
+ *rd_OutDatagrams = NULL;
+
+ if(unlikely(!st)) {
+ st = rrdset_create_localhost(
+ RRD_TYPE_NET_SNMP
+ , "udplite"
+ , NULL
+ , "udplite"
+ , NULL
+ , "IPv4 UDPLite Packets"
+ , "packets/s"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_NET_SNMP_NAME
+ , NETDATA_CHART_PRIO_IPV4_UDPLITE
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ rd_InDatagrams = rrddim_add(st, "InDatagrams", "received", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_OutDatagrams = rrddim_add(st, "OutDatagrams", "sent", -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else rrdset_next(st);
+
+ rrddim_set_by_pointer(st, rd_InDatagrams, (collected_number)snmp_root.udplite_InDatagrams);
+ rrddim_set_by_pointer(st, rd_OutDatagrams, (collected_number)snmp_root.udplite_OutDatagrams);
+ rrdset_done(st);
+ }
+
+ {
+ static RRDSET *st = NULL;
+ static RRDDIM *rd_RcvbufErrors = NULL,
+ *rd_SndbufErrors = NULL,
+ *rd_InErrors = NULL,
+ *rd_NoPorts = NULL,
+ *rd_InCsumErrors = NULL,
+ *rd_IgnoredMulti = NULL;
+
+ if(unlikely(!st)) {
+ st = rrdset_create_localhost(
+ RRD_TYPE_NET_SNMP
+ , "udplite_errors"
+ , NULL
+ , "udplite"
+ , NULL
+ , "IPv4 UDPLite Errors"
+ , "packets/s"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_NET_SNMP_NAME
+ , NETDATA_CHART_PRIO_IPV4_UDPLITE + 10
+ , update_every
+ , RRDSET_TYPE_LINE);
+
+ rd_RcvbufErrors = rrddim_add(st, "RcvbufErrors", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_SndbufErrors = rrddim_add(st, "SndbufErrors", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_InErrors = rrddim_add(st, "InErrors", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_NoPorts = rrddim_add(st, "NoPorts", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_InCsumErrors = rrddim_add(st, "InCsumErrors", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_IgnoredMulti = rrddim_add(st, "IgnoredMulti", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else rrdset_next(st);
+
+ rrddim_set_by_pointer(st, rd_NoPorts, (collected_number)snmp_root.udplite_NoPorts);
+ rrddim_set_by_pointer(st, rd_InErrors, (collected_number)snmp_root.udplite_InErrors);
+ rrddim_set_by_pointer(st, rd_InCsumErrors, (collected_number)snmp_root.udplite_InCsumErrors);
+ rrddim_set_by_pointer(st, rd_RcvbufErrors, (collected_number)snmp_root.udplite_RcvbufErrors);
+ rrddim_set_by_pointer(st, rd_SndbufErrors, (collected_number)snmp_root.udplite_SndbufErrors);
+ rrddim_set_by_pointer(st, rd_IgnoredMulti, (collected_number)snmp_root.udplite_IgnoredMulti);
+ rrdset_done(st);
+ }
+ }
+ }
+ }
+
+ return 0;
+}
+
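Every chart in the collector above follows the same three-step pattern: a per-chart switch is read once from netdata.conf with config_get_boolean_ondemand(), the RRDSET and its dimensions are created lazily behind a static pointer on the first pass, and every later pass brackets the dimension updates with rrdset_next()/rrdset_done(). Charts configured as CONFIG_BOOLEAN_AUTO only appear once their counters become non-zero and are then latched to CONFIG_BOOLEAN_YES. Below is a minimal sketch of that pattern; it is not part of the patch. The chart id "example", the config key "example chart", the counter my_counter and the priority value are hypothetical, while the API calls mirror the signatures visible in the code above.

    // Sketch only -- names marked hypothetical in the text above.
    static int do_example = -1;                 // -1: not configured yet
    static unsigned long long my_counter = 0;   // filled by the /proc parser

    if(unlikely(do_example == -1))
        do_example = config_get_boolean_ondemand("plugin:proc:/proc/net/snmp", "example chart", CONFIG_BOOLEAN_AUTO);

    if(do_example == CONFIG_BOOLEAN_YES || (do_example == CONFIG_BOOLEAN_AUTO && my_counter)) {
        do_example = CONFIG_BOOLEAN_YES;        // latch AUTO to YES once data has been seen

        static RRDSET *st = NULL;
        static RRDDIM *rd = NULL;

        if(unlikely(!st)) {
            st = rrdset_create_localhost(
                    RRD_TYPE_NET_SNMP, "example", NULL, "example", NULL
                    , "Example Chart", "events/s"
                    , PLUGIN_PROC_NAME, PLUGIN_PROC_MODULE_NET_SNMP_NAME
                    , 2900 /* hypothetical priority */, update_every, RRDSET_TYPE_LINE);

            rd = rrddim_add(st, "events", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
        }
        else rrdset_next(st);

        rrddim_set_by_pointer(st, rd, (collected_number)my_counter);
        rrdset_done(st);
    }
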
diff --git a/collectors/proc.plugin/proc_net_snmp6.c b/collectors/proc.plugin/proc_net_snmp6.c
new file mode 100644
index 000000000..f0084aa26
--- /dev/null
+++ b/collectors/proc.plugin/proc_net_snmp6.c
@@ -0,0 +1,1268 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "plugin_proc.h"
+
+#define RRD_TYPE_NET_SNMP6 "ipv6"
+#define PLUGIN_PROC_MODULE_NET_SNMP6_NAME "/proc/net/snmp6"
+
+int do_proc_net_snmp6(int update_every, usec_t dt) {
+ (void)dt;
+
+ static procfile *ff = NULL;
+
+ static int do_ip_packets = -1,
+ do_ip_fragsout = -1,
+ do_ip_fragsin = -1,
+ do_ip_errors = -1,
+ do_udplite_packets = -1,
+ do_udplite_errors = -1,
+ do_udp_packets = -1,
+ do_udp_errors = -1,
+ do_bandwidth = -1,
+ do_mcast = -1,
+ do_bcast = -1,
+ do_mcast_p = -1,
+ do_icmp = -1,
+ do_icmp_redir = -1,
+ do_icmp_errors = -1,
+ do_icmp_echos = -1,
+ do_icmp_groupmemb = -1,
+ do_icmp_router = -1,
+ do_icmp_neighbor = -1,
+ do_icmp_mldv2 = -1,
+ do_icmp_types = -1,
+ do_ect = -1;
+
+ static ARL_BASE *arl_base = NULL;
+
+ static unsigned long long Ip6InReceives = 0ULL;
+ static unsigned long long Ip6InHdrErrors = 0ULL;
+ static unsigned long long Ip6InTooBigErrors = 0ULL;
+ static unsigned long long Ip6InNoRoutes = 0ULL;
+ static unsigned long long Ip6InAddrErrors = 0ULL;
+ static unsigned long long Ip6InUnknownProtos = 0ULL;
+ static unsigned long long Ip6InTruncatedPkts = 0ULL;
+ static unsigned long long Ip6InDiscards = 0ULL;
+ static unsigned long long Ip6InDelivers = 0ULL;
+ static unsigned long long Ip6OutForwDatagrams = 0ULL;
+ static unsigned long long Ip6OutRequests = 0ULL;
+ static unsigned long long Ip6OutDiscards = 0ULL;
+ static unsigned long long Ip6OutNoRoutes = 0ULL;
+ static unsigned long long Ip6ReasmTimeout = 0ULL;
+ static unsigned long long Ip6ReasmReqds = 0ULL;
+ static unsigned long long Ip6ReasmOKs = 0ULL;
+ static unsigned long long Ip6ReasmFails = 0ULL;
+ static unsigned long long Ip6FragOKs = 0ULL;
+ static unsigned long long Ip6FragFails = 0ULL;
+ static unsigned long long Ip6FragCreates = 0ULL;
+ static unsigned long long Ip6InMcastPkts = 0ULL;
+ static unsigned long long Ip6OutMcastPkts = 0ULL;
+ static unsigned long long Ip6InOctets = 0ULL;
+ static unsigned long long Ip6OutOctets = 0ULL;
+ static unsigned long long Ip6InMcastOctets = 0ULL;
+ static unsigned long long Ip6OutMcastOctets = 0ULL;
+ static unsigned long long Ip6InBcastOctets = 0ULL;
+ static unsigned long long Ip6OutBcastOctets = 0ULL;
+ static unsigned long long Ip6InNoECTPkts = 0ULL;
+ static unsigned long long Ip6InECT1Pkts = 0ULL;
+ static unsigned long long Ip6InECT0Pkts = 0ULL;
+ static unsigned long long Ip6InCEPkts = 0ULL;
+ static unsigned long long Icmp6InMsgs = 0ULL;
+ static unsigned long long Icmp6InErrors = 0ULL;
+ static unsigned long long Icmp6OutMsgs = 0ULL;
+ static unsigned long long Icmp6OutErrors = 0ULL;
+ static unsigned long long Icmp6InCsumErrors = 0ULL;
+ static unsigned long long Icmp6InDestUnreachs = 0ULL;
+ static unsigned long long Icmp6InPktTooBigs = 0ULL;
+ static unsigned long long Icmp6InTimeExcds = 0ULL;
+ static unsigned long long Icmp6InParmProblems = 0ULL;
+ static unsigned long long Icmp6InEchos = 0ULL;
+ static unsigned long long Icmp6InEchoReplies = 0ULL;
+ static unsigned long long Icmp6InGroupMembQueries = 0ULL;
+ static unsigned long long Icmp6InGroupMembResponses = 0ULL;
+ static unsigned long long Icmp6InGroupMembReductions = 0ULL;
+ static unsigned long long Icmp6InRouterSolicits = 0ULL;
+ static unsigned long long Icmp6InRouterAdvertisements = 0ULL;
+ static unsigned long long Icmp6InNeighborSolicits = 0ULL;
+ static unsigned long long Icmp6InNeighborAdvertisements = 0ULL;
+ static unsigned long long Icmp6InRedirects = 0ULL;
+ static unsigned long long Icmp6InMLDv2Reports = 0ULL;
+ static unsigned long long Icmp6OutDestUnreachs = 0ULL;
+ static unsigned long long Icmp6OutPktTooBigs = 0ULL;
+ static unsigned long long Icmp6OutTimeExcds = 0ULL;
+ static unsigned long long Icmp6OutParmProblems = 0ULL;
+ static unsigned long long Icmp6OutEchos = 0ULL;
+ static unsigned long long Icmp6OutEchoReplies = 0ULL;
+ static unsigned long long Icmp6OutGroupMembQueries = 0ULL;
+ static unsigned long long Icmp6OutGroupMembResponses = 0ULL;
+ static unsigned long long Icmp6OutGroupMembReductions = 0ULL;
+ static unsigned long long Icmp6OutRouterSolicits = 0ULL;
+ static unsigned long long Icmp6OutRouterAdvertisements = 0ULL;
+ static unsigned long long Icmp6OutNeighborSolicits = 0ULL;
+ static unsigned long long Icmp6OutNeighborAdvertisements = 0ULL;
+ static unsigned long long Icmp6OutRedirects = 0ULL;
+ static unsigned long long Icmp6OutMLDv2Reports = 0ULL;
+ static unsigned long long Icmp6InType1 = 0ULL;
+ static unsigned long long Icmp6InType128 = 0ULL;
+ static unsigned long long Icmp6InType129 = 0ULL;
+ static unsigned long long Icmp6InType136 = 0ULL;
+ static unsigned long long Icmp6OutType1 = 0ULL;
+ static unsigned long long Icmp6OutType128 = 0ULL;
+ static unsigned long long Icmp6OutType129 = 0ULL;
+ static unsigned long long Icmp6OutType133 = 0ULL;
+ static unsigned long long Icmp6OutType135 = 0ULL;
+ static unsigned long long Icmp6OutType143 = 0ULL;
+ static unsigned long long Udp6InDatagrams = 0ULL;
+ static unsigned long long Udp6NoPorts = 0ULL;
+ static unsigned long long Udp6InErrors = 0ULL;
+ static unsigned long long Udp6OutDatagrams = 0ULL;
+ static unsigned long long Udp6RcvbufErrors = 0ULL;
+ static unsigned long long Udp6SndbufErrors = 0ULL;
+ static unsigned long long Udp6InCsumErrors = 0ULL;
+ static unsigned long long Udp6IgnoredMulti = 0ULL;
+ static unsigned long long UdpLite6InDatagrams = 0ULL;
+ static unsigned long long UdpLite6NoPorts = 0ULL;
+ static unsigned long long UdpLite6InErrors = 0ULL;
+ static unsigned long long UdpLite6OutDatagrams = 0ULL;
+ static unsigned long long UdpLite6RcvbufErrors = 0ULL;
+ static unsigned long long UdpLite6SndbufErrors = 0ULL;
+ static unsigned long long UdpLite6InCsumErrors = 0ULL;
+
+ if(unlikely(!arl_base)) {
+ do_ip_packets = config_get_boolean_ondemand("plugin:proc:/proc/net/snmp6", "ipv6 packets", CONFIG_BOOLEAN_AUTO);
+ do_ip_fragsout = config_get_boolean_ondemand("plugin:proc:/proc/net/snmp6", "ipv6 fragments sent", CONFIG_BOOLEAN_AUTO);
+ do_ip_fragsin = config_get_boolean_ondemand("plugin:proc:/proc/net/snmp6", "ipv6 fragments assembly", CONFIG_BOOLEAN_AUTO);
+ do_ip_errors = config_get_boolean_ondemand("plugin:proc:/proc/net/snmp6", "ipv6 errors", CONFIG_BOOLEAN_AUTO);
+ do_udp_packets = config_get_boolean_ondemand("plugin:proc:/proc/net/snmp6", "ipv6 UDP packets", CONFIG_BOOLEAN_AUTO);
+ do_udp_errors = config_get_boolean_ondemand("plugin:proc:/proc/net/snmp6", "ipv6 UDP errors", CONFIG_BOOLEAN_AUTO);
+ do_udplite_packets = config_get_boolean_ondemand("plugin:proc:/proc/net/snmp6", "ipv6 UDPlite packets", CONFIG_BOOLEAN_AUTO);
+ do_udplite_errors = config_get_boolean_ondemand("plugin:proc:/proc/net/snmp6", "ipv6 UDPlite errors", CONFIG_BOOLEAN_AUTO);
+ do_bandwidth = config_get_boolean_ondemand("plugin:proc:/proc/net/snmp6", "bandwidth", CONFIG_BOOLEAN_AUTO);
+ do_mcast = config_get_boolean_ondemand("plugin:proc:/proc/net/snmp6", "multicast bandwidth", CONFIG_BOOLEAN_AUTO);
+ do_bcast = config_get_boolean_ondemand("plugin:proc:/proc/net/snmp6", "broadcast bandwidth", CONFIG_BOOLEAN_AUTO);
+ do_mcast_p = config_get_boolean_ondemand("plugin:proc:/proc/net/snmp6", "multicast packets", CONFIG_BOOLEAN_AUTO);
+ do_icmp = config_get_boolean_ondemand("plugin:proc:/proc/net/snmp6", "icmp", CONFIG_BOOLEAN_AUTO);
+ do_icmp_redir = config_get_boolean_ondemand("plugin:proc:/proc/net/snmp6", "icmp redirects", CONFIG_BOOLEAN_AUTO);
+ do_icmp_errors = config_get_boolean_ondemand("plugin:proc:/proc/net/snmp6", "icmp errors", CONFIG_BOOLEAN_AUTO);
+ do_icmp_echos = config_get_boolean_ondemand("plugin:proc:/proc/net/snmp6", "icmp echos", CONFIG_BOOLEAN_AUTO);
+ do_icmp_groupmemb = config_get_boolean_ondemand("plugin:proc:/proc/net/snmp6", "icmp group membership", CONFIG_BOOLEAN_AUTO);
+ do_icmp_router = config_get_boolean_ondemand("plugin:proc:/proc/net/snmp6", "icmp router", CONFIG_BOOLEAN_AUTO);
+ do_icmp_neighbor = config_get_boolean_ondemand("plugin:proc:/proc/net/snmp6", "icmp neighbor", CONFIG_BOOLEAN_AUTO);
+ do_icmp_mldv2 = config_get_boolean_ondemand("plugin:proc:/proc/net/snmp6", "icmp mldv2", CONFIG_BOOLEAN_AUTO);
+ do_icmp_types = config_get_boolean_ondemand("plugin:proc:/proc/net/snmp6", "icmp types", CONFIG_BOOLEAN_AUTO);
+ do_ect = config_get_boolean_ondemand("plugin:proc:/proc/net/snmp6", "ect", CONFIG_BOOLEAN_AUTO);
+
+ arl_base = arl_create("snmp6", NULL, 60);
+ arl_expect(arl_base, "Ip6InReceives", &Ip6InReceives);
+ arl_expect(arl_base, "Ip6InHdrErrors", &Ip6InHdrErrors);
+ arl_expect(arl_base, "Ip6InTooBigErrors", &Ip6InTooBigErrors);
+ arl_expect(arl_base, "Ip6InNoRoutes", &Ip6InNoRoutes);
+ arl_expect(arl_base, "Ip6InAddrErrors", &Ip6InAddrErrors);
+ arl_expect(arl_base, "Ip6InUnknownProtos", &Ip6InUnknownProtos);
+ arl_expect(arl_base, "Ip6InTruncatedPkts", &Ip6InTruncatedPkts);
+ arl_expect(arl_base, "Ip6InDiscards", &Ip6InDiscards);
+ arl_expect(arl_base, "Ip6InDelivers", &Ip6InDelivers);
+ arl_expect(arl_base, "Ip6OutForwDatagrams", &Ip6OutForwDatagrams);
+ arl_expect(arl_base, "Ip6OutRequests", &Ip6OutRequests);
+ arl_expect(arl_base, "Ip6OutDiscards", &Ip6OutDiscards);
+ arl_expect(arl_base, "Ip6OutNoRoutes", &Ip6OutNoRoutes);
+ arl_expect(arl_base, "Ip6ReasmTimeout", &Ip6ReasmTimeout);
+ arl_expect(arl_base, "Ip6ReasmReqds", &Ip6ReasmReqds);
+ arl_expect(arl_base, "Ip6ReasmOKs", &Ip6ReasmOKs);
+ arl_expect(arl_base, "Ip6ReasmFails", &Ip6ReasmFails);
+ arl_expect(arl_base, "Ip6FragOKs", &Ip6FragOKs);
+ arl_expect(arl_base, "Ip6FragFails", &Ip6FragFails);
+ arl_expect(arl_base, "Ip6FragCreates", &Ip6FragCreates);
+ arl_expect(arl_base, "Ip6InMcastPkts", &Ip6InMcastPkts);
+ arl_expect(arl_base, "Ip6OutMcastPkts", &Ip6OutMcastPkts);
+ arl_expect(arl_base, "Ip6InOctets", &Ip6InOctets);
+ arl_expect(arl_base, "Ip6OutOctets", &Ip6OutOctets);
+ arl_expect(arl_base, "Ip6InMcastOctets", &Ip6InMcastOctets);
+ arl_expect(arl_base, "Ip6OutMcastOctets", &Ip6OutMcastOctets);
+ arl_expect(arl_base, "Ip6InBcastOctets", &Ip6InBcastOctets);
+ arl_expect(arl_base, "Ip6OutBcastOctets", &Ip6OutBcastOctets);
+ arl_expect(arl_base, "Ip6InNoECTPkts", &Ip6InNoECTPkts);
+ arl_expect(arl_base, "Ip6InECT1Pkts", &Ip6InECT1Pkts);
+ arl_expect(arl_base, "Ip6InECT0Pkts", &Ip6InECT0Pkts);
+ arl_expect(arl_base, "Ip6InCEPkts", &Ip6InCEPkts);
+ arl_expect(arl_base, "Icmp6InMsgs", &Icmp6InMsgs);
+ arl_expect(arl_base, "Icmp6InErrors", &Icmp6InErrors);
+ arl_expect(arl_base, "Icmp6OutMsgs", &Icmp6OutMsgs);
+ arl_expect(arl_base, "Icmp6OutErrors", &Icmp6OutErrors);
+ arl_expect(arl_base, "Icmp6InCsumErrors", &Icmp6InCsumErrors);
+ arl_expect(arl_base, "Icmp6InDestUnreachs", &Icmp6InDestUnreachs);
+ arl_expect(arl_base, "Icmp6InPktTooBigs", &Icmp6InPktTooBigs);
+ arl_expect(arl_base, "Icmp6InTimeExcds", &Icmp6InTimeExcds);
+ arl_expect(arl_base, "Icmp6InParmProblems", &Icmp6InParmProblems);
+ arl_expect(arl_base, "Icmp6InEchos", &Icmp6InEchos);
+ arl_expect(arl_base, "Icmp6InEchoReplies", &Icmp6InEchoReplies);
+ arl_expect(arl_base, "Icmp6InGroupMembQueries", &Icmp6InGroupMembQueries);
+ arl_expect(arl_base, "Icmp6InGroupMembResponses", &Icmp6InGroupMembResponses);
+ arl_expect(arl_base, "Icmp6InGroupMembReductions", &Icmp6InGroupMembReductions);
+ arl_expect(arl_base, "Icmp6InRouterSolicits", &Icmp6InRouterSolicits);
+ arl_expect(arl_base, "Icmp6InRouterAdvertisements", &Icmp6InRouterAdvertisements);
+ arl_expect(arl_base, "Icmp6InNeighborSolicits", &Icmp6InNeighborSolicits);
+ arl_expect(arl_base, "Icmp6InNeighborAdvertisements", &Icmp6InNeighborAdvertisements);
+ arl_expect(arl_base, "Icmp6InRedirects", &Icmp6InRedirects);
+ arl_expect(arl_base, "Icmp6InMLDv2Reports", &Icmp6InMLDv2Reports);
+ arl_expect(arl_base, "Icmp6OutDestUnreachs", &Icmp6OutDestUnreachs);
+ arl_expect(arl_base, "Icmp6OutPktTooBigs", &Icmp6OutPktTooBigs);
+ arl_expect(arl_base, "Icmp6OutTimeExcds", &Icmp6OutTimeExcds);
+ arl_expect(arl_base, "Icmp6OutParmProblems", &Icmp6OutParmProblems);
+ arl_expect(arl_base, "Icmp6OutEchos", &Icmp6OutEchos);
+ arl_expect(arl_base, "Icmp6OutEchoReplies", &Icmp6OutEchoReplies);
+ arl_expect(arl_base, "Icmp6OutGroupMembQueries", &Icmp6OutGroupMembQueries);
+ arl_expect(arl_base, "Icmp6OutGroupMembResponses", &Icmp6OutGroupMembResponses);
+ arl_expect(arl_base, "Icmp6OutGroupMembReductions", &Icmp6OutGroupMembReductions);
+ arl_expect(arl_base, "Icmp6OutRouterSolicits", &Icmp6OutRouterSolicits);
+ arl_expect(arl_base, "Icmp6OutRouterAdvertisements", &Icmp6OutRouterAdvertisements);
+ arl_expect(arl_base, "Icmp6OutNeighborSolicits", &Icmp6OutNeighborSolicits);
+ arl_expect(arl_base, "Icmp6OutNeighborAdvertisements", &Icmp6OutNeighborAdvertisements);
+ arl_expect(arl_base, "Icmp6OutRedirects", &Icmp6OutRedirects);
+ arl_expect(arl_base, "Icmp6OutMLDv2Reports", &Icmp6OutMLDv2Reports);
+ arl_expect(arl_base, "Icmp6InType1", &Icmp6InType1);
+ arl_expect(arl_base, "Icmp6InType128", &Icmp6InType128);
+ arl_expect(arl_base, "Icmp6InType129", &Icmp6InType129);
+ arl_expect(arl_base, "Icmp6InType136", &Icmp6InType136);
+ arl_expect(arl_base, "Icmp6OutType1", &Icmp6OutType1);
+ arl_expect(arl_base, "Icmp6OutType128", &Icmp6OutType128);
+ arl_expect(arl_base, "Icmp6OutType129", &Icmp6OutType129);
+ arl_expect(arl_base, "Icmp6OutType133", &Icmp6OutType133);
+ arl_expect(arl_base, "Icmp6OutType135", &Icmp6OutType135);
+ arl_expect(arl_base, "Icmp6OutType143", &Icmp6OutType143);
+ arl_expect(arl_base, "Udp6InDatagrams", &Udp6InDatagrams);
+ arl_expect(arl_base, "Udp6NoPorts", &Udp6NoPorts);
+ arl_expect(arl_base, "Udp6InErrors", &Udp6InErrors);
+ arl_expect(arl_base, "Udp6OutDatagrams", &Udp6OutDatagrams);
+ arl_expect(arl_base, "Udp6RcvbufErrors", &Udp6RcvbufErrors);
+ arl_expect(arl_base, "Udp6SndbufErrors", &Udp6SndbufErrors);
+ arl_expect(arl_base, "Udp6InCsumErrors", &Udp6InCsumErrors);
+ arl_expect(arl_base, "Udp6IgnoredMulti", &Udp6IgnoredMulti);
+ arl_expect(arl_base, "UdpLite6InDatagrams", &UdpLite6InDatagrams);
+ arl_expect(arl_base, "UdpLite6NoPorts", &UdpLite6NoPorts);
+ arl_expect(arl_base, "UdpLite6InErrors", &UdpLite6InErrors);
+ arl_expect(arl_base, "UdpLite6OutDatagrams", &UdpLite6OutDatagrams);
+ arl_expect(arl_base, "UdpLite6RcvbufErrors", &UdpLite6RcvbufErrors);
+ arl_expect(arl_base, "UdpLite6SndbufErrors", &UdpLite6SndbufErrors);
+ arl_expect(arl_base, "UdpLite6InCsumErrors", &UdpLite6InCsumErrors);
+ }
+
+ if(unlikely(!ff)) {
+ char filename[FILENAME_MAX + 1];
+ snprintfz(filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/proc/net/snmp6");
+ ff = procfile_open(config_get("plugin:proc:/proc/net/snmp6", "filename to monitor", filename), " \t:", PROCFILE_FLAG_DEFAULT);
+ if(unlikely(!ff))
+ return 1;
+ }
+
+ ff = procfile_readall(ff);
+ if(unlikely(!ff))
+ return 0; // return 0 so that we retry reading it on the next iteration
+
+ size_t lines = procfile_lines(ff), l;
+
+ arl_begin(arl_base);
+
+ for(l = 0; l < lines ;l++) {
+ size_t words = procfile_linewords(ff, l);
+ if(unlikely(words < 2)) {
+ if(unlikely(words)) error("Cannot read /proc/net/snmp6 line %zu. Expected 2 params, read %zu.", l, words);
+ continue;
+ }
+
+ if(unlikely(arl_check(arl_base,
+ procfile_lineword(ff, l, 0),
+ procfile_lineword(ff, l, 1)))) break;
+ }
+
+ // --------------------------------------------------------------------
+
+ if(do_bandwidth == CONFIG_BOOLEAN_YES || (do_bandwidth == CONFIG_BOOLEAN_AUTO && (Ip6InOctets || Ip6OutOctets))) {
+ do_bandwidth = CONFIG_BOOLEAN_YES;
+ static RRDSET *st = NULL;
+ static RRDDIM *rd_received = NULL,
+ *rd_sent = NULL;
+
+ if(unlikely(!st)) {
+ st = rrdset_create_localhost(
+ "system"
+ , "ipv6"
+ , NULL
+ , "network"
+ , NULL
+ , "IPv6 Bandwidth"
+ , "kilobits/s"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_NET_SNMP6_NAME
+ , NETDATA_CHART_PRIO_SYSTEM_IPV6
+ , update_every
+ , RRDSET_TYPE_AREA
+ );
+
+ rd_received = rrddim_add(st, "InOctets", "received", 8, BITS_IN_A_KILOBIT, RRD_ALGORITHM_INCREMENTAL);
+ rd_sent = rrddim_add(st, "OutOctets", "sent", -8, BITS_IN_A_KILOBIT, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else rrdset_next(st);
+
+ rrddim_set_by_pointer(st, rd_received, Ip6InOctets);
+ rrddim_set_by_pointer(st, rd_sent, Ip6OutOctets);
+ rrdset_done(st);
+ }
+
+ // --------------------------------------------------------------------
+
+ if(do_ip_packets == CONFIG_BOOLEAN_YES || (do_ip_packets == CONFIG_BOOLEAN_AUTO && (Ip6InReceives || Ip6OutRequests || Ip6InDelivers || Ip6OutForwDatagrams))) {
+ do_ip_packets = CONFIG_BOOLEAN_YES;
+ static RRDSET *st = NULL;
+ static RRDDIM *rd_received = NULL,
+ *rd_sent = NULL,
+ *rd_forwarded = NULL,
+ *rd_delivers = NULL;
+
+ if(unlikely(!st)) {
+ st = rrdset_create_localhost(
+ RRD_TYPE_NET_SNMP6
+ , "packets"
+ , NULL
+ , "packets"
+ , NULL
+ , "IPv6 Packets"
+ , "packets/s"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_NET_SNMP6_NAME
+ , NETDATA_CHART_PRIO_IPV6_PACKETS
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ rd_received = rrddim_add(st, "InReceives", "received", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_sent = rrddim_add(st, "OutRequests", "sent", -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_forwarded = rrddim_add(st, "OutForwDatagrams", "forwarded", -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_delivers = rrddim_add(st, "InDelivers", "delivers", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else rrdset_next(st);
+
+ rrddim_set_by_pointer(st, rd_received, Ip6InReceives);
+ rrddim_set_by_pointer(st, rd_sent, Ip6OutRequests);
+ rrddim_set_by_pointer(st, rd_forwarded, Ip6OutForwDatagrams);
+ rrddim_set_by_pointer(st, rd_delivers, Ip6InDelivers);
+ rrdset_done(st);
+ }
+
+ // --------------------------------------------------------------------
+
+ if(do_ip_fragsout == CONFIG_BOOLEAN_YES || (do_ip_fragsout == CONFIG_BOOLEAN_AUTO && (Ip6FragOKs || Ip6FragFails || Ip6FragCreates))) {
+ do_ip_fragsout = CONFIG_BOOLEAN_YES;
+ static RRDSET *st = NULL;
+ static RRDDIM *rd_ok = NULL,
+ *rd_failed = NULL,
+ *rd_all = NULL;
+
+ if(unlikely(!st)) {
+ st = rrdset_create_localhost(
+ RRD_TYPE_NET_SNMP6
+ , "fragsout"
+ , NULL
+ , "fragments6"
+ , NULL
+ , "IPv6 Fragments Sent"
+ , "packets/s"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_NET_SNMP6_NAME
+ , NETDATA_CHART_PRIO_IPV6_FRAGSOUT
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+ rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
+
+ rd_ok = rrddim_add(st, "FragOKs", "ok", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_failed = rrddim_add(st, "FragFails", "failed", -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_all = rrddim_add(st, "FragCreates", "all", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else rrdset_next(st);
+
+ rrddim_set_by_pointer(st, rd_ok, Ip6FragOKs);
+ rrddim_set_by_pointer(st, rd_failed, Ip6FragFails);
+ rrddim_set_by_pointer(st, rd_all, Ip6FragCreates);
+ rrdset_done(st);
+ }
+
+ // --------------------------------------------------------------------
+
+ if(do_ip_fragsin == CONFIG_BOOLEAN_YES || (do_ip_fragsin == CONFIG_BOOLEAN_AUTO
+ && (
+ Ip6ReasmOKs
+ || Ip6ReasmFails
+ || Ip6ReasmTimeout
+ || Ip6ReasmReqds
+ ))) {
+ do_ip_fragsin = CONFIG_BOOLEAN_YES;
+
+ static RRDSET *st = NULL;
+ static RRDDIM *rd_ok = NULL,
+ *rd_failed = NULL,
+ *rd_timeout = NULL,
+ *rd_all = NULL;
+
+ if(unlikely(!st)) {
+ st = rrdset_create_localhost(
+ RRD_TYPE_NET_SNMP6
+ , "fragsin"
+ , NULL
+ , "fragments6"
+ , NULL
+ , "IPv6 Fragments Reassembly"
+ , "packets/s"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_NET_SNMP6_NAME
+ , NETDATA_CHART_PRIO_IPV6_FRAGSIN
+ , update_every
+ , RRDSET_TYPE_LINE);
+ rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
+
+ rd_ok = rrddim_add(st, "ReasmOKs", "ok", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_failed = rrddim_add(st, "ReasmFails", "failed", -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_timeout = rrddim_add(st, "ReasmTimeout", "timeout", -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_all = rrddim_add(st, "ReasmReqds", "all", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else rrdset_next(st);
+
+ rrddim_set_by_pointer(st, rd_ok, Ip6ReasmOKs);
+ rrddim_set_by_pointer(st, rd_failed, Ip6ReasmFails);
+ rrddim_set_by_pointer(st, rd_timeout, Ip6ReasmTimeout);
+ rrddim_set_by_pointer(st, rd_all, Ip6ReasmReqds);
+ rrdset_done(st);
+ }
+
+ // --------------------------------------------------------------------
+
+ if(do_ip_errors == CONFIG_BOOLEAN_YES || (do_ip_errors == CONFIG_BOOLEAN_AUTO
+ && (
+ Ip6InDiscards
+ || Ip6OutDiscards
+ || Ip6InHdrErrors
+ || Ip6InAddrErrors
+ || Ip6InUnknownProtos
+ || Ip6InTooBigErrors
+ || Ip6InTruncatedPkts
+ || Ip6InNoRoutes
+ ))) {
+ do_ip_errors = CONFIG_BOOLEAN_YES;
+ static RRDSET *st = NULL;
+ static RRDDIM *rd_InDiscards = NULL,
+ *rd_OutDiscards = NULL,
+ *rd_InHdrErrors = NULL,
+ *rd_InAddrErrors = NULL,
+ *rd_InUnknownProtos = NULL,
+ *rd_InTooBigErrors = NULL,
+ *rd_InTruncatedPkts = NULL,
+ *rd_InNoRoutes = NULL,
+ *rd_OutNoRoutes = NULL;
+
+ if(unlikely(!st)) {
+ st = rrdset_create_localhost(
+ RRD_TYPE_NET_SNMP6
+ , "errors"
+ , NULL
+ , "errors"
+ , NULL
+ , "IPv6 Errors"
+ , "packets/s"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_NET_SNMP6_NAME
+ , NETDATA_CHART_PRIO_IPV6_ERRORS
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+ rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
+
+ rd_InDiscards = rrddim_add(st, "InDiscards", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_OutDiscards = rrddim_add(st, "OutDiscards", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_InHdrErrors = rrddim_add(st, "InHdrErrors", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_InAddrErrors = rrddim_add(st, "InAddrErrors", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_InUnknownProtos = rrddim_add(st, "InUnknownProtos", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_InTooBigErrors = rrddim_add(st, "InTooBigErrors", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_InTruncatedPkts = rrddim_add(st, "InTruncatedPkts", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_InNoRoutes = rrddim_add(st, "InNoRoutes", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_OutNoRoutes = rrddim_add(st, "OutNoRoutes", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else rrdset_next(st);
+
+ rrddim_set_by_pointer(st, rd_InDiscards, Ip6InDiscards);
+ rrddim_set_by_pointer(st, rd_OutDiscards, Ip6OutDiscards);
+ rrddim_set_by_pointer(st, rd_InHdrErrors, Ip6InHdrErrors);
+ rrddim_set_by_pointer(st, rd_InAddrErrors, Ip6InAddrErrors);
+ rrddim_set_by_pointer(st, rd_InUnknownProtos, Ip6InUnknownProtos);
+ rrddim_set_by_pointer(st, rd_InTooBigErrors, Ip6InTooBigErrors);
+ rrddim_set_by_pointer(st, rd_InTruncatedPkts, Ip6InTruncatedPkts);
+ rrddim_set_by_pointer(st, rd_InNoRoutes, Ip6InNoRoutes);
+ rrddim_set_by_pointer(st, rd_OutNoRoutes, Ip6OutNoRoutes);
+ rrdset_done(st);
+ }
+
+ // --------------------------------------------------------------------
+
+ if(do_udp_packets == CONFIG_BOOLEAN_YES || (do_udp_packets == CONFIG_BOOLEAN_AUTO && (Udp6InDatagrams || Udp6OutDatagrams))) {
+ do_udp_packets = CONFIG_BOOLEAN_YES;
+ static RRDSET *st = NULL;
+ static RRDDIM *rd_received = NULL,
+ *rd_sent = NULL;
+
+ if(unlikely(!st)) {
+ st = rrdset_create_localhost(
+ RRD_TYPE_NET_SNMP6
+ , "udppackets"
+ , NULL
+ , "udp6"
+ , NULL
+ , "IPv6 UDP Packets"
+ , "packets/s"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_NET_SNMP6_NAME
+ , NETDATA_CHART_PRIO_IPV6_UDP_PACKETS
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ rd_received = rrddim_add(st, "InDatagrams", "received", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_sent = rrddim_add(st, "OutDatagrams", "sent", -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else rrdset_next(st);
+
+ rrddim_set_by_pointer(st, rd_received, Udp6InDatagrams);
+ rrddim_set_by_pointer(st, rd_sent, Udp6OutDatagrams);
+ rrdset_done(st);
+ }
+
+ // --------------------------------------------------------------------
+
+ if(do_udp_errors == CONFIG_BOOLEAN_YES || (do_udp_errors == CONFIG_BOOLEAN_AUTO
+ && (
+ Udp6InErrors
+ || Udp6NoPorts
+ || Udp6RcvbufErrors
+ || Udp6SndbufErrors
+ || Udp6InCsumErrors
+ || Udp6IgnoredMulti
+ ))) {
+ do_udp_errors = CONFIG_BOOLEAN_YES;
+ static RRDSET *st = NULL;
+ static RRDDIM *rd_RcvbufErrors = NULL,
+ *rd_SndbufErrors = NULL,
+ *rd_InErrors = NULL,
+ *rd_NoPorts = NULL,
+ *rd_InCsumErrors = NULL,
+ *rd_IgnoredMulti = NULL;
+
+ if(unlikely(!st)) {
+ st = rrdset_create_localhost(
+ RRD_TYPE_NET_SNMP6
+ , "udperrors"
+ , NULL
+ , "udp6"
+ , NULL
+ , "IPv6 UDP Errors"
+ , "events/s"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_NET_SNMP6_NAME
+ , NETDATA_CHART_PRIO_IPV6_UDP_ERRORS
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+ rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
+
+ rd_RcvbufErrors = rrddim_add(st, "RcvbufErrors", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_SndbufErrors = rrddim_add(st, "SndbufErrors", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_InErrors = rrddim_add(st, "InErrors", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_NoPorts = rrddim_add(st, "NoPorts", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_InCsumErrors = rrddim_add(st, "InCsumErrors", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_IgnoredMulti = rrddim_add(st, "IgnoredMulti", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else rrdset_next(st);
+
+ rrddim_set_by_pointer(st, rd_RcvbufErrors, Udp6RcvbufErrors);
+ rrddim_set_by_pointer(st, rd_SndbufErrors, Udp6SndbufErrors);
+ rrddim_set_by_pointer(st, rd_InErrors, Udp6InErrors);
+ rrddim_set_by_pointer(st, rd_NoPorts, Udp6NoPorts);
+ rrddim_set_by_pointer(st, rd_InCsumErrors, Udp6InCsumErrors);
+ rrddim_set_by_pointer(st, rd_IgnoredMulti, Udp6IgnoredMulti);
+ rrdset_done(st);
+ }
+
+ // --------------------------------------------------------------------
+
+ if(do_udplite_packets == CONFIG_BOOLEAN_YES || (do_udplite_packets == CONFIG_BOOLEAN_AUTO && (UdpLite6InDatagrams || UdpLite6OutDatagrams))) {
+ do_udplite_packets = CONFIG_BOOLEAN_YES;
+ static RRDSET *st = NULL;
+ static RRDDIM *rd_received = NULL,
+ *rd_sent = NULL;
+
+ if(unlikely(!st)) {
+ st = rrdset_create_localhost(
+ RRD_TYPE_NET_SNMP6
+ , "udplitepackets"
+ , NULL
+ , "udplite6"
+ , NULL
+ , "IPv6 UDPlite Packets"
+ , "packets/s"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_NET_SNMP6_NAME
+ , NETDATA_CHART_PRIO_IPV6_UDPLITE_PACKETS
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ rd_received = rrddim_add(st, "InDatagrams", "received", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_sent = rrddim_add(st, "OutDatagrams", "sent", -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else rrdset_next(st);
+
+ rrddim_set_by_pointer(st, rd_received, UdpLite6InDatagrams);
+ rrddim_set_by_pointer(st, rd_sent, UdpLite6OutDatagrams);
+ rrdset_done(st);
+ }
+
+ // --------------------------------------------------------------------
+
+ if(do_udplite_errors == CONFIG_BOOLEAN_YES || (do_udplite_errors == CONFIG_BOOLEAN_AUTO
+ && (
+ UdpLite6InErrors
+ || UdpLite6NoPorts
+ || UdpLite6RcvbufErrors
+ || UdpLite6SndbufErrors
+ || UdpLite6InCsumErrors
+ ))) {
+ do_udplite_errors = CONFIG_BOOLEAN_YES;
+ static RRDSET *st = NULL;
+ static RRDDIM *rd_RcvbufErrors = NULL,
+ *rd_SndbufErrors = NULL,
+ *rd_InErrors = NULL,
+ *rd_NoPorts = NULL,
+ *rd_InCsumErrors = NULL;
+
+ if(unlikely(!st)) {
+ st = rrdset_create_localhost(
+ RRD_TYPE_NET_SNMP6
+ , "udpliteerrors"
+ , NULL
+ , "udplite6"
+ , NULL
+ , "IPv6 UDP Lite Errors"
+ , "events/s"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_NET_SNMP6_NAME
+ , NETDATA_CHART_PRIO_IPV6_UDPLITE_ERRORS
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+ rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
+
+ rd_RcvbufErrors = rrddim_add(st, "RcvbufErrors", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_SndbufErrors = rrddim_add(st, "SndbufErrors", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_InErrors = rrddim_add(st, "InErrors", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_NoPorts = rrddim_add(st, "NoPorts", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_InCsumErrors = rrddim_add(st, "InCsumErrors", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else rrdset_next(st);
+
+ rrddim_set_by_pointer(st, rd_InErrors, UdpLite6InErrors);
+ rrddim_set_by_pointer(st, rd_NoPorts, UdpLite6NoPorts);
+ rrddim_set_by_pointer(st, rd_RcvbufErrors, UdpLite6RcvbufErrors);
+ rrddim_set_by_pointer(st, rd_SndbufErrors, UdpLite6SndbufErrors);
+ rrddim_set_by_pointer(st, rd_InCsumErrors, UdpLite6InCsumErrors);
+ rrdset_done(st);
+ }
+
+ // --------------------------------------------------------------------
+
+ if(do_mcast == CONFIG_BOOLEAN_YES || (do_mcast == CONFIG_BOOLEAN_AUTO && (Ip6OutMcastOctets || Ip6InMcastOctets))) {
+ do_mcast = CONFIG_BOOLEAN_YES;
+ static RRDSET *st = NULL;
+ static RRDDIM *rd_Ip6InMcastOctets = NULL,
+ *rd_Ip6OutMcastOctets = NULL;
+
+ if(unlikely(!st)) {
+ st = rrdset_create_localhost(
+ RRD_TYPE_NET_SNMP6
+ , "mcast"
+ , NULL
+ , "multicast6"
+ , NULL
+ , "IPv6 Multicast Bandwidth"
+ , "kilobits/s"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_NET_SNMP6_NAME
+ , NETDATA_CHART_PRIO_IPV6_MCAST
+ , update_every
+ , RRDSET_TYPE_AREA
+ );
+ rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
+
+ rd_Ip6InMcastOctets = rrddim_add(st, "InMcastOctets", "received", 8, BITS_IN_A_KILOBIT, RRD_ALGORITHM_INCREMENTAL);
+ rd_Ip6OutMcastOctets = rrddim_add(st, "OutMcastOctets", "sent", -8, BITS_IN_A_KILOBIT, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else rrdset_next(st);
+
+ rrddim_set_by_pointer(st, rd_Ip6InMcastOctets, Ip6InMcastOctets);
+ rrddim_set_by_pointer(st, rd_Ip6OutMcastOctets, Ip6OutMcastOctets);
+ rrdset_done(st);
+ }
+
+ // --------------------------------------------------------------------
+
+ if(do_bcast == CONFIG_BOOLEAN_YES || (do_bcast == CONFIG_BOOLEAN_AUTO && (Ip6OutBcastOctets || Ip6InBcastOctets))) {
+ do_bcast = CONFIG_BOOLEAN_YES;
+ static RRDSET *st = NULL;
+ static RRDDIM *rd_Ip6InBcastOctets = NULL,
+ *rd_Ip6OutBcastOctets = NULL;
+
+ if(unlikely(!st)) {
+ st = rrdset_create_localhost(
+ RRD_TYPE_NET_SNMP6
+ , "bcast"
+ , NULL
+ , "broadcast6"
+ , NULL
+ , "IPv6 Broadcast Bandwidth"
+ , "kilobits/s"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_NET_SNMP6_NAME
+ , NETDATA_CHART_PRIO_IPV6_BCAST
+ , update_every
+ , RRDSET_TYPE_AREA
+ );
+ rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
+
+ rd_Ip6InBcastOctets = rrddim_add(st, "InBcastOctets", "received", 8, BITS_IN_A_KILOBIT, RRD_ALGORITHM_INCREMENTAL);
+ rd_Ip6OutBcastOctets = rrddim_add(st, "OutBcastOctets", "sent", -8, BITS_IN_A_KILOBIT, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else rrdset_next(st);
+
+ rrddim_set_by_pointer(st, rd_Ip6InBcastOctets, Ip6InBcastOctets);
+ rrddim_set_by_pointer(st, rd_Ip6OutBcastOctets, Ip6OutBcastOctets);
+ rrdset_done(st);
+ }
+
+ // --------------------------------------------------------------------
+
+ if(do_mcast_p == CONFIG_BOOLEAN_YES || (do_mcast_p == CONFIG_BOOLEAN_AUTO && (Ip6OutMcastPkts || Ip6InMcastPkts))) {
+ do_mcast_p = CONFIG_BOOLEAN_YES;
+ static RRDSET *st = NULL;
+ static RRDDIM *rd_Ip6InMcastPkts = NULL,
+ *rd_Ip6OutMcastPkts = NULL;
+
+ if(unlikely(!st)) {
+ st = rrdset_create_localhost(
+ RRD_TYPE_NET_SNMP6
+ , "mcastpkts"
+ , NULL
+ , "multicast6"
+ , NULL
+ , "IPv6 Multicast Packets"
+ , "packets/s"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_NET_SNMP6_NAME
+ , NETDATA_CHART_PRIO_IPV6_MCAST_PACKETS
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+ rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
+
+ rd_Ip6InMcastPkts = rrddim_add(st, "InMcastPkts", "received", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_Ip6OutMcastPkts = rrddim_add(st, "OutMcastPkts", "sent", -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else rrdset_next(st);
+
+ rrddim_set_by_pointer(st, rd_Ip6InMcastPkts, Ip6InMcastPkts);
+ rrddim_set_by_pointer(st, rd_Ip6OutMcastPkts, Ip6OutMcastPkts);
+ rrdset_done(st);
+ }
+
+ // --------------------------------------------------------------------
+
+ if(do_icmp == CONFIG_BOOLEAN_YES || (do_icmp == CONFIG_BOOLEAN_AUTO && (Icmp6InMsgs || Icmp6OutMsgs))) {
+ do_icmp = CONFIG_BOOLEAN_YES;
+ static RRDSET *st = NULL;
+ static RRDDIM *rd_Icmp6InMsgs = NULL,
+ *rd_Icmp6OutMsgs = NULL;
+
+ if(unlikely(!st)) {
+ st = rrdset_create_localhost(
+ RRD_TYPE_NET_SNMP6
+ , "icmp"
+ , NULL
+ , "icmp6"
+ , NULL
+ , "IPv6 ICMP Messages"
+ , "messages/s"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_NET_SNMP6_NAME
+ , NETDATA_CHART_PRIO_IPV6_ICMP
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ rd_Icmp6InMsgs = rrddim_add(st, "InMsgs", "received", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_Icmp6OutMsgs = rrddim_add(st, "OutMsgs", "sent", -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else rrdset_next(st);
+
+ rrddim_set_by_pointer(st, rd_Icmp6InMsgs, Icmp6InMsgs);
+ rrddim_set_by_pointer(st, rd_Icmp6OutMsgs, Icmp6OutMsgs);
+ rrdset_done(st);
+ }
+
+ // --------------------------------------------------------------------
+
+ if(do_icmp_redir == CONFIG_BOOLEAN_YES || (do_icmp_redir == CONFIG_BOOLEAN_AUTO && (Icmp6InRedirects || Icmp6OutRedirects))) {
+ do_icmp_redir = CONFIG_BOOLEAN_YES;
+ static RRDSET *st = NULL;
+ static RRDDIM *rd_Icmp6InRedirects = NULL,
+ *rd_Icmp6OutRedirects = NULL;
+
+ if(unlikely(!st)) {
+ st = rrdset_create_localhost(
+ RRD_TYPE_NET_SNMP6
+ , "icmpredir"
+ , NULL
+ , "icmp6"
+ , NULL
+ , "IPv6 ICMP Redirects"
+ , "redirects/s"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_NET_SNMP6_NAME
+ , NETDATA_CHART_PRIO_IPV6_ICMP_REDIR
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ rd_Icmp6InRedirects = rrddim_add(st, "InRedirects", "received", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_Icmp6OutRedirects = rrddim_add(st, "OutRedirects", "sent", -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else rrdset_next(st);
+
+ rrddim_set_by_pointer(st, rd_Icmp6InRedirects, Icmp6InRedirects);
+ rrddim_set_by_pointer(st, rd_Icmp6OutRedirects, Icmp6OutRedirects);
+ rrdset_done(st);
+ }
+
+ // --------------------------------------------------------------------
+
+ if(do_icmp_errors == CONFIG_BOOLEAN_YES || (do_icmp_errors == CONFIG_BOOLEAN_AUTO
+ && (
+ Icmp6InErrors
+ || Icmp6OutErrors
+ || Icmp6InCsumErrors
+ || Icmp6InDestUnreachs
+ || Icmp6InPktTooBigs
+ || Icmp6InTimeExcds
+ || Icmp6InParmProblems
+ || Icmp6OutDestUnreachs
+ || Icmp6OutPktTooBigs
+ || Icmp6OutTimeExcds
+ || Icmp6OutParmProblems
+ ))) {
+ do_icmp_errors = CONFIG_BOOLEAN_YES;
+ static RRDSET *st = NULL;
+ static RRDDIM *rd_InErrors = NULL,
+ *rd_OutErrors = NULL,
+ *rd_InCsumErrors = NULL,
+ *rd_InDestUnreachs = NULL,
+ *rd_InPktTooBigs = NULL,
+ *rd_InTimeExcds = NULL,
+ *rd_InParmProblems = NULL,
+ *rd_OutDestUnreachs = NULL,
+ *rd_OutPktTooBigs = NULL,
+ *rd_OutTimeExcds = NULL,
+ *rd_OutParmProblems = NULL;
+
+ if(unlikely(!st)) {
+ st = rrdset_create_localhost(
+ RRD_TYPE_NET_SNMP6
+ , "icmperrors"
+ , NULL
+ , "icmp6"
+ , NULL
+ , "IPv6 ICMP Errors"
+ , "errors/s"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_NET_SNMP6_NAME
+ , NETDATA_CHART_PRIO_IPV6_ICMP_ERRORS
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ rd_InErrors = rrddim_add(st, "InErrors", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_OutErrors = rrddim_add(st, "OutErrors", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_InCsumErrors = rrddim_add(st, "InCsumErrors", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_InDestUnreachs = rrddim_add(st, "InDestUnreachs", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_InPktTooBigs = rrddim_add(st, "InPktTooBigs", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_InTimeExcds = rrddim_add(st, "InTimeExcds", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_InParmProblems = rrddim_add(st, "InParmProblems", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_OutDestUnreachs = rrddim_add(st, "OutDestUnreachs", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_OutPktTooBigs = rrddim_add(st, "OutPktTooBigs", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_OutTimeExcds = rrddim_add(st, "OutTimeExcds", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_OutParmProblems = rrddim_add(st, "OutParmProblems", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else rrdset_next(st);
+
+ rrddim_set_by_pointer(st, rd_InErrors, Icmp6InErrors);
+ rrddim_set_by_pointer(st, rd_OutErrors, Icmp6OutErrors);
+ rrddim_set_by_pointer(st, rd_InCsumErrors, Icmp6InCsumErrors);
+ rrddim_set_by_pointer(st, rd_InDestUnreachs, Icmp6InDestUnreachs);
+ rrddim_set_by_pointer(st, rd_InPktTooBigs, Icmp6InPktTooBigs);
+ rrddim_set_by_pointer(st, rd_InTimeExcds, Icmp6InTimeExcds);
+ rrddim_set_by_pointer(st, rd_InParmProblems, Icmp6InParmProblems);
+ rrddim_set_by_pointer(st, rd_OutDestUnreachs, Icmp6OutDestUnreachs);
+ rrddim_set_by_pointer(st, rd_OutPktTooBigs, Icmp6OutPktTooBigs);
+ rrddim_set_by_pointer(st, rd_OutTimeExcds, Icmp6OutTimeExcds);
+ rrddim_set_by_pointer(st, rd_OutParmProblems, Icmp6OutParmProblems);
+ rrdset_done(st);
+ }
+
+ // --------------------------------------------------------------------
+
+ if(do_icmp_echos == CONFIG_BOOLEAN_YES || (do_icmp_echos == CONFIG_BOOLEAN_AUTO
+ && (
+ Icmp6InEchos
+ || Icmp6OutEchos
+ || Icmp6InEchoReplies
+ || Icmp6OutEchoReplies
+ ))) {
+ do_icmp_echos = CONFIG_BOOLEAN_YES;
+ static RRDSET *st = NULL;
+ static RRDDIM *rd_InEchos = NULL,
+ *rd_OutEchos = NULL,
+ *rd_InEchoReplies = NULL,
+ *rd_OutEchoReplies = NULL;
+
+ if(unlikely(!st)) {
+ st = rrdset_create_localhost(
+ RRD_TYPE_NET_SNMP6
+ , "icmpechos"
+ , NULL
+ , "icmp6"
+ , NULL
+ , "IPv6 ICMP Echo"
+ , "messages/s"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_NET_SNMP6_NAME
+ , NETDATA_CHART_PRIO_IPV6_ICMP_ECHOS
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ rd_InEchos = rrddim_add(st, "InEchos", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_OutEchos = rrddim_add(st, "OutEchos", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_InEchoReplies = rrddim_add(st, "InEchoReplies", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_OutEchoReplies = rrddim_add(st, "OutEchoReplies", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else rrdset_next(st);
+
+ rrddim_set_by_pointer(st, rd_InEchos, Icmp6InEchos);
+ rrddim_set_by_pointer(st, rd_OutEchos, Icmp6OutEchos);
+ rrddim_set_by_pointer(st, rd_InEchoReplies, Icmp6InEchoReplies);
+ rrddim_set_by_pointer(st, rd_OutEchoReplies, Icmp6OutEchoReplies);
+ rrdset_done(st);
+ }
+
+ // --------------------------------------------------------------------
+
+ if(do_icmp_groupmemb == CONFIG_BOOLEAN_YES || (do_icmp_groupmemb == CONFIG_BOOLEAN_AUTO
+ && (
+ Icmp6InGroupMembQueries
+ || Icmp6OutGroupMembQueries
+ || Icmp6InGroupMembResponses
+ || Icmp6OutGroupMembResponses
+ || Icmp6InGroupMembReductions
+ || Icmp6OutGroupMembReductions
+ ))) {
+ do_icmp_groupmemb = CONFIG_BOOLEAN_YES;
+ static RRDSET *st = NULL;
+ static RRDDIM *rd_InQueries = NULL,
+ *rd_OutQueries = NULL,
+ *rd_InResponses = NULL,
+ *rd_OutResponses = NULL,
+ *rd_InReductions = NULL,
+ *rd_OutReductions = NULL;
+
+ if(unlikely(!st)) {
+ st = rrdset_create_localhost(
+ RRD_TYPE_NET_SNMP6
+ , "groupmemb"
+ , NULL
+ , "icmp6"
+ , NULL
+ , "IPv6 ICMP Group Membership"
+ , "messages/s"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_NET_SNMP6_NAME
+ , NETDATA_CHART_PRIO_IPV6_ICMP_GROUPMEMB
+ , update_every
+ , RRDSET_TYPE_LINE);
+
+ rd_InQueries = rrddim_add(st, "InQueries", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_OutQueries = rrddim_add(st, "OutQueries", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_InResponses = rrddim_add(st, "InResponses", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_OutResponses = rrddim_add(st, "OutResponses", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_InReductions = rrddim_add(st, "InReductions", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_OutReductions = rrddim_add(st, "OutReductions", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else rrdset_next(st);
+
+ rrddim_set_by_pointer(st, rd_InQueries, Icmp6InGroupMembQueries);
+ rrddim_set_by_pointer(st, rd_OutQueries, Icmp6OutGroupMembQueries);
+ rrddim_set_by_pointer(st, rd_InResponses, Icmp6InGroupMembResponses);
+ rrddim_set_by_pointer(st, rd_OutResponses, Icmp6OutGroupMembResponses);
+ rrddim_set_by_pointer(st, rd_InReductions, Icmp6InGroupMembReductions);
+ rrddim_set_by_pointer(st, rd_OutReductions, Icmp6OutGroupMembReductions);
+ rrdset_done(st);
+ }
+
+ // --------------------------------------------------------------------
+
+ if(do_icmp_router == CONFIG_BOOLEAN_YES || (do_icmp_router == CONFIG_BOOLEAN_AUTO
+ && (
+ Icmp6InRouterSolicits
+ || Icmp6OutRouterSolicits
+ || Icmp6InRouterAdvertisements
+ || Icmp6OutRouterAdvertisements
+ ))) {
+ do_icmp_router = CONFIG_BOOLEAN_YES;
+ static RRDSET *st = NULL;
+ static RRDDIM *rd_InSolicits = NULL,
+ *rd_OutSolicits = NULL,
+ *rd_InAdvertisements = NULL,
+ *rd_OutAdvertisements = NULL;
+
+ if(unlikely(!st)) {
+ st = rrdset_create_localhost(
+ RRD_TYPE_NET_SNMP6
+ , "icmprouter"
+ , NULL
+ , "icmp6"
+ , NULL
+ , "IPv6 Router Messages"
+ , "messages/s"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_NET_SNMP6_NAME
+ , NETDATA_CHART_PRIO_IPV6_ICMP_ROUTER
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ rd_InSolicits = rrddim_add(st, "InSolicits", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_OutSolicits = rrddim_add(st, "OutSolicits", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_InAdvertisements = rrddim_add(st, "InAdvertisements", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_OutAdvertisements = rrddim_add(st, "OutAdvertisements", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else rrdset_next(st);
+
+ rrddim_set_by_pointer(st, rd_InSolicits, Icmp6InRouterSolicits);
+ rrddim_set_by_pointer(st, rd_OutSolicits, Icmp6OutRouterSolicits);
+ rrddim_set_by_pointer(st, rd_InAdvertisements, Icmp6InRouterAdvertisements);
+ rrddim_set_by_pointer(st, rd_OutAdvertisements, Icmp6OutRouterAdvertisements);
+ rrdset_done(st);
+ }
+
+ // --------------------------------------------------------------------
+
+ if(do_icmp_neighbor == CONFIG_BOOLEAN_YES || (do_icmp_neighbor == CONFIG_BOOLEAN_AUTO
+ && (
+ Icmp6InNeighborSolicits
+ || Icmp6OutNeighborSolicits
+ || Icmp6InNeighborAdvertisements
+ || Icmp6OutNeighborAdvertisements
+ ))) {
+ do_icmp_neighbor = CONFIG_BOOLEAN_YES;
+ static RRDSET *st = NULL;
+ static RRDDIM *rd_InSolicits = NULL,
+ *rd_OutSolicits = NULL,
+ *rd_InAdvertisements = NULL,
+ *rd_OutAdvertisements = NULL;
+
+ if(unlikely(!st)) {
+ st = rrdset_create_localhost(
+ RRD_TYPE_NET_SNMP6
+ , "icmpneighbor"
+ , NULL
+ , "icmp6"
+ , NULL
+ , "IPv6 Neighbor Messages"
+ , "messages/s"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_NET_SNMP6_NAME
+ , NETDATA_CHART_PRIO_IPV6_ICMP_NEIGHBOR
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ rd_InSolicits = rrddim_add(st, "InSolicits", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_OutSolicits = rrddim_add(st, "OutSolicits", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_InAdvertisements = rrddim_add(st, "InAdvertisements", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_OutAdvertisements = rrddim_add(st, "OutAdvertisements", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else rrdset_next(st);
+
+ rrddim_set_by_pointer(st, rd_InSolicits, Icmp6InNeighborSolicits);
+ rrddim_set_by_pointer(st, rd_OutSolicits, Icmp6OutNeighborSolicits);
+ rrddim_set_by_pointer(st, rd_InAdvertisements, Icmp6InNeighborAdvertisements);
+ rrddim_set_by_pointer(st, rd_OutAdvertisements, Icmp6OutNeighborAdvertisements);
+ rrdset_done(st);
+ }
+
+ // --------------------------------------------------------------------
+
+ if(do_icmp_mldv2 == CONFIG_BOOLEAN_YES || (do_icmp_mldv2 == CONFIG_BOOLEAN_AUTO && (Icmp6InMLDv2Reports || Icmp6OutMLDv2Reports))) {
+ do_icmp_mldv2 = CONFIG_BOOLEAN_YES;
+ static RRDSET *st = NULL;
+ static RRDDIM *rd_InMLDv2Reports = NULL,
+ *rd_OutMLDv2Reports = NULL;
+
+ if(unlikely(!st)) {
+ st = rrdset_create_localhost(
+ RRD_TYPE_NET_SNMP6
+ , "icmpmldv2"
+ , NULL
+ , "icmp6"
+ , NULL
+ , "IPv6 ICMP MLDv2 Reports"
+ , "reports/s"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_NET_SNMP6_NAME
+ , NETDATA_CHART_PRIO_IPV6_ICMP_LDV2
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ rd_InMLDv2Reports = rrddim_add(st, "InMLDv2Reports", "received", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_OutMLDv2Reports = rrddim_add(st, "OutMLDv2Reports", "sent", -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else rrdset_next(st);
+
+ rrddim_set_by_pointer(st, rd_InMLDv2Reports, Icmp6InMLDv2Reports);
+ rrddim_set_by_pointer(st, rd_OutMLDv2Reports, Icmp6OutMLDv2Reports);
+ rrdset_done(st);
+ }
+
+ // --------------------------------------------------------------------
+
+ if(do_icmp_types == CONFIG_BOOLEAN_YES || (do_icmp_types == CONFIG_BOOLEAN_AUTO
+ && (
+ Icmp6InType1
+ || Icmp6InType128
+ || Icmp6InType129
+ || Icmp6InType136
+ || Icmp6OutType1
+ || Icmp6OutType128
+ || Icmp6OutType129
+ || Icmp6OutType133
+ || Icmp6OutType135
+ || Icmp6OutType143
+ ))) {
+ do_icmp_types = CONFIG_BOOLEAN_YES;
+ static RRDSET *st = NULL;
+ static RRDDIM *rd_InType1 = NULL,
+ *rd_InType128 = NULL,
+ *rd_InType129 = NULL,
+ *rd_InType136 = NULL,
+ *rd_OutType1 = NULL,
+ *rd_OutType128 = NULL,
+ *rd_OutType129 = NULL,
+ *rd_OutType133 = NULL,
+ *rd_OutType135 = NULL,
+ *rd_OutType143 = NULL;
+
+ if(unlikely(!st)) {
+ st = rrdset_create_localhost(
+ RRD_TYPE_NET_SNMP6
+ , "icmptypes"
+ , NULL
+ , "icmp6"
+ , NULL
+ , "IPv6 ICMP Types"
+ , "messages/s"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_NET_SNMP6_NAME
+ , NETDATA_CHART_PRIO_IPV6_ICMP_TYPES
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ rd_InType1 = rrddim_add(st, "InType1", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_InType128 = rrddim_add(st, "InType128", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_InType129 = rrddim_add(st, "InType129", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_InType136 = rrddim_add(st, "InType136", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_OutType1 = rrddim_add(st, "OutType1", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_OutType128 = rrddim_add(st, "OutType128", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_OutType129 = rrddim_add(st, "OutType129", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_OutType133 = rrddim_add(st, "OutType133", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_OutType135 = rrddim_add(st, "OutType135", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_OutType143 = rrddim_add(st, "OutType143", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else rrdset_next(st);
+
+ rrddim_set_by_pointer(st, rd_InType1, Icmp6InType1);
+ rrddim_set_by_pointer(st, rd_InType128, Icmp6InType128);
+ rrddim_set_by_pointer(st, rd_InType129, Icmp6InType129);
+ rrddim_set_by_pointer(st, rd_InType136, Icmp6InType136);
+ rrddim_set_by_pointer(st, rd_OutType1, Icmp6OutType1);
+ rrddim_set_by_pointer(st, rd_OutType128, Icmp6OutType128);
+ rrddim_set_by_pointer(st, rd_OutType129, Icmp6OutType129);
+ rrddim_set_by_pointer(st, rd_OutType133, Icmp6OutType133);
+ rrddim_set_by_pointer(st, rd_OutType135, Icmp6OutType135);
+ rrddim_set_by_pointer(st, rd_OutType143, Icmp6OutType143);
+ rrdset_done(st);
+ }
+
+ // --------------------------------------------------------------------
+
+ if(do_ect == CONFIG_BOOLEAN_YES || (do_ect == CONFIG_BOOLEAN_AUTO
+ && (
+ Ip6InNoECTPkts
+ || Ip6InECT1Pkts
+ || Ip6InECT0Pkts
+ || Ip6InCEPkts
+ ))) {
+ do_ect = CONFIG_BOOLEAN_YES;
+ static RRDSET *st = NULL;
+ static RRDDIM *rd_InNoECTPkts = NULL,
+ *rd_InECT1Pkts = NULL,
+ *rd_InECT0Pkts = NULL,
+ *rd_InCEPkts = NULL;
+
+ if(unlikely(!st)) {
+ st = rrdset_create_localhost(
+ RRD_TYPE_NET_SNMP6
+ , "ect"
+ , NULL
+ , "packets"
+ , NULL
+ , "IPv6 ECT Packets"
+ , "packets/s"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_NET_SNMP6_NAME
+ , NETDATA_CHART_PRIO_IPV6_ECT
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ rd_InNoECTPkts = rrddim_add(st, "InNoECTPkts", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_InECT1Pkts = rrddim_add(st, "InECT1Pkts", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_InECT0Pkts = rrddim_add(st, "InECT0Pkts", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_InCEPkts = rrddim_add(st, "InCEPkts", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else rrdset_next(st);
+
+ rrddim_set_by_pointer(st, rd_InNoECTPkts, Ip6InNoECTPkts);
+ rrddim_set_by_pointer(st, rd_InECT1Pkts, Ip6InECT1Pkts);
+ rrddim_set_by_pointer(st, rd_InECT0Pkts, Ip6InECT0Pkts);
+ rrddim_set_by_pointer(st, rd_InCEPkts, Ip6InCEPkts);
+ rrdset_done(st);
+ }
+
+ return 0;
+}
+
diff --git a/collectors/proc.plugin/proc_net_sockstat.c b/collectors/proc.plugin/proc_net_sockstat.c
new file mode 100644
index 000000000..0c3b6e196
--- /dev/null
+++ b/collectors/proc.plugin/proc_net_sockstat.c
@@ -0,0 +1,518 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "plugin_proc.h"
+
+#define PLUGIN_PROC_MODULE_NET_SOCKSTAT_NAME "/proc/net/sockstat"
+
+static struct proc_net_sockstat {
+ kernel_uint_t sockets_used;
+
+ kernel_uint_t tcp_inuse;
+ kernel_uint_t tcp_orphan;
+ kernel_uint_t tcp_tw;
+ kernel_uint_t tcp_alloc;
+ kernel_uint_t tcp_mem;
+
+ kernel_uint_t udp_inuse;
+ kernel_uint_t udp_mem;
+
+ kernel_uint_t udplite_inuse;
+
+ kernel_uint_t raw_inuse;
+
+ kernel_uint_t frag_inuse;
+ kernel_uint_t frag_memory;
+} sockstat_root = { 0 };
+
+
+static int read_tcp_mem(void) {
+ static char *filename = NULL;
+ static RRDVAR *tcp_mem_low_threshold = NULL,
+ *tcp_mem_pressure_threshold = NULL,
+ *tcp_mem_high_threshold = NULL;
+
+ if(unlikely(!tcp_mem_low_threshold)) {
+ tcp_mem_low_threshold = rrdvar_custom_host_variable_create(localhost, "tcp_mem_low");
+ tcp_mem_pressure_threshold = rrdvar_custom_host_variable_create(localhost, "tcp_mem_pressure");
+ tcp_mem_high_threshold = rrdvar_custom_host_variable_create(localhost, "tcp_mem_high");
+ }
+
+ if(unlikely(!filename)) {
+ char buffer[FILENAME_MAX + 1];
+ snprintfz(buffer, FILENAME_MAX, "%s/proc/sys/net/ipv4/tcp_mem", netdata_configured_host_prefix);
+ filename = strdupz(buffer);
+ }
+
+ char buffer[200 + 1], *start, *end;
+ if(read_file(filename, buffer, 200) != 0) return 1;
+ buffer[200] = '\0';
+
+ unsigned long long low = 0, pressure = 0, high = 0;
+
+ start = buffer;
+ low = strtoull(start, &end, 10);
+
+ start = end;
+ pressure = strtoull(start, &end, 10);
+
+ start = end;
+ high = strtoull(start, &end, 10);
+
+ // fprintf(stderr, "TCP MEM low = %llu, pressure = %llu, high = %llu\n", low, pressure, high);
+
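+ // the kernel reports tcp_mem thresholds in pages; convert them to KiB before exposing them as host variables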
+ rrdvar_custom_host_variable_set(localhost, tcp_mem_low_threshold, low * sysconf(_SC_PAGESIZE) / 1024.0);
+ rrdvar_custom_host_variable_set(localhost, tcp_mem_pressure_threshold, pressure * sysconf(_SC_PAGESIZE) / 1024.0);
+ rrdvar_custom_host_variable_set(localhost, tcp_mem_high_threshold, high * sysconf(_SC_PAGESIZE) / 1024.0);
+
+ return 0;
+}
+
+static kernel_uint_t read_tcp_max_orphans(void) {
+ static char *filename = NULL;
+ static RRDVAR *tcp_max_orphans_var = NULL;
+
+ if(unlikely(!filename)) {
+ char buffer[FILENAME_MAX + 1];
+ snprintfz(buffer, FILENAME_MAX, "%s/proc/sys/net/ipv4/tcp_max_orphans", netdata_configured_host_prefix);
+ filename = strdupz(buffer);
+ }
+
+ unsigned long long tcp_max_orphans = 0;
+ if(read_single_number_file(filename, &tcp_max_orphans) == 0) {
+
+ if(unlikely(!tcp_max_orphans_var))
+ tcp_max_orphans_var = rrdvar_custom_host_variable_create(localhost, "tcp_max_orphans");
+
+ rrdvar_custom_host_variable_set(localhost, tcp_max_orphans_var, tcp_max_orphans);
+ return tcp_max_orphans;
+ }
+
+ return 0;
+}
+
+int do_proc_net_sockstat(int update_every, usec_t dt) {
+ (void)dt;
+
+ static procfile *ff = NULL;
+
+ static uint32_t hash_sockets = 0,
+ hash_raw = 0,
+ hash_frag = 0,
+ hash_tcp = 0,
+ hash_udp = 0,
+ hash_udplite = 0;
+
+ static long long update_constants_every = 60, update_constants_count = 0;
+
+ static ARL_BASE *arl_sockets = NULL;
+ static ARL_BASE *arl_tcp = NULL;
+ static ARL_BASE *arl_udp = NULL;
+ static ARL_BASE *arl_udplite = NULL;
+ static ARL_BASE *arl_raw = NULL;
+ static ARL_BASE *arl_frag = NULL;
+
+ static int do_sockets = -1, do_tcp_sockets = -1, do_tcp_mem = -1, do_udp_sockets = -1, do_udp_mem = -1, do_udplite_sockets = -1, do_raw_sockets = -1, do_frag_sockets = -1, do_frag_mem = -1;
+
+ static char *keys[7] = { NULL };
+ static uint32_t hashes[7] = { 0 };
+ static ARL_BASE *bases[7] = { NULL };
+
+ if(unlikely(!arl_sockets)) {
+ do_sockets = config_get_boolean_ondemand("plugin:proc:/proc/net/sockstat", "ipv4 sockets", CONFIG_BOOLEAN_AUTO);
+ do_tcp_sockets = config_get_boolean_ondemand("plugin:proc:/proc/net/sockstat", "ipv4 TCP sockets", CONFIG_BOOLEAN_AUTO);
+ do_tcp_mem = config_get_boolean_ondemand("plugin:proc:/proc/net/sockstat", "ipv4 TCP memory", CONFIG_BOOLEAN_AUTO);
+ do_udp_sockets = config_get_boolean_ondemand("plugin:proc:/proc/net/sockstat", "ipv4 UDP sockets", CONFIG_BOOLEAN_AUTO);
+ do_udp_mem = config_get_boolean_ondemand("plugin:proc:/proc/net/sockstat", "ipv4 UDP memory", CONFIG_BOOLEAN_AUTO);
+ do_udplite_sockets = config_get_boolean_ondemand("plugin:proc:/proc/net/sockstat", "ipv4 UDPLITE sockets", CONFIG_BOOLEAN_AUTO);
+ do_raw_sockets = config_get_boolean_ondemand("plugin:proc:/proc/net/sockstat", "ipv4 RAW sockets", CONFIG_BOOLEAN_AUTO);
+ do_frag_sockets = config_get_boolean_ondemand("plugin:proc:/proc/net/sockstat", "ipv4 FRAG sockets", CONFIG_BOOLEAN_AUTO);
+ do_frag_mem = config_get_boolean_ondemand("plugin:proc:/proc/net/sockstat", "ipv4 FRAG memory", CONFIG_BOOLEAN_AUTO);
+
+ update_constants_every = config_get_number("plugin:proc:/proc/net/sockstat", "update constants every", update_constants_every);
+ update_constants_count = update_constants_every;
+
+ arl_sockets = arl_create("sockstat/sockets", arl_callback_str2kernel_uint_t, 60);
+ arl_expect(arl_sockets, "used", &sockstat_root.sockets_used);
+
+ arl_tcp = arl_create("sockstat/TCP", arl_callback_str2kernel_uint_t, 60);
+ arl_expect(arl_tcp, "inuse", &sockstat_root.tcp_inuse);
+ arl_expect(arl_tcp, "orphan", &sockstat_root.tcp_orphan);
+ arl_expect(arl_tcp, "tw", &sockstat_root.tcp_tw);
+ arl_expect(arl_tcp, "alloc", &sockstat_root.tcp_alloc);
+ arl_expect(arl_tcp, "mem", &sockstat_root.tcp_mem);
+
+ arl_udp = arl_create("sockstat/UDP", arl_callback_str2kernel_uint_t, 60);
+ arl_expect(arl_udp, "inuse", &sockstat_root.udp_inuse);
+ arl_expect(arl_udp, "mem", &sockstat_root.udp_mem);
+
+ arl_udplite = arl_create("sockstat/UDPLITE", arl_callback_str2kernel_uint_t, 60);
+ arl_expect(arl_udplite, "inuse", &sockstat_root.udplite_inuse);
+
+ arl_raw = arl_create("sockstat/RAW", arl_callback_str2kernel_uint_t, 60);
+ arl_expect(arl_raw, "inuse", &sockstat_root.raw_inuse);
+
+ arl_frag = arl_create("sockstat/FRAG", arl_callback_str2kernel_uint_t, 60);
+ arl_expect(arl_frag, "inuse", &sockstat_root.frag_inuse);
+ arl_expect(arl_frag, "memory", &sockstat_root.frag_memory);
+
+ hash_sockets = simple_hash("sockets");
+ hash_tcp = simple_hash("TCP");
+ hash_udp = simple_hash("UDP");
+ hash_udplite = simple_hash("UDPLITE");
+ hash_raw = simple_hash("RAW");
+ hash_frag = simple_hash("FRAG");
+
+ keys[0] = "sockets"; hashes[0] = hash_sockets; bases[0] = arl_sockets;
+ keys[1] = "TCP"; hashes[1] = hash_tcp; bases[1] = arl_tcp;
+ keys[2] = "UDP"; hashes[2] = hash_udp; bases[2] = arl_udp;
+ keys[3] = "UDPLITE"; hashes[3] = hash_udplite; bases[3] = arl_udplite;
+ keys[4] = "RAW"; hashes[4] = hash_raw; bases[4] = arl_raw;
+ keys[5] = "FRAG"; hashes[5] = hash_frag; bases[5] = arl_frag;
+ keys[6] = NULL; // terminator
+ }
+
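+ // re-read the tcp_mem and tcp_max_orphans kernel constants every 'update constants every' seconds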
+ update_constants_count += update_every;
+ if(unlikely(update_constants_count > update_constants_every)) {
+ read_tcp_max_orphans();
+ read_tcp_mem();
+ update_constants_count = 0;
+ }
+
+ if(unlikely(!ff)) {
+ char filename[FILENAME_MAX + 1];
+ snprintfz(filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/proc/net/sockstat");
+ ff = procfile_open(config_get("plugin:proc:/proc/net/sockstat", "filename to monitor", filename), " \t:", PROCFILE_FLAG_DEFAULT);
+ if(unlikely(!ff)) return 1;
+ }
+
+ ff = procfile_readall(ff);
+ if(unlikely(!ff)) return 0; // we return 0, so that we will retry to open it next time
+
+ size_t lines = procfile_lines(ff), l;
+
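+ // for each line, match the protocol keyword (sockets, TCP, UDP, UDPLITE, RAW, FRAG) and feed its name/value pairs to the matching ARL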
+ for(l = 0; l < lines ;l++) {
+ size_t words = procfile_linewords(ff, l);
+ char *key = procfile_lineword(ff, l, 0);
+ uint32_t hash = simple_hash(key);
+
+ int k;
+ for(k = 0; keys[k] ; k++) {
+ if(unlikely(hash == hashes[k] && strcmp(key, keys[k]) == 0)) {
+ // fprintf(stderr, "KEY: '%s', l=%zu, w=1, words=%zu\n", key, l, words);
+ ARL_BASE *arl = bases[k];
+ arl_begin(arl);
+ size_t w = 1;
+
+ while(w + 1 < words) {
+ char *name = procfile_lineword(ff, l, w); w++;
+ char *value = procfile_lineword(ff, l, w); w++;
+ // fprintf(stderr, " > NAME '%s', VALUE '%s', l=%zu, w=%zu, words=%zu\n", name, value, l, w, words);
+ if(unlikely(arl_check(arl, name, value) != 0))
+ break;
+ }
+
+ break;
+ }
+ }
+ }
+
+ // ------------------------------------------------------------------------
+
+ if(do_sockets == CONFIG_BOOLEAN_YES || (do_sockets == CONFIG_BOOLEAN_AUTO && sockstat_root.sockets_used)) {
+ do_sockets = CONFIG_BOOLEAN_YES;
+
+ static RRDSET *st = NULL;
+ static RRDDIM *rd_used = NULL;
+
+ if(unlikely(!st)) {
+ st = rrdset_create_localhost(
+ "ipv4"
+ , "sockstat_sockets"
+ , NULL
+ , "sockets"
+ , NULL
+ , "IPv4 Sockets Used"
+ , "sockets"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_NET_SOCKSTAT_NAME
+ , NETDATA_CHART_PRIO_IPV4_SOCKETS
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ rd_used = rrddim_add(st, "used", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ }
+ else rrdset_next(st);
+
+ rrddim_set_by_pointer(st, rd_used, (collected_number)sockstat_root.sockets_used);
+ rrdset_done(st);
+ }
+
+ // ------------------------------------------------------------------------
+
+ if(do_tcp_sockets == CONFIG_BOOLEAN_YES || (do_tcp_sockets == CONFIG_BOOLEAN_AUTO && (sockstat_root.tcp_inuse || sockstat_root.tcp_orphan || sockstat_root.tcp_tw || sockstat_root.tcp_alloc))) {
+ do_tcp_sockets = CONFIG_BOOLEAN_YES;
+
+ static RRDSET *st = NULL;
+ static RRDDIM *rd_inuse = NULL,
+ *rd_orphan = NULL,
+ *rd_timewait = NULL,
+ *rd_alloc = NULL;
+
+ if(unlikely(!st)) {
+ st = rrdset_create_localhost(
+ "ipv4"
+ , "sockstat_tcp_sockets"
+ , NULL
+ , "tcp"
+ , NULL
+ , "IPv4 TCP Sockets"
+ , "sockets"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_NET_SOCKSTAT_NAME
+ , NETDATA_CHART_PRIO_IPV4_TCP_SOCKETS
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ rd_alloc = rrddim_add(st, "alloc", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ rd_orphan = rrddim_add(st, "orphan", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ rd_inuse = rrddim_add(st, "inuse", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ rd_timewait = rrddim_add(st, "timewait", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ }
+ else rrdset_next(st);
+
+ rrddim_set_by_pointer(st, rd_inuse, (collected_number)sockstat_root.tcp_inuse);
+ rrddim_set_by_pointer(st, rd_orphan, (collected_number)sockstat_root.tcp_orphan);
+ rrddim_set_by_pointer(st, rd_timewait, (collected_number)sockstat_root.tcp_tw);
+ rrddim_set_by_pointer(st, rd_alloc, (collected_number)sockstat_root.tcp_alloc);
+ rrdset_done(st);
+ }
+
+ // ------------------------------------------------------------------------
+
+ if(do_tcp_mem == CONFIG_BOOLEAN_YES || (do_tcp_mem == CONFIG_BOOLEAN_AUTO && sockstat_root.tcp_mem)) {
+ do_tcp_mem = CONFIG_BOOLEAN_YES;
+
+ static RRDSET *st = NULL;
+ static RRDDIM *rd_mem = NULL;
+
+ if(unlikely(!st)) {
+ st = rrdset_create_localhost(
+ "ipv4"
+ , "sockstat_tcp_mem"
+ , NULL
+ , "tcp"
+ , NULL
+ , "IPv4 TCP Sockets Memory"
+ , "KB"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_NET_SOCKSTAT_NAME
+ , NETDATA_CHART_PRIO_IPV4_TCP_MEM
+ , update_every
+ , RRDSET_TYPE_AREA
+ );
+
+ rd_mem = rrddim_add(st, "mem", NULL, sysconf(_SC_PAGESIZE), 1024, RRD_ALGORITHM_ABSOLUTE);
+ }
+ else rrdset_next(st);
+
+ rrddim_set_by_pointer(st, rd_mem, (collected_number)sockstat_root.tcp_mem);
+ rrdset_done(st);
+ }
+
+ // ------------------------------------------------------------------------
+
+ if(do_udp_sockets == CONFIG_BOOLEAN_YES || (do_udp_sockets == CONFIG_BOOLEAN_AUTO && sockstat_root.udp_inuse)) {
+ do_udp_sockets = CONFIG_BOOLEAN_YES;
+
+ static RRDSET *st = NULL;
+ static RRDDIM *rd_inuse = NULL;
+
+ if(unlikely(!st)) {
+ st = rrdset_create_localhost(
+ "ipv4"
+ , "sockstat_udp_sockets"
+ , NULL
+ , "udp"
+ , NULL
+ , "IPv4 UDP Sockets"
+ , "sockets"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_NET_SOCKSTAT_NAME
+ , NETDATA_CHART_PRIO_IPV4_UDP
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ rd_inuse = rrddim_add(st, "inuse", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ }
+ else rrdset_next(st);
+
+ rrddim_set_by_pointer(st, rd_inuse, (collected_number)sockstat_root.udp_inuse);
+ rrdset_done(st);
+ }
+
+ // ------------------------------------------------------------------------
+
+ if(do_udp_mem == CONFIG_BOOLEAN_YES || (do_udp_mem == CONFIG_BOOLEAN_AUTO && sockstat_root.udp_mem)) {
+ do_udp_mem = CONFIG_BOOLEAN_YES;
+
+ static RRDSET *st = NULL;
+ static RRDDIM *rd_mem = NULL;
+
+ if(unlikely(!st)) {
+ st = rrdset_create_localhost(
+ "ipv4"
+ , "sockstat_udp_mem"
+ , NULL
+ , "udp"
+ , NULL
+ , "IPv4 UDP Sockets Memory"
+ , "KB"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_NET_SOCKSTAT_NAME
+ , NETDATA_CHART_PRIO_IPV4_UDP_MEM
+ , update_every
+ , RRDSET_TYPE_AREA
+ );
+
+ rd_mem = rrddim_add(st, "mem", NULL, sysconf(_SC_PAGESIZE), 1024, RRD_ALGORITHM_ABSOLUTE);
+ }
+ else rrdset_next(st);
+
+ rrddim_set_by_pointer(st, rd_mem, (collected_number)sockstat_root.udp_mem);
+ rrdset_done(st);
+ }
+
+ // ------------------------------------------------------------------------
+
+ if(do_udplite_sockets == CONFIG_BOOLEAN_YES || (do_udplite_sockets == CONFIG_BOOLEAN_AUTO && sockstat_root.udplite_inuse)) {
+ do_udplite_sockets = CONFIG_BOOLEAN_YES;
+
+ static RRDSET *st = NULL;
+ static RRDDIM *rd_inuse = NULL;
+
+ if(unlikely(!st)) {
+ st = rrdset_create_localhost(
+ "ipv4"
+ , "sockstat_udplite_sockets"
+ , NULL
+ , "udplite"
+ , NULL
+ , "IPv4 UDPLITE Sockets"
+ , "sockets"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_NET_SOCKSTAT_NAME
+ , NETDATA_CHART_PRIO_IPV4_UDPLITE
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ rd_inuse = rrddim_add(st, "inuse", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ }
+ else rrdset_next(st);
+
+ rrddim_set_by_pointer(st, rd_inuse, (collected_number)sockstat_root.udplite_inuse);
+ rrdset_done(st);
+ }
+
+ // ------------------------------------------------------------------------
+
+ if(do_raw_sockets == CONFIG_BOOLEAN_YES || (do_raw_sockets == CONFIG_BOOLEAN_AUTO && sockstat_root.raw_inuse)) {
+ do_raw_sockets = CONFIG_BOOLEAN_YES;
+
+ static RRDSET *st = NULL;
+ static RRDDIM *rd_inuse = NULL;
+
+ if(unlikely(!st)) {
+ st = rrdset_create_localhost(
+ "ipv4"
+ , "sockstat_raw_sockets"
+ , NULL
+ , "raw"
+ , NULL
+ , "IPv4 RAW Sockets"
+ , "sockets"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_NET_SOCKSTAT_NAME
+ , NETDATA_CHART_PRIO_IPV4_RAW
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ rd_inuse = rrddim_add(st, "inuse", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ }
+ else rrdset_next(st);
+
+ rrddim_set_by_pointer(st, rd_inuse, (collected_number)sockstat_root.raw_inuse);
+ rrdset_done(st);
+ }
+
+ // ------------------------------------------------------------------------
+
+ if(do_frag_sockets == CONFIG_BOOLEAN_YES || (do_frag_sockets == CONFIG_BOOLEAN_AUTO && sockstat_root.frag_inuse)) {
+ do_frag_sockets = CONFIG_BOOLEAN_YES;
+
+ static RRDSET *st = NULL;
+ static RRDDIM *rd_inuse = NULL;
+
+ if(unlikely(!st)) {
+ st = rrdset_create_localhost(
+ "ipv4"
+ , "sockstat_frag_sockets"
+ , NULL
+ , "fragments"
+ , NULL
+ , "IPv4 FRAG Sockets"
+ , "fragments"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_NET_SOCKSTAT_NAME
+ , NETDATA_CHART_PRIO_IPV4_FRAGMENTS
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ rd_inuse = rrddim_add(st, "inuse", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ }
+ else rrdset_next(st);
+
+ rrddim_set_by_pointer(st, rd_inuse, (collected_number)sockstat_root.frag_inuse);
+ rrdset_done(st);
+ }
+
+ // ------------------------------------------------------------------------
+
+ if(do_frag_mem == CONFIG_BOOLEAN_YES || (do_frag_mem == CONFIG_BOOLEAN_AUTO && sockstat_root.frag_memory)) {
+ do_frag_mem = CONFIG_BOOLEAN_YES;
+
+ static RRDSET *st = NULL;
+ static RRDDIM *rd_mem = NULL;
+
+ if(unlikely(!st)) {
+ st = rrdset_create_localhost(
+ "ipv4"
+ , "sockstat_frag_mem"
+ , NULL
+ , "fragments"
+ , NULL
+ , "IPv4 FRAG Sockets Memory"
+ , "KB"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_NET_SOCKSTAT_NAME
+ , NETDATA_CHART_PRIO_IPV4_FRAGMENTS_MEM
+ , update_every
+ , RRDSET_TYPE_AREA
+ );
+
+ rd_mem = rrddim_add(st, "mem", NULL, 1, 1024, RRD_ALGORITHM_ABSOLUTE);
+ }
+ else rrdset_next(st);
+
+ rrddim_set_by_pointer(st, rd_mem, (collected_number)sockstat_root.frag_memory);
+ rrdset_done(st);
+ }
+
+ return 0;
+}
+
diff --git a/collectors/proc.plugin/proc_net_sockstat6.c b/collectors/proc.plugin/proc_net_sockstat6.c
new file mode 100644
index 000000000..687b9bdeb
--- /dev/null
+++ b/collectors/proc.plugin/proc_net_sockstat6.c
@@ -0,0 +1,273 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "plugin_proc.h"
+
+#define PLUGIN_PROC_MODULE_NET_SOCKSTAT6_NAME "/proc/net/sockstat6"
+
+static struct proc_net_sockstat6 {
+ kernel_uint_t tcp6_inuse;
+ kernel_uint_t udp6_inuse;
+ kernel_uint_t udplite6_inuse;
+ kernel_uint_t raw6_inuse;
+ kernel_uint_t frag6_inuse;
+} sockstat6_root = { 0 };
+
+int do_proc_net_sockstat6(int update_every, usec_t dt) {
+ (void)dt;
+
+ static procfile *ff = NULL;
+
+ static uint32_t hash_raw = 0,
+ hash_frag = 0,
+ hash_tcp = 0,
+ hash_udp = 0,
+ hash_udplite = 0;
+
+ static ARL_BASE *arl_tcp = NULL;
+ static ARL_BASE *arl_udp = NULL;
+ static ARL_BASE *arl_udplite = NULL;
+ static ARL_BASE *arl_raw = NULL;
+ static ARL_BASE *arl_frag = NULL;
+
+ static int do_tcp_sockets = -1, do_udp_sockets = -1, do_udplite_sockets = -1, do_raw_sockets = -1, do_frag_sockets = -1;
+
+ static char *keys[6] = { NULL };
+ static uint32_t hashes[6] = { 0 };
+ static ARL_BASE *bases[6] = { NULL };
+
+ if(unlikely(!arl_tcp)) {
+ do_tcp_sockets = config_get_boolean_ondemand("plugin:proc:/proc/net/sockstat6", "ipv6 TCP sockets", CONFIG_BOOLEAN_AUTO);
+ do_udp_sockets = config_get_boolean_ondemand("plugin:proc:/proc/net/sockstat6", "ipv6 UDP sockets", CONFIG_BOOLEAN_AUTO);
+ do_udplite_sockets = config_get_boolean_ondemand("plugin:proc:/proc/net/sockstat6", "ipv6 UDPLITE sockets", CONFIG_BOOLEAN_AUTO);
+ do_raw_sockets = config_get_boolean_ondemand("plugin:proc:/proc/net/sockstat6", "ipv6 RAW sockets", CONFIG_BOOLEAN_AUTO);
+ do_frag_sockets = config_get_boolean_ondemand("plugin:proc:/proc/net/sockstat6", "ipv6 FRAG sockets", CONFIG_BOOLEAN_AUTO);
+
+ arl_tcp = arl_create("sockstat6/TCP6", arl_callback_str2kernel_uint_t, 60);
+ arl_expect(arl_tcp, "inuse", &sockstat6_root.tcp6_inuse);
+
+ arl_udp = arl_create("sockstat6/UDP6", arl_callback_str2kernel_uint_t, 60);
+ arl_expect(arl_udp, "inuse", &sockstat6_root.udp6_inuse);
+
+ arl_udplite = arl_create("sockstat6/UDPLITE6", arl_callback_str2kernel_uint_t, 60);
+ arl_expect(arl_udplite, "inuse", &sockstat6_root.udplite6_inuse);
+
+ arl_raw = arl_create("sockstat6/RAW6", arl_callback_str2kernel_uint_t, 60);
+ arl_expect(arl_raw, "inuse", &sockstat6_root.raw6_inuse);
+
+ arl_frag = arl_create("sockstat6/FRAG6", arl_callback_str2kernel_uint_t, 60);
+ arl_expect(arl_frag, "inuse", &sockstat6_root.frag6_inuse);
+
+ hash_tcp = simple_hash("TCP6");
+ hash_udp = simple_hash("UDP6");
+ hash_udplite = simple_hash("UDPLITE6");
+ hash_raw = simple_hash("RAW6");
+ hash_frag = simple_hash("FRAG6");
+
+ keys[0] = "TCP6"; hashes[0] = hash_tcp; bases[0] = arl_tcp;
+ keys[1] = "UDP6"; hashes[1] = hash_udp; bases[1] = arl_udp;
+ keys[2] = "UDPLITE6"; hashes[2] = hash_udplite; bases[2] = arl_udplite;
+ keys[3] = "RAW6"; hashes[3] = hash_raw; bases[3] = arl_raw;
+ keys[4] = "FRAG6"; hashes[4] = hash_frag; bases[4] = arl_frag;
+ keys[5] = NULL; // terminator
+ }
+
+ if(unlikely(!ff)) {
+ char filename[FILENAME_MAX + 1];
+ snprintfz(filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/proc/net/sockstat6");
+ ff = procfile_open(config_get("plugin:proc:/proc/net/sockstat6", "filename to monitor", filename), " \t:", PROCFILE_FLAG_DEFAULT);
+ if(unlikely(!ff)) return 1;
+ }
+
+ ff = procfile_readall(ff);
+ if(unlikely(!ff)) return 0; // we return 0, so that we will retry to open it next time
+
+ size_t lines = procfile_lines(ff), l;
+
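+ // for each line, match the protocol keyword (TCP6, UDP6, UDPLITE6, RAW6, FRAG6) and feed its name/value pairs to the matching ARL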
+ for(l = 0; l < lines ;l++) {
+ size_t words = procfile_linewords(ff, l);
+ char *key = procfile_lineword(ff, l, 0);
+ uint32_t hash = simple_hash(key);
+
+ int k;
+ for(k = 0; keys[k] ; k++) {
+ if(unlikely(hash == hashes[k] && strcmp(key, keys[k]) == 0)) {
+ // fprintf(stderr, "KEY: '%s', l=%zu, w=1, words=%zu\n", key, l, words);
+ ARL_BASE *arl = bases[k];
+ arl_begin(arl);
+ size_t w = 1;
+
+ while(w + 1 < words) {
+ char *name = procfile_lineword(ff, l, w); w++;
+ char *value = procfile_lineword(ff, l, w); w++;
+ // fprintf(stderr, " > NAME '%s', VALUE '%s', l=%zu, w=%zu, words=%zu\n", name, value, l, w, words);
+ if(unlikely(arl_check(arl, name, value) != 0))
+ break;
+ }
+
+ break;
+ }
+ }
+ }
+
+ // ------------------------------------------------------------------------
+
+ if(do_tcp_sockets == CONFIG_BOOLEAN_YES || (do_tcp_sockets == CONFIG_BOOLEAN_AUTO && (sockstat6_root.tcp6_inuse))) {
+ do_tcp_sockets = CONFIG_BOOLEAN_YES;
+
+ static RRDSET *st = NULL;
+ static RRDDIM *rd_inuse = NULL;
+
+ if(unlikely(!st)) {
+ st = rrdset_create_localhost(
+ "ipv6"
+ , "sockstat6_tcp_sockets"
+ , NULL
+ , "tcp6"
+ , NULL
+ , "IPv6 TCP Sockets"
+ , "sockets"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_NET_SOCKSTAT6_NAME
+ , NETDATA_CHART_PRIO_IPV6_TCP
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ rd_inuse = rrddim_add(st, "inuse", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ }
+ else rrdset_next(st);
+
+ rrddim_set_by_pointer(st, rd_inuse, (collected_number)sockstat6_root.tcp6_inuse);
+ rrdset_done(st);
+ }
+
+ // ------------------------------------------------------------------------
+
+ if(do_udp_sockets == CONFIG_BOOLEAN_YES || (do_udp_sockets == CONFIG_BOOLEAN_AUTO && sockstat6_root.udp6_inuse)) {
+ do_udp_sockets = CONFIG_BOOLEAN_YES;
+
+ static RRDSET *st = NULL;
+ static RRDDIM *rd_inuse = NULL;
+
+ if(unlikely(!st)) {
+ st = rrdset_create_localhost(
+ "ipv6"
+ , "sockstat6_udp_sockets"
+ , NULL
+ , "udp6"
+ , NULL
+ , "IPv6 UDP Sockets"
+ , "sockets"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_NET_SOCKSTAT6_NAME
+ , NETDATA_CHART_PRIO_IPV6_UDP
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ rd_inuse = rrddim_add(st, "inuse", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ }
+ else rrdset_next(st);
+
+ rrddim_set_by_pointer(st, rd_inuse, (collected_number)sockstat6_root.udp6_inuse);
+ rrdset_done(st);
+ }
+
+ // ------------------------------------------------------------------------
+
+ if(do_udplite_sockets == CONFIG_BOOLEAN_YES || (do_udplite_sockets == CONFIG_BOOLEAN_AUTO && sockstat6_root.udplite6_inuse)) {
+ do_udplite_sockets = CONFIG_BOOLEAN_YES;
+
+ static RRDSET *st = NULL;
+ static RRDDIM *rd_inuse = NULL;
+
+ if(unlikely(!st)) {
+ st = rrdset_create_localhost(
+ "ipv6"
+ , "sockstat6_udplite_sockets"
+ , NULL
+ , "udplite6"
+ , NULL
+ , "IPv6 UDPLITE Sockets"
+ , "sockets"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_NET_SOCKSTAT6_NAME
+ , NETDATA_CHART_PRIO_IPV6_UDPLITE
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ rd_inuse = rrddim_add(st, "inuse", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ }
+ else rrdset_next(st);
+
+ rrddim_set_by_pointer(st, rd_inuse, (collected_number)sockstat6_root.udplite6_inuse);
+ rrdset_done(st);
+ }
+
+ // ------------------------------------------------------------------------
+
+ if(do_raw_sockets == CONFIG_BOOLEAN_YES || (do_raw_sockets == CONFIG_BOOLEAN_AUTO && sockstat6_root.raw6_inuse)) {
+ do_raw_sockets = CONFIG_BOOLEAN_YES;
+
+ static RRDSET *st = NULL;
+ static RRDDIM *rd_inuse = NULL;
+
+ if(unlikely(!st)) {
+ st = rrdset_create_localhost(
+ "ipv6"
+ , "sockstat6_raw_sockets"
+ , NULL
+ , "raw6"
+ , NULL
+ , "IPv6 RAW Sockets"
+ , "sockets"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_NET_SOCKSTAT6_NAME
+ , NETDATA_CHART_PRIO_IPV6_RAW
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ rd_inuse = rrddim_add(st, "inuse", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ }
+ else rrdset_next(st);
+
+ rrddim_set_by_pointer(st, rd_inuse, (collected_number)sockstat6_root.raw6_inuse);
+ rrdset_done(st);
+ }
+
+ // ------------------------------------------------------------------------
+
+ if(do_frag_sockets == CONFIG_BOOLEAN_YES || (do_frag_sockets == CONFIG_BOOLEAN_AUTO && sockstat6_root.frag6_inuse)) {
+ do_frag_sockets = CONFIG_BOOLEAN_YES;
+
+ static RRDSET *st = NULL;
+ static RRDDIM *rd_inuse = NULL;
+
+ if(unlikely(!st)) {
+ st = rrdset_create_localhost(
+ "ipv6"
+ , "sockstat6_frag_sockets"
+ , NULL
+ , "fragments6"
+ , NULL
+ , "IPv6 FRAG Sockets"
+ , "fragments"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_NET_SOCKSTAT6_NAME
+ , NETDATA_CHART_PRIO_IPV6_FRAGMENTS
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ rd_inuse = rrddim_add(st, "inuse", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ }
+ else rrdset_next(st);
+
+ rrddim_set_by_pointer(st, rd_inuse, (collected_number)sockstat6_root.frag6_inuse);
+ rrdset_done(st);
+ }
+
+ return 0;
+}
diff --git a/collectors/proc.plugin/proc_net_softnet_stat.c b/collectors/proc.plugin/proc_net_softnet_stat.c
new file mode 100644
index 000000000..7ec783e77
--- /dev/null
+++ b/collectors/proc.plugin/proc_net_softnet_stat.c
@@ -0,0 +1,151 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "plugin_proc.h"
+
+#define PLUGIN_PROC_MODULE_NET_SOFTNET_NAME "/proc/net/softnet_stat"
+
+static inline char *softnet_column_name(size_t column) {
+ switch(column) {
+ // https://github.com/torvalds/linux/blob/a7fd20d1c476af4563e66865213474a2f9f473a4/net/core/net-procfs.c#L161-L166
+ case 0: return "processed";
+ case 1: return "dropped";
+ case 2: return "squeezed";
+ case 9: return "received_rps";
+ case 10: return "flow_limit_count";
+ default: return NULL;
+ }
+}
+
+int do_proc_net_softnet_stat(int update_every, usec_t dt) {
+ (void)dt;
+
+ static procfile *ff = NULL;
+ static int do_per_core = -1;
+ static size_t allocated_lines = 0, allocated_columns = 0;
+ static uint32_t *data = NULL;
+
+ if(unlikely(do_per_core == -1)) do_per_core = config_get_boolean("plugin:proc:/proc/net/softnet_stat", "softnet_stat per core", 1);
+
+ if(unlikely(!ff)) {
+ char filename[FILENAME_MAX + 1];
+ snprintfz(filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/proc/net/softnet_stat");
+ ff = procfile_open(config_get("plugin:proc:/proc/net/softnet_stat", "filename to monitor", filename), " \t", PROCFILE_FLAG_DEFAULT);
+ if(unlikely(!ff)) return 1;
+ }
+
+ ff = procfile_readall(ff);
+ if(unlikely(!ff)) return 0; // we return 0, so that we will retry to open it next time
+
+ size_t lines = procfile_lines(ff), l;
+ size_t words = procfile_linewords(ff, 0), w;
+
+ if(unlikely(!lines || !words)) {
+ error("Cannot read /proc/net/softnet_stat, %zu lines and %zu columns reported.", lines, words);
+ return 1;
+ }
+
+ if(unlikely(lines > 200)) lines = 200;
+ if(unlikely(words > 50)) words = 50;
+
+ if(unlikely(!data || lines > allocated_lines || words > allocated_columns)) {
+ freez(data);
+ allocated_lines = lines;
+ allocated_columns = words;
+ data = mallocz((allocated_lines + 1) * allocated_columns * sizeof(uint32_t));
+ }
+
+ // initialize to zero
+ memset(data, 0, (allocated_lines + 1) * allocated_columns * sizeof(uint32_t));
+
+ // parse the values
+ for(l = 0; l < lines ;l++) {
+ words = procfile_linewords(ff, l);
+ if(unlikely(!words)) continue;
+
+ if(unlikely(words > allocated_columns))
+ words = allocated_columns;
+
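+ // values are hexadecimal; row 0 of data[] accumulates the system-wide totals, row (l + 1) keeps the per-CPU values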
+ for(w = 0; w < words ; w++) {
+ if(unlikely(softnet_column_name(w))) {
+ uint32_t t = (uint32_t)strtoul(procfile_lineword(ff, l, w), NULL, 16);
+ data[w] += t;
+ data[((l + 1) * allocated_columns) + w] = t;
+ }
+ }
+ }
+
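+ // skip the last per-CPU row when its 'processed' counter is zero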
+ if(unlikely(data[(lines * allocated_columns)] == 0))
+ lines--;
+
+ RRDSET *st;
+
+ // --------------------------------------------------------------------
+
+ st = rrdset_find_bytype_localhost("system", "softnet_stat");
+ if(unlikely(!st)) {
+ st = rrdset_create_localhost(
+ "system"
+ , "softnet_stat"
+ , NULL
+ , "softnet_stat"
+ , "system.softnet_stat"
+ , "System softnet_stat"
+ , "events/s"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_NET_SOFTNET_NAME
+ , NETDATA_CHART_PRIO_SYSTEM_SOFTNET_STAT
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+ for(w = 0; w < allocated_columns ;w++)
+ if(unlikely(softnet_column_name(w)))
+ rrddim_add(st, softnet_column_name(w), NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else rrdset_next(st);
+
+ for(w = 0; w < allocated_columns ;w++)
+ if(unlikely(softnet_column_name(w)))
+ rrddim_set(st, softnet_column_name(w), data[w]);
+
+ rrdset_done(st);
+
+ if(do_per_core) {
+ for(l = 0; l < lines ;l++) {
+ char id[50+1];
+ snprintfz(id, 50, "cpu%zu_softnet_stat", l);
+
+ st = rrdset_find_bytype_localhost("cpu", id);
+ if(unlikely(!st)) {
+ char title[100+1];
+ snprintfz(title, 100, "CPU%zu softnet_stat", l);
+
+ st = rrdset_create_localhost(
+ "cpu"
+ , id
+ , NULL
+ , "softnet_stat"
+ , "cpu.softnet_stat"
+ , title
+ , "events/s"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_NET_SOFTNET_NAME
+ , NETDATA_CHART_PRIO_SOFTNET_PER_CORE + l
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+ for(w = 0; w < allocated_columns ;w++)
+ if(unlikely(softnet_column_name(w)))
+ rrddim_add(st, softnet_column_name(w), NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else rrdset_next(st);
+
+ for(w = 0; w < allocated_columns ;w++)
+ if(unlikely(softnet_column_name(w)))
+ rrddim_set(st, softnet_column_name(w), data[((l + 1) * allocated_columns) + w]);
+
+ rrdset_done(st);
+ }
+ }
+
+ return 0;
+}
diff --git a/collectors/proc.plugin/proc_net_stat_conntrack.c b/collectors/proc.plugin/proc_net_stat_conntrack.c
new file mode 100644
index 000000000..f5257c0a0
--- /dev/null
+++ b/collectors/proc.plugin/proc_net_stat_conntrack.c
@@ -0,0 +1,351 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "plugin_proc.h"
+
+#define RRD_TYPE_NET_STAT_NETFILTER "netfilter"
+#define RRD_TYPE_NET_STAT_CONNTRACK "conntrack"
+#define PLUGIN_PROC_MODULE_CONNTRACK_NAME "/proc/net/stat/nf_conntrack"
+
+int do_proc_net_stat_conntrack(int update_every, usec_t dt) {
+ static procfile *ff = NULL;
+ static int do_sockets = -1, do_new = -1, do_changes = -1, do_expect = -1, do_search = -1, do_errors = -1;
+ static usec_t get_max_every = 10 * USEC_PER_SEC, usec_since_last_max = 0;
+ static int read_full = 1;
+ static char *nf_conntrack_filename, *nf_conntrack_count_filename, *nf_conntrack_max_filename;
+ static RRDVAR *rrdvar_max = NULL;
+
+ unsigned long long aentries = 0, asearched = 0, afound = 0, anew = 0, ainvalid = 0, aignore = 0, adelete = 0, adelete_list = 0,
+ ainsert = 0, ainsert_failed = 0, adrop = 0, aearly_drop = 0, aicmp_error = 0, aexpect_new = 0, aexpect_create = 0, aexpect_delete = 0, asearch_restart = 0;
+
+ if(unlikely(do_sockets == -1)) {
+ char filename[FILENAME_MAX + 1];
+ snprintfz(filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/proc/net/stat/nf_conntrack");
+ nf_conntrack_filename = config_get("plugin:proc:/proc/net/stat/nf_conntrack", "filename to monitor", filename);
+
+ snprintfz(filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/proc/sys/net/netfilter/nf_conntrack_max");
+ nf_conntrack_max_filename = config_get("plugin:proc:/proc/sys/net/netfilter/nf_conntrack_max", "filename to monitor", filename);
+ usec_since_last_max = get_max_every = config_get_number("plugin:proc:/proc/sys/net/netfilter/nf_conntrack_max", "read every seconds", 10) * USEC_PER_SEC;
+
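+ // try the full per-CPU statistics file first; if it cannot be opened, fall back to reading the connection count only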
+ read_full = 1;
+ ff = procfile_open(nf_conntrack_filename, " \t:", PROCFILE_FLAG_DEFAULT);
+ if(!ff) read_full = 0;
+
+ do_new = config_get_boolean("plugin:proc:/proc/net/stat/nf_conntrack", "netfilter new connections", read_full);
+ do_changes = config_get_boolean("plugin:proc:/proc/net/stat/nf_conntrack", "netfilter connection changes", read_full);
+ do_expect = config_get_boolean("plugin:proc:/proc/net/stat/nf_conntrack", "netfilter connection expectations", read_full);
+ do_search = config_get_boolean("plugin:proc:/proc/net/stat/nf_conntrack", "netfilter connection searches", read_full);
+ do_errors = config_get_boolean("plugin:proc:/proc/net/stat/nf_conntrack", "netfilter errors", read_full);
+
+ do_sockets = 1;
+ if(!read_full) {
+ snprintfz(filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/proc/sys/net/netfilter/nf_conntrack_count");
+ nf_conntrack_count_filename = config_get("plugin:proc:/proc/sys/net/netfilter/nf_conntrack_count", "filename to monitor", filename);
+
+ if(read_single_number_file(nf_conntrack_count_filename, &aentries))
+ do_sockets = 0;
+ }
+
+ do_sockets = config_get_boolean("plugin:proc:/proc/net/stat/nf_conntrack", "netfilter connections", do_sockets);
+
+ if(!do_sockets && !read_full)
+ return 1;
+
+ rrdvar_max = rrdvar_custom_host_variable_create(localhost, "netfilter.conntrack.max");
+ }
+
+ if(likely(read_full)) {
+ if(unlikely(!ff)) {
+ ff = procfile_open(nf_conntrack_filename, " \t:", PROCFILE_FLAG_DEFAULT);
+ if(unlikely(!ff))
+ return 0; // we return 0, so that we will retry to open it next time
+ }
+
+ ff = procfile_readall(ff);
+ if(unlikely(!ff))
+ return 0; // we return 0, so that we will retry to open it next time
+
+ size_t lines = procfile_lines(ff), l;
+
+ for(l = 1; l < lines ;l++) {
+ size_t words = procfile_linewords(ff, l);
+ if(unlikely(words < 17)) {
+ if(unlikely(words)) error("Cannot read /proc/net/stat/nf_conntrack line. Expected 17 params, read %zu.", words);
+ continue;
+ }
+
+ unsigned long long tentries = 0, tsearched = 0, tfound = 0, tnew = 0, tinvalid = 0, tignore = 0, tdelete = 0, tdelete_list = 0, tinsert = 0, tinsert_failed = 0, tdrop = 0, tearly_drop = 0, ticmp_error = 0, texpect_new = 0, texpect_create = 0, texpect_delete = 0, tsearch_restart = 0;
+
+ tentries = strtoull(procfile_lineword(ff, l, 0), NULL, 16);
+ tsearched = strtoull(procfile_lineword(ff, l, 1), NULL, 16);
+ tfound = strtoull(procfile_lineword(ff, l, 2), NULL, 16);
+ tnew = strtoull(procfile_lineword(ff, l, 3), NULL, 16);
+ tinvalid = strtoull(procfile_lineword(ff, l, 4), NULL, 16);
+ tignore = strtoull(procfile_lineword(ff, l, 5), NULL, 16);
+ tdelete = strtoull(procfile_lineword(ff, l, 6), NULL, 16);
+ tdelete_list = strtoull(procfile_lineword(ff, l, 7), NULL, 16);
+ tinsert = strtoull(procfile_lineword(ff, l, 8), NULL, 16);
+ tinsert_failed = strtoull(procfile_lineword(ff, l, 9), NULL, 16);
+ tdrop = strtoull(procfile_lineword(ff, l, 10), NULL, 16);
+ tearly_drop = strtoull(procfile_lineword(ff, l, 11), NULL, 16);
+ ticmp_error = strtoull(procfile_lineword(ff, l, 12), NULL, 16);
+ texpect_new = strtoull(procfile_lineword(ff, l, 13), NULL, 16);
+ texpect_create = strtoull(procfile_lineword(ff, l, 14), NULL, 16);
+ texpect_delete = strtoull(procfile_lineword(ff, l, 15), NULL, 16);
+ tsearch_restart = strtoull(procfile_lineword(ff, l, 16), NULL, 16);
+
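+ // 'entries' is the same total repeated on every per-CPU row, so keep it from the first row only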
+ if(unlikely(!aentries)) aentries = tentries;
+
+ // sum all the cpus together
+ asearched += tsearched; // conntrack.search
+ afound += tfound; // conntrack.search
+ anew += tnew; // conntrack.new
+ ainvalid += tinvalid; // conntrack.new
+ aignore += tignore; // conntrack.new
+ adelete += tdelete; // conntrack.changes
+ adelete_list += tdelete_list; // conntrack.changes
+ ainsert += tinsert; // conntrack.changes
+ ainsert_failed += tinsert_failed; // conntrack.errors
+ adrop += tdrop; // conntrack.errors
+ aearly_drop += tearly_drop; // conntrack.errors
+ aicmp_error += ticmp_error; // conntrack.errors
+ aexpect_new += texpect_new; // conntrack.expect
+ aexpect_create += texpect_create; // conntrack.expect
+ aexpect_delete += texpect_delete; // conntrack.expect
+ asearch_restart += tsearch_restart; // conntrack.search
+ }
+ }
+ else {
+ if(unlikely(read_single_number_file(nf_conntrack_count_filename, &aentries)))
+ return 0; // we return 0, so that we will retry to open it next time
+ }
+
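+ // refresh the netfilter.conntrack.max host variable every 'read every seconds' seconds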
+ usec_since_last_max += dt;
+ if(unlikely(rrdvar_max && usec_since_last_max >= get_max_every)) {
+ usec_since_last_max = 0;
+
+ unsigned long long max;
+ if(likely(!read_single_number_file(nf_conntrack_max_filename, &max)))
+ rrdvar_custom_host_variable_set(localhost, rrdvar_max, max);
+ }
+
+ // --------------------------------------------------------------------
+
+ if(do_sockets) {
+ static RRDSET *st = NULL;
+ static RRDDIM *rd_connections = NULL;
+
+ if(unlikely(!st)) {
+ st = rrdset_create_localhost(
+ RRD_TYPE_NET_STAT_NETFILTER
+ , RRD_TYPE_NET_STAT_CONNTRACK "_sockets"
+ , NULL
+ , RRD_TYPE_NET_STAT_CONNTRACK
+ , NULL
+ , "Connection Tracker Connections"
+ , "active connections"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_CONNTRACK_NAME
+ , NETDATA_CHART_PRIO_NETFILTER_SOCKETS
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ rd_connections = rrddim_add(st, "connections", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ }
+ else rrdset_next(st);
+
+ rrddim_set_by_pointer(st, rd_connections, aentries);
+ rrdset_done(st);
+ }
+
+ // --------------------------------------------------------------------
+
+ if(do_new) {
+ static RRDSET *st = NULL;
+ static RRDDIM
+ *rd_new = NULL,
+ *rd_ignore = NULL,
+ *rd_invalid = NULL;
+
+ if(unlikely(!st)) {
+ st = rrdset_create_localhost(
+ RRD_TYPE_NET_STAT_NETFILTER
+ , RRD_TYPE_NET_STAT_CONNTRACK "_new"
+ , NULL
+ , RRD_TYPE_NET_STAT_CONNTRACK
+ , NULL
+ , "Connection Tracker New Connections"
+ , "connections/s"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_CONNTRACK_NAME
+ , NETDATA_CHART_PRIO_NETFILTER_NEW
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ rd_new = rrddim_add(st, "new", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_ignore = rrddim_add(st, "ignore", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_invalid = rrddim_add(st, "invalid", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else rrdset_next(st);
+
+ rrddim_set_by_pointer(st, rd_new, anew);
+ rrddim_set_by_pointer(st, rd_ignore, aignore);
+ rrddim_set_by_pointer(st, rd_invalid, ainvalid);
+ rrdset_done(st);
+ }
+
+ // --------------------------------------------------------------------
+
+ if(do_changes) {
+ static RRDSET *st = NULL;
+ static RRDDIM
+ *rd_inserted = NULL,
+ *rd_deleted = NULL,
+ *rd_delete_list = NULL;
+
+ if(unlikely(!st)) {
+ st = rrdset_create_localhost(
+ RRD_TYPE_NET_STAT_NETFILTER
+ , RRD_TYPE_NET_STAT_CONNTRACK "_changes"
+ , NULL
+ , RRD_TYPE_NET_STAT_CONNTRACK
+ , NULL
+ , "Connection Tracker Changes"
+ , "changes/s"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_CONNTRACK_NAME
+ , NETDATA_CHART_PRIO_NETFILTER_CHANGES
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+ rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
+
+ rd_inserted = rrddim_add(st, "inserted", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_deleted = rrddim_add(st, "deleted", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_delete_list = rrddim_add(st, "delete_list", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else rrdset_next(st);
+
+ rrddim_set_by_pointer(st, rd_inserted, ainsert);
+ rrddim_set_by_pointer(st, rd_deleted, adelete);
+ rrddim_set_by_pointer(st, rd_delete_list, adelete_list);
+ rrdset_done(st);
+ }
+
+ // --------------------------------------------------------------------
+
+ if(do_expect) {
+ static RRDSET *st = NULL;
+ static RRDDIM *rd_created = NULL,
+ *rd_deleted = NULL,
+ *rd_new = NULL;
+
+ if(unlikely(!st)) {
+ st = rrdset_create_localhost(
+ RRD_TYPE_NET_STAT_NETFILTER
+ , RRD_TYPE_NET_STAT_CONNTRACK "_expect"
+ , NULL
+ , RRD_TYPE_NET_STAT_CONNTRACK
+ , NULL
+ , "Connection Tracker Expectations"
+ , "expectations/s"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_CONNTRACK_NAME
+ , NETDATA_CHART_PRIO_NETFILTER_EXPECT
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+ rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
+
+ rd_created = rrddim_add(st, "created", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_deleted = rrddim_add(st, "deleted", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_new = rrddim_add(st, "new", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else rrdset_next(st);
+
+ rrddim_set_by_pointer(st, rd_created, aexpect_create);
+ rrddim_set_by_pointer(st, rd_deleted, aexpect_delete);
+ rrddim_set_by_pointer(st, rd_new, aexpect_new);
+ rrdset_done(st);
+ }
+
+ // --------------------------------------------------------------------
+
+ if(do_search) {
+ static RRDSET *st = NULL;
+ static RRDDIM *rd_searched = NULL,
+ *rd_restarted = NULL,
+ *rd_found = NULL;
+
+ if(unlikely(!st)) {
+ st = rrdset_create_localhost(
+ RRD_TYPE_NET_STAT_NETFILTER
+ , RRD_TYPE_NET_STAT_CONNTRACK "_search"
+ , NULL
+ , RRD_TYPE_NET_STAT_CONNTRACK
+ , NULL
+ , "Connection Tracker Searches"
+ , "searches/s"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_CONNTRACK_NAME
+ , NETDATA_CHART_PRIO_NETFILTER_SEARCH
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+ rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
+
+ rd_searched = rrddim_add(st, "searched", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_restarted = rrddim_add(st, "restarted", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_found = rrddim_add(st, "found", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else rrdset_next(st);
+
+ rrddim_set_by_pointer(st, rd_searched, asearched);
+ rrddim_set_by_pointer(st, rd_restarted, asearch_restart);
+ rrddim_set_by_pointer(st, rd_found, afound);
+ rrdset_done(st);
+ }
+
+ // --------------------------------------------------------------------
+
+ if(do_errors) {
+ static RRDSET *st = NULL;
+ static RRDDIM *rd_icmp_error = NULL,
+ *rd_insert_failed = NULL,
+ *rd_drop = NULL,
+ *rd_early_drop = NULL;
+
+ if(unlikely(!st)) {
+ st = rrdset_create_localhost(
+ RRD_TYPE_NET_STAT_NETFILTER
+ , RRD_TYPE_NET_STAT_CONNTRACK "_errors"
+ , NULL
+ , RRD_TYPE_NET_STAT_CONNTRACK
+ , NULL
+ , "Connection Tracker Errors"
+ , "events/s"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_CONNTRACK_NAME
+ , NETDATA_CHART_PRIO_NETFILTER_ERRORS
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+ rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
+
+ rd_icmp_error = rrddim_add(st, "icmp_error", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_insert_failed = rrddim_add(st, "insert_failed", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_drop = rrddim_add(st, "drop", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_early_drop = rrddim_add(st, "early_drop", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else rrdset_next(st);
+
+ rrddim_set_by_pointer(st, rd_icmp_error, aicmp_error);
+ rrddim_set_by_pointer(st, rd_insert_failed, ainsert_failed);
+ rrddim_set_by_pointer(st, rd_drop, adrop);
+ rrddim_set_by_pointer(st, rd_early_drop, aearly_drop);
+ rrdset_done(st);
+ }
+
+ return 0;
+}
diff --git a/collectors/proc.plugin/proc_net_stat_synproxy.c b/collectors/proc.plugin/proc_net_stat_synproxy.c
new file mode 100644
index 000000000..f0c1f47c1
--- /dev/null
+++ b/collectors/proc.plugin/proc_net_stat_synproxy.c
@@ -0,0 +1,185 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "plugin_proc.h"
+
+#define PLUGIN_PROC_MODULE_SYNPROXY_NAME "/proc/net/stat/synproxy"
+
+#define RRD_TYPE_NET_STAT_NETFILTER "netfilter"
+#define RRD_TYPE_NET_STAT_SYNPROXY "synproxy"
+
+int do_proc_net_stat_synproxy(int update_every, usec_t dt) {
+ (void)dt;
+
+ static int do_entries = -1, do_cookies = -1, do_syns = -1, do_reopened = -1;
+ static procfile *ff = NULL;
+
+ if(unlikely(do_entries == -1)) {
+ do_entries = config_get_boolean_ondemand("plugin:proc:/proc/net/stat/synproxy", "SYNPROXY entries", CONFIG_BOOLEAN_AUTO);
+ do_cookies = config_get_boolean_ondemand("plugin:proc:/proc/net/stat/synproxy", "SYNPROXY cookies", CONFIG_BOOLEAN_AUTO);
+ do_syns = config_get_boolean_ondemand("plugin:proc:/proc/net/stat/synproxy", "SYNPROXY SYN received", CONFIG_BOOLEAN_AUTO);
+ do_reopened = config_get_boolean_ondemand("plugin:proc:/proc/net/stat/synproxy", "SYNPROXY connections reopened", CONFIG_BOOLEAN_AUTO);
+ }
+
+ if(unlikely(!ff)) {
+ char filename[FILENAME_MAX + 1];
+ snprintfz(filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/proc/net/stat/synproxy");
+ ff = procfile_open(config_get("plugin:proc:/proc/net/stat/synproxy", "filename to monitor", filename), " \t,:|", PROCFILE_FLAG_DEFAULT);
+ if(unlikely(!ff))
+ return 1;
+ }
+
+ ff = procfile_readall(ff);
+ if(unlikely(!ff))
+ return 0; // we return 0, so that we will retry to open it next time
+
+ // make sure we have at least 2 lines (the header plus one line per CPU)
+ size_t lines = procfile_lines(ff), l;
+ if(unlikely(lines < 2)) {
+ error("/proc/net/stat/synproxy has %zu lines, expected no less than 2. Disabling it.", lines);
+ return 1;
+ }
+
+ unsigned long long entries = 0, syn_received = 0, cookie_invalid = 0, cookie_valid = 0, cookie_retrans = 0, conn_reopened = 0;
+
+ // synproxy gives its values per CPU
+ for(l = 1; l < lines ;l++) {
+ size_t words = procfile_linewords(ff, l);
+ if(unlikely(words < 6))
+ continue;
+
+ entries += strtoull(procfile_lineword(ff, l, 0), NULL, 16);
+ syn_received += strtoull(procfile_lineword(ff, l, 1), NULL, 16);
+ cookie_invalid += strtoull(procfile_lineword(ff, l, 2), NULL, 16);
+ cookie_valid += strtoull(procfile_lineword(ff, l, 3), NULL, 16);
+ cookie_retrans += strtoull(procfile_lineword(ff, l, 4), NULL, 16);
+ conn_reopened += strtoull(procfile_lineword(ff, l, 5), NULL, 16);
+ }
+
+ unsigned long long events = entries + syn_received + cookie_invalid + cookie_valid + cookie_retrans + conn_reopened;
+
+ // --------------------------------------------------------------------
+
+ if((do_entries == CONFIG_BOOLEAN_AUTO && events) || do_entries == CONFIG_BOOLEAN_YES) {
+ do_entries = CONFIG_BOOLEAN_YES;
+
+ static RRDSET *st = NULL;
+ if(unlikely(!st)) {
+ st = rrdset_create_localhost(
+ RRD_TYPE_NET_STAT_NETFILTER
+ , RRD_TYPE_NET_STAT_SYNPROXY "_entries"
+ , NULL
+ , RRD_TYPE_NET_STAT_SYNPROXY
+ , NULL
+ , "SYNPROXY Entries Used"
+ , "entries"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_SYNPROXY_NAME
+ , NETDATA_CHART_PRIO_SYNPROXY_ENTRIES
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ rrddim_add(st, "entries", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ }
+ else rrdset_next(st);
+
+ rrddim_set(st, "entries", entries);
+ rrdset_done(st);
+ }
+
+ // --------------------------------------------------------------------
+
+ if((do_syns == CONFIG_BOOLEAN_AUTO && events) || do_syns == CONFIG_BOOLEAN_YES) {
+ do_syns = CONFIG_BOOLEAN_YES;
+
+ static RRDSET *st = NULL;
+ if(unlikely(!st)) {
+ st = rrdset_create_localhost(
+ RRD_TYPE_NET_STAT_NETFILTER
+ , RRD_TYPE_NET_STAT_SYNPROXY "_syn_received"
+ , NULL
+ , RRD_TYPE_NET_STAT_SYNPROXY
+ , NULL
+ , "SYNPROXY SYN Packets received"
+ , "SYN/s"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_SYNPROXY_NAME
+ , NETDATA_CHART_PRIO_SYNPROXY_SYN_RECEIVED
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ rrddim_add(st, "received", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else rrdset_next(st);
+
+ rrddim_set(st, "received", syn_received);
+ rrdset_done(st);
+ }
+
+ // --------------------------------------------------------------------
+
+ if((do_reopened == CONFIG_BOOLEAN_AUTO && events) || do_reopened == CONFIG_BOOLEAN_YES) {
+ do_reopened = CONFIG_BOOLEAN_YES;
+
+ static RRDSET *st = NULL;
+ if(unlikely(!st)) {
+ st = rrdset_create_localhost(
+ RRD_TYPE_NET_STAT_NETFILTER
+ , RRD_TYPE_NET_STAT_SYNPROXY "_conn_reopened"
+ , NULL
+ , RRD_TYPE_NET_STAT_SYNPROXY
+ , NULL
+ , "SYNPROXY Connections Reopened"
+ , "connections/s"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_SYNPROXY_NAME
+ , NETDATA_CHART_PRIO_SYNPROXY_CONN_OPEN
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ rrddim_add(st, "reopened", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else rrdset_next(st);
+
+ rrddim_set(st, "reopened", conn_reopened);
+ rrdset_done(st);
+ }
+
+ // --------------------------------------------------------------------
+
+ if((do_cookies == CONFIG_BOOLEAN_AUTO && events) || do_cookies == CONFIG_BOOLEAN_YES) {
+ do_cookies = CONFIG_BOOLEAN_YES;
+
+ static RRDSET *st = NULL;
+ if(unlikely(!st)) {
+ st = rrdset_create_localhost(
+ RRD_TYPE_NET_STAT_NETFILTER
+ , RRD_TYPE_NET_STAT_SYNPROXY "_cookies"
+ , NULL
+ , RRD_TYPE_NET_STAT_SYNPROXY
+ , NULL
+ , "SYNPROXY TCP Cookies"
+ , "cookies/s"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_SYNPROXY_NAME
+ , NETDATA_CHART_PRIO_SYNPROXY_COOKIES
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ rrddim_add(st, "valid", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rrddim_add(st, "invalid", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rrddim_add(st, "retransmits", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else rrdset_next(st);
+
+ rrddim_set(st, "valid", cookie_valid);
+ rrddim_set(st, "invalid", cookie_invalid);
+ rrddim_set(st, "retransmits", cookie_retrans);
+ rrdset_done(st);
+ }
+
+ return 0;
+}
diff --git a/collectors/proc.plugin/proc_self_mountinfo.c b/collectors/proc.plugin/proc_self_mountinfo.c
new file mode 100644
index 000000000..3f17ccce2
--- /dev/null
+++ b/collectors/proc.plugin/proc_self_mountinfo.c
@@ -0,0 +1,403 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "plugin_proc.h"
+
+// ----------------------------------------------------------------------------
+// taken from gnulib/mountlist.c
+
+#ifndef ME_REMOTE
+/* A file system is "remote" if its Fs_name contains a ':'
+ or if (it is of type (smbfs or cifs) and its Fs_name starts with '//')
+ or Fs_name is equal to "-hosts" (used by autofs to mount remote fs). */
+# define ME_REMOTE(Fs_name, Fs_type) \
+ (strchr (Fs_name, ':') != NULL \
+ || ((Fs_name)[0] == '/' \
+ && (Fs_name)[1] == '/' \
+ && (strcmp (Fs_type, "smbfs") == 0 \
+ || strcmp (Fs_type, "cifs") == 0)) \
+ || (strcmp("-hosts", Fs_name) == 0))
+#endif
+
+#define ME_DUMMY_0(Fs_name, Fs_type) \
+ (strcmp (Fs_type, "autofs") == 0 \
+ || strcmp (Fs_type, "proc") == 0 \
+ || strcmp (Fs_type, "subfs") == 0 \
+ /* for Linux 2.6/3.x */ \
+ || strcmp (Fs_type, "debugfs") == 0 \
+ || strcmp (Fs_type, "devpts") == 0 \
+ || strcmp (Fs_type, "fusectl") == 0 \
+ || strcmp (Fs_type, "mqueue") == 0 \
+ || strcmp (Fs_type, "rpc_pipefs") == 0 \
+ || strcmp (Fs_type, "sysfs") == 0 \
+ /* FreeBSD, Linux 2.4 */ \
+ || strcmp (Fs_type, "devfs") == 0 \
+ /* for NetBSD 3.0 */ \
+ || strcmp (Fs_type, "kernfs") == 0 \
+ /* for Irix 6.5 */ \
+ || strcmp (Fs_type, "ignore") == 0)
+
+/* Historically, we have marked as "dummy" any file system of type "none",
+ but now that programs like du need to know about bind-mounted directories,
+ we grant an exception to any with "bind" in its list of mount options.
+ I.e., those are *not* dummy entries. */
+# define ME_DUMMY(Fs_name, Fs_type) \
+ (ME_DUMMY_0 (Fs_name, Fs_type) || strcmp (Fs_type, "none") == 0)
+
+// ----------------------------------------------------------------------------
+
+// find the mount info with the given major:minor
+// in the supplied linked list of mountinfo structures
+struct mountinfo *mountinfo_find(struct mountinfo *root, unsigned long major, unsigned long minor) {
+ struct mountinfo *mi;
+
+ for(mi = root; mi ; mi = mi->next)
+ if(unlikely(mi->major == major && mi->minor == minor))
+ return mi;
+
+ return NULL;
+}
+
+// find the mount info with the given filesystem and mount_source
+// in the supplied linked list of mountinfo structures
+struct mountinfo *mountinfo_find_by_filesystem_mount_source(struct mountinfo *root, const char *filesystem, const char *mount_source) {
+ struct mountinfo *mi;
+ uint32_t filesystem_hash = simple_hash(filesystem), mount_source_hash = simple_hash(mount_source);
+
+ for(mi = root; mi ; mi = mi->next)
+ if(unlikely(mi->filesystem
+ && mi->mount_source
+ && mi->filesystem_hash == filesystem_hash
+ && mi->mount_source_hash == mount_source_hash
+ && !strcmp(mi->filesystem, filesystem)
+ && !strcmp(mi->mount_source, mount_source)))
+ return mi;
+
+ return NULL;
+}
+
+struct mountinfo *mountinfo_find_by_filesystem_super_option(struct mountinfo *root, const char *filesystem, const char *super_options) {
+ struct mountinfo *mi;
+ uint32_t filesystem_hash = simple_hash(filesystem);
+
+ size_t solen = strlen(super_options);
+
+ for(mi = root; mi ; mi = mi->next)
+ if(unlikely(mi->filesystem
+ && mi->super_options
+ && mi->filesystem_hash == filesystem_hash
+ && !strcmp(mi->filesystem, filesystem))) {
+
+ // super_options is a comma separated list
+ char *s = mi->super_options, *e;
+ while(*s) {
+ e = s + 1;
+ while(*e && *e != ',') e++;
+
+ size_t len = e - s;
+ if(unlikely(len == solen && !strncmp(s, super_options, len)))
+ return mi;
+
+ if(*e == ',') s = ++e;
+ else s = e;
+ }
+ }
+
+ return NULL;
+}
+
+static void mountinfo_free(struct mountinfo *mi) {
+ freez(mi->root);
+ freez(mi->mount_point);
+ freez(mi->mount_options);
+ freez(mi->persistent_id);
+/*
+ if(mi->optional_fields_count) {
+ int i;
+ for(i = 0; i < mi->optional_fields_count ; i++)
+ free(*mi->optional_fields[i]);
+ }
+ free(mi->optional_fields);
+*/
+ freez(mi->filesystem);
+ freez(mi->mount_source);
+ freez(mi->super_options);
+ freez(mi);
+}
+
+// free a linked list of mountinfo structures
+void mountinfo_free_all(struct mountinfo *mi) {
+ while(mi) {
+ struct mountinfo *t = mi;
+ mi = mi->next;
+
+ mountinfo_free(t);
+ }
+}
+
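+// duplicate a string, decoding the \ooo octal escapes the kernel uses for special characters in mount points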
+static char *strdupz_decoding_octal(const char *string) {
+ char *buffer = strdupz(string);
+
+ char *d = buffer;
+ const char *s = string;
+
+ while(*s) {
+ if(unlikely(*s == '\\')) {
+ s++;
+ if(likely(isdigit(*s) && isdigit(s[1]) && isdigit(s[2]))) {
+ char c = *s++ - '0';
+ c <<= 3;
+ c |= *s++ - '0';
+ c <<= 3;
+ c |= *s++ - '0';
+ *d++ = c;
+ }
+ else *d++ = '_';
+ }
+ else *d++ = *s++;
+ }
+ *d = '\0';
+
+ return buffer;
+}
+
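+// return 1 when 'ro' appears as a standalone option in a comma separated mount options string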
+static inline int is_read_only(const char *s) {
+ if(!s) return 0;
+
+ size_t len = strlen(s);
+ if(len < 2) return 0;
+ if(len == 2) {
+ if(!strcmp(s, "ro")) return 1;
+ return 0;
+ }
+ if(!strncmp(s, "ro,", 3)) return 1;
+ if(!strncmp(&s[len - 3], ",ro", 3)) return 1;
+ if(strstr(s, ",ro,")) return 1;
+ return 0;
+}
+
+// read the whole mountinfo into a linked list
+struct mountinfo *mountinfo_read(int do_statvfs) {
+ char filename[FILENAME_MAX + 1];
+ snprintfz(filename, FILENAME_MAX, "%s/proc/self/mountinfo", netdata_configured_host_prefix);
+ procfile *ff = procfile_open(filename, " \t", PROCFILE_FLAG_DEFAULT);
+ if(unlikely(!ff)) {
+ snprintfz(filename, FILENAME_MAX, "%s/proc/1/mountinfo", netdata_configured_host_prefix);
+ ff = procfile_open(filename, " \t", PROCFILE_FLAG_DEFAULT);
+ if(unlikely(!ff)) return NULL;
+ }
+
+ ff = procfile_readall(ff);
+ if(unlikely(!ff))
+ return NULL;
+
+ struct mountinfo *root = NULL, *last = NULL, *mi = NULL;
+
+ unsigned long l, lines = procfile_lines(ff);
+ for(l = 0; l < lines ;l++) {
+ if(unlikely(procfile_linewords(ff, l) < 5))
+ continue;
+
+ mi = mallocz(sizeof(struct mountinfo));
+
+ unsigned long w = 0;
+ mi->id = str2ul(procfile_lineword(ff, l, w)); w++;
+ mi->parentid = str2ul(procfile_lineword(ff, l, w)); w++;
+
+ char *major = procfile_lineword(ff, l, w), *minor; w++;
+ for(minor = major; *minor && *minor != ':' ;minor++) ;
+
+ if(unlikely(!*minor)) {
+ error("Cannot parse major:minor on '%s' at line %lu of '%s'", major, l + 1, filename);
+ freez(mi);
+ continue;
+ }
+
+ *minor = '\0';
+ minor++;
+
+ mi->flags = 0;
+
+ mi->major = str2ul(major);
+ mi->minor = str2ul(minor);
+
+ mi->root = strdupz(procfile_lineword(ff, l, w)); w++;
+ mi->root_hash = simple_hash(mi->root);
+
+ mi->mount_point = strdupz_decoding_octal(procfile_lineword(ff, l, w)); w++;
+ mi->mount_point_hash = simple_hash(mi->mount_point);
+
+ mi->persistent_id = strdupz(mi->mount_point);
+ netdata_fix_chart_id(mi->persistent_id);
+ mi->persistent_id_hash = simple_hash(mi->persistent_id);
+
+ mi->mount_options = strdupz(procfile_lineword(ff, l, w)); w++;
+
+ if(unlikely(is_read_only(mi->mount_options)))
+ mi->flags |= MOUNTINFO_READONLY;
+
+ // count the optional fields
+/*
+ unsigned long wo = w;
+*/
+ mi->optional_fields_count = 0;
+ char *s = procfile_lineword(ff, l, w);
+ while(*s && *s != '-') {
+ w++;
+ s = procfile_lineword(ff, l, w);
+ mi->optional_fields_count++;
+ }
+
+/*
+ if(unlikely(mi->optional_fields_count)) {
+ // we have some optional fields
+ // read them into a new array of pointers;
+
+ mi->optional_fields = mallocz(mi->optional_fields_count * sizeof(char *));
+
+ int i;
+ for(i = 0; i < mi->optional_fields_count ; i++) {
+ *mi->optional_fields[wo] = strdupz(procfile_lineword(ff, l, w));
+ wo++;
+ }
+ }
+ else
+ mi->optional_fields = NULL;
+*/
+
+ if(likely(*s == '-')) {
+ w++;
+
+ mi->filesystem = strdupz(procfile_lineword(ff, l, w)); w++;
+ mi->filesystem_hash = simple_hash(mi->filesystem);
+
+ mi->mount_source = strdupz_decoding_octal(procfile_lineword(ff, l, w)); w++;
+ mi->mount_source_hash = simple_hash(mi->mount_source);
+
+ mi->super_options = strdupz(procfile_lineword(ff, l, w)); w++;
+
+ if(unlikely(is_read_only(mi->super_options)))
+ mi->flags |= MOUNTINFO_READONLY;
+
+ if(unlikely(ME_DUMMY(mi->mount_source, mi->filesystem)))
+ mi->flags |= MOUNTINFO_IS_DUMMY;
+
+ if(unlikely(ME_REMOTE(mi->mount_source, mi->filesystem)))
+ mi->flags |= MOUNTINFO_IS_REMOTE;
+
+ // mark as SAME_DEV the duplicates (mount points that stat() to the same device,
+ // typically bind mounts); the shortest mount point of each device is left unflagged
+ if(do_statvfs) {
+ struct stat buf;
+ if(unlikely(stat(mi->mount_point, &buf) == -1)) {
+ mi->st_dev = 0;
+ mi->flags |= MOUNTINFO_NO_STAT;
+ }
+ else {
+ mi->st_dev = buf.st_dev;
+
+ struct mountinfo *mt;
+ for(mt = root; mt; mt = mt->next) {
+ if(unlikely(mt->st_dev == mi->st_dev && !(mt->flags & MOUNTINFO_IS_SAME_DEV))) {
+ if(strlen(mi->mount_point) < strlen(mt->mount_point))
+ mt->flags |= MOUNTINFO_IS_SAME_DEV;
+ else
+ mi->flags |= MOUNTINFO_IS_SAME_DEV;
+ }
+ }
+ }
+ }
+ else {
+ mi->st_dev = 0;
+ }
+ }
+ else {
+ mi->filesystem = NULL;
+ mi->filesystem_hash = 0;
+
+ mi->mount_source = NULL;
+ mi->mount_source_hash = 0;
+
+ mi->super_options = NULL;
+
+ mi->st_dev = 0;
+ }
+
+ // check if it has size
+ if(do_statvfs && !(mi->flags & MOUNTINFO_IS_DUMMY)) {
+ struct statvfs buff_statvfs;
+ if(unlikely(statvfs(mi->mount_point, &buff_statvfs) < 0)) {
+ mi->flags |= MOUNTINFO_NO_STAT;
+ }
+ else if(unlikely(!buff_statvfs.f_blocks /* || !buff_statvfs.f_files */)) {
+ mi->flags |= MOUNTINFO_NO_SIZE;
+ }
+ }
+
+ // link it
+ if(unlikely(!root))
+ root = mi;
+ else
+ last->next = mi;
+
+ last = mi;
+ mi->next = NULL;
+
+/*
+#ifdef NETDATA_INTERNAL_CHECKS
+ fprintf(stderr, "MOUNTINFO: %ld %ld %lu:%lu root '%s', persistent id '%s', mount point '%s', mount options '%s', filesystem '%s', mount source '%s', super options '%s'%s%s%s%s%s%s\n",
+ mi->id,
+ mi->parentid,
+ mi->major,
+ mi->minor,
+ mi->root,
+ mi->persistent_id,
+ (mi->mount_point)?mi->mount_point:"",
+ (mi->mount_options)?mi->mount_options:"",
+ (mi->filesystem)?mi->filesystem:"",
+ (mi->mount_source)?mi->mount_source:"",
+ (mi->super_options)?mi->super_options:"",
+ (mi->flags & MOUNTINFO_IS_DUMMY)?" DUMMY":"",
+ (mi->flags & MOUNTINFO_IS_BIND)?" BIND":"",
+ (mi->flags & MOUNTINFO_IS_REMOTE)?" REMOTE":"",
+ (mi->flags & MOUNTINFO_NO_STAT)?" NOSTAT":"",
+ (mi->flags & MOUNTINFO_NO_SIZE)?" NOSIZE":"",
+ (mi->flags & MOUNTINFO_IS_SAME_DEV)?" SAMEDEV":""
+ );
+#endif
+*/
+ }
+
+/* find if the mount options have "bind" in them
+ {
+ FILE *fp = setmntent(MOUNTED, "r");
+ if (fp != NULL) {
+ struct mntent mntbuf;
+ struct mntent *mnt;
+ char buf[4096 + 1];
+
+ while ((mnt = getmntent_r(fp, &mntbuf, buf, 4096))) {
+ char *bind = hasmntopt(mnt, "bind");
+ if(unlikely(bind)) {
+ struct mountinfo *mi;
+ for(mi = root; mi ; mi = mi->next) {
+ if(unlikely(strcmp(mnt->mnt_dir, mi->mount_point) == 0)) {
+ fprintf(stderr, "Mount point '%s' is BIND\n", mi->mount_point);
+ mi->flags |= MOUNTINFO_IS_BIND;
+ break;
+ }
+ }
+
+#ifdef NETDATA_INTERNAL_CHECKS
+ if(unlikely(!mi)) {
+ error("Mount point '%s' not found in /proc/self/mountinfo", mnt->mnt_dir);
+ }
+#endif
+ }
+ }
+ endmntent(fp);
+ }
+ }
+*/
+
+ procfile_close(ff);
+ return root;
+}
diff --git a/collectors/proc.plugin/proc_self_mountinfo.h b/collectors/proc.plugin/proc_self_mountinfo.h
new file mode 100644
index 000000000..15d63c786
--- /dev/null
+++ b/collectors/proc.plugin/proc_self_mountinfo.h
@@ -0,0 +1,57 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_PROC_SELF_MOUNTINFO_H
+#define NETDATA_PROC_SELF_MOUNTINFO_H 1
+
+#define MOUNTINFO_IS_DUMMY 0x00000001
+#define MOUNTINFO_IS_REMOTE 0x00000002
+#define MOUNTINFO_IS_BIND 0x00000004
+#define MOUNTINFO_IS_SAME_DEV 0x00000008
+#define MOUNTINFO_NO_STAT 0x00000010
+#define MOUNTINFO_NO_SIZE 0x00000020
+#define MOUNTINFO_READONLY 0x00000040
+
+struct mountinfo {
+ long id; // mount ID: unique identifier of the mount (may be reused after umount(2)).
+ long parentid; // parent ID: ID of parent mount (or of self for the top of the mount tree).
+ unsigned long major; // major:minor: value of st_dev for files on filesystem (see stat(2)).
+ unsigned long minor;
+
+ char *persistent_id; // a calculated persistent id for the mount point
+ uint32_t persistent_id_hash;
+
+ char *root; // root: root of the mount within the filesystem.
+ uint32_t root_hash;
+
+ char *mount_point; // mount point: mount point relative to the process's root.
+ uint32_t mount_point_hash;
+
+ char *mount_options; // mount options: per-mount options.
+
+ int optional_fields_count;
+/*
+ char ***optional_fields; // optional fields: zero or more fields of the form "tag[:value]".
+*/
+ char *filesystem; // filesystem type: name of filesystem in the form "type[.subtype]".
+ uint32_t filesystem_hash;
+
+ char *mount_source; // mount source: filesystem-specific information or "none".
+ uint32_t mount_source_hash;
+
+ char *super_options; // super options: per-superblock options.
+
+ uint32_t flags;
+
+ dev_t st_dev; // id of device as given by stat()
+
+ struct mountinfo *next;
+};
+
+extern struct mountinfo *mountinfo_find(struct mountinfo *root, unsigned long major, unsigned long minor);
+extern struct mountinfo *mountinfo_find_by_filesystem_mount_source(struct mountinfo *root, const char *filesystem, const char *mount_source);
+extern struct mountinfo *mountinfo_find_by_filesystem_super_option(struct mountinfo *root, const char *filesystem, const char *super_options);
+
+extern void mountinfo_free_all(struct mountinfo *mi);
+extern struct mountinfo *mountinfo_read(int do_statvfs);
+
+#endif /* NETDATA_PROC_SELF_MOUNTINFO_H */
\ No newline at end of file
diff --git a/collectors/proc.plugin/proc_softirqs.c b/collectors/proc.plugin/proc_softirqs.c
new file mode 100644
index 000000000..d68c69bb7
--- /dev/null
+++ b/collectors/proc.plugin/proc_softirqs.c
@@ -0,0 +1,242 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "plugin_proc.h"
+
+#define PLUGIN_PROC_MODULE_SOFTIRQS_NAME "/proc/softirqs"
+
+#define MAX_INTERRUPT_NAME 50
+
+struct cpu_interrupt {
+ unsigned long long value;
+ RRDDIM *rd;
+};
+
+struct interrupt {
+ int used;
+ char *id;
+ char name[MAX_INTERRUPT_NAME + 1];
+ RRDDIM *rd;
+ unsigned long long total;
+ struct cpu_interrupt cpu[];
+};
+
+// since each interrupt is variable in size
+// we use this to calculate its record size
+#define recordsize(cpus) (sizeof(struct interrupt) + ((cpus) * sizeof(struct cpu_interrupt)))
+
+// given a base, get a pointer to each record
+#define irrindex(base, line, cpus) ((struct interrupt *)&((char *)(base))[(line) * recordsize(cpus)])
+
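+// return a static array of interrupt records,
+// (re)allocated whenever the number of lines changes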
+static inline struct interrupt *get_interrupts_array(size_t lines, int cpus) {
+ static struct interrupt *irrs = NULL;
+ static size_t allocated = 0;
+
+ if(unlikely(lines != allocated)) {
+ uint32_t l;
+ int c;
+
+ irrs = (struct interrupt *)reallocz(irrs, lines * recordsize(cpus));
+
+ // reset all interrupt RRDDIM pointers as any line could have shifted
+ for(l = 0; l < lines ;l++) {
+ struct interrupt *irr = irrindex(irrs, l, cpus);
+ irr->rd = NULL;
+ irr->name[0] = '\0';
+ for(c = 0; c < cpus ;c++)
+ irr->cpu[c].rd = NULL;
+ }
+
+ allocated = lines;
+ }
+
+ return irrs;
+}
+
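+// collect /proc/softirqs: the first line names the CPUs,
+// every other line is one softirq type with a counter per CPU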
+int do_proc_softirqs(int update_every, usec_t dt) {
+ (void)dt;
+ static procfile *ff = NULL;
+ static int cpus = -1, do_per_core = CONFIG_BOOLEAN_INVALID;
+ struct interrupt *irrs = NULL;
+
+ if(unlikely(do_per_core == CONFIG_BOOLEAN_INVALID))
+ do_per_core = config_get_boolean_ondemand("plugin:proc:/proc/softirqs", "interrupts per core", CONFIG_BOOLEAN_AUTO);
+
+ if(unlikely(!ff)) {
+ char filename[FILENAME_MAX + 1];
+ snprintfz(filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/proc/softirqs");
+ ff = procfile_open(config_get("plugin:proc:/proc/softirqs", "filename to monitor", filename), " \t:", PROCFILE_FLAG_DEFAULT);
+ if(unlikely(!ff)) return 1;
+ }
+
+ ff = procfile_readall(ff);
+ if(unlikely(!ff)) return 0; // we return 0, so that we will retry to open it next time
+
+ size_t lines = procfile_lines(ff), l;
+ size_t words = procfile_linewords(ff, 0);
+
+ if(unlikely(!lines)) {
+ error("Cannot read /proc/softirqs, zero lines reported.");
+ return 1;
+ }
+
+ // find how many CPUs there are
+ if(unlikely(cpus == -1)) {
+ uint32_t w;
+ cpus = 0;
+ for(w = 0; w < words ; w++) {
+ if(likely(strncmp(procfile_lineword(ff, 0, w), "CPU", 3) == 0))
+ cpus++;
+ }
+ }
+
+ if(unlikely(!cpus)) {
+ error("PLUGIN: PROC_SOFTIRQS: Cannot find the number of CPUs in /proc/softirqs");
+ return 1;
+ }
+
+ // allocate (or resize) the array we need
+ irrs = get_interrupts_array(lines, cpus);
+ irrs[0].used = 0;
+
+ // loop through all lines
+ for(l = 1; l < lines ;l++) {
+ struct interrupt *irr = irrindex(irrs, l, cpus);
+ irr->used = 0;
+ irr->total = 0;
+
+ words = procfile_linewords(ff, l);
+ if(unlikely(!words)) continue;
+
+ irr->id = procfile_lineword(ff, l, 0);
+ if(unlikely(!irr->id || !irr->id[0])) continue;
+
+ int c;
+ for(c = 0; c < cpus ;c++) {
+ if(likely((c + 1) < (int)words))
+ irr->cpu[c].value = str2ull(procfile_lineword(ff, l, (uint32_t)(c + 1)));
+ else
+ irr->cpu[c].value = 0;
+
+ irr->total += irr->cpu[c].value;
+ }
+
+ strncpyz(irr->name, irr->id, MAX_INTERRUPT_NAME);
+
+ irr->used = 1;
+ }
+
+ // --------------------------------------------------------------------
+
+ static RRDSET *st_system_softirqs = NULL;
+ if(unlikely(!st_system_softirqs))
+ st_system_softirqs = rrdset_create_localhost(
+ "system"
+ , "softirqs"
+ , NULL
+ , "softirqs"
+ , NULL
+ , "System softirqs"
+ , "softirqs/s"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_SOFTIRQS_NAME
+ , NETDATA_CHART_PRIO_SYSTEM_SOFTIRQS
+ , update_every
+ , RRDSET_TYPE_STACKED
+ );
+ else
+ rrdset_next(st_system_softirqs);
+
+ for(l = 0; l < lines ;l++) {
+ struct interrupt *irr = irrindex(irrs, l, cpus);
+
+ if(irr->used && irr->total) {
+ // an interrupt name may have changed without the total number of lines changing,
+ // if the same number of interrupts were added and removed between two
+ // calls of this function.
+ if(unlikely(!irr->rd || strncmp(irr->name, irr->rd->name, MAX_INTERRUPT_NAME) != 0)) {
+ irr->rd = rrddim_add(st_system_softirqs, irr->id, irr->name, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rrddim_set_name(st_system_softirqs, irr->rd, irr->name);
+
+ // also reset per cpu RRDDIMs to avoid repeating strncmp() in the per core loop
+ if(likely(do_per_core != CONFIG_BOOLEAN_NO)) {
+ int c;
+ for(c = 0; c < cpus; c++) irr->cpu[c].rd = NULL;
+ }
+ }
+
+ rrddim_set_by_pointer(st_system_softirqs, irr->rd, irr->total);
+ }
+ }
+
+ rrdset_done(st_system_softirqs);
+
+ // --------------------------------------------------------------------
+
+ if(do_per_core != CONFIG_BOOLEAN_NO) {
+ static RRDSET **core_st = NULL;
+ static int old_cpus = 0;
+
+ if(old_cpus < cpus) {
+ core_st = reallocz(core_st, sizeof(RRDSET *) * cpus);
+ memset(&core_st[old_cpus], 0, sizeof(RRDSET *) * (cpus - old_cpus));
+ old_cpus = cpus;
+ }
+
+ int c;
+
+ for(c = 0; c < cpus ; c++) {
+ if(unlikely(!core_st[c])) {
+ // find if everything is just zero
+ unsigned long long core_sum = 0;
+
+ for (l = 0; l < lines; l++) {
+ struct interrupt *irr = irrindex(irrs, l, cpus);
+ if (unlikely(!irr->used)) continue;
+ core_sum += irr->cpu[c].value;
+ }
+
+ if (unlikely(core_sum == 0)) continue; // try next core
+
+ char id[50 + 1];
+ snprintfz(id, 50, "cpu%d_softirqs", c);
+
+ char title[100 + 1];
+ snprintfz(title, 100, "CPU%d softirqs", c);
+
+ core_st[c] = rrdset_create_localhost(
+ "cpu"
+ , id
+ , NULL
+ , "softirqs"
+ , "cpu.softirqs"
+ , title
+ , "softirqs/s"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_SOFTIRQS_NAME
+ , NETDATA_CHART_PRIO_SOFTIRQS_PER_CORE + c
+ , update_every
+ , RRDSET_TYPE_STACKED
+ );
+ }
+ else
+ rrdset_next(core_st[c]);
+
+ for(l = 0; l < lines ;l++) {
+ struct interrupt *irr = irrindex(irrs, l, cpus);
+
+ if(irr->used && (do_per_core == CONFIG_BOOLEAN_YES || irr->cpu[c].value)) {
+ if(unlikely(!irr->cpu[c].rd)) {
+ irr->cpu[c].rd = rrddim_add(core_st[c], irr->id, irr->name, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rrddim_set_name(core_st[c], irr->cpu[c].rd, irr->name);
+ }
+
+ rrddim_set_by_pointer(core_st[c], irr->cpu[c].rd, irr->cpu[c].value);
+ }
+ }
+
+ rrdset_done(core_st[c]);
+ }
+ }
+
+ return 0;
+}
diff --git a/collectors/proc.plugin/proc_spl_kstat_zfs.c b/collectors/proc.plugin/proc_spl_kstat_zfs.c
new file mode 100644
index 000000000..a96b236cb
--- /dev/null
+++ b/collectors/proc.plugin/proc_spl_kstat_zfs.c
@@ -0,0 +1,155 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "plugin_proc.h"
+#include "zfs_common.h"
+
+#define ZFS_PROC_ARCSTATS "/proc/spl/kstat/zfs/arcstats"
+
+extern struct arcstats arcstats;
+
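+// read /proc/spl/kstat/zfs/arcstats into the global arcstats structure
+// and let the common ZFS code generate the ARC charts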
+int do_proc_spl_kstat_zfs_arcstats(int update_every, usec_t dt) {
+ (void)dt;
+
+ static procfile *ff = NULL;
+ static ARL_BASE *arl_base = NULL;
+
+ arcstats.l2exist = -1;
+
+ if(unlikely(!arl_base)) {
+ arl_base = arl_create("arcstats", NULL, 60);
+
+ arl_expect(arl_base, "hits", &arcstats.hits);
+ arl_expect(arl_base, "misses", &arcstats.misses);
+ arl_expect(arl_base, "demand_data_hits", &arcstats.demand_data_hits);
+ arl_expect(arl_base, "demand_data_misses", &arcstats.demand_data_misses);
+ arl_expect(arl_base, "demand_metadata_hits", &arcstats.demand_metadata_hits);
+ arl_expect(arl_base, "demand_metadata_misses", &arcstats.demand_metadata_misses);
+ arl_expect(arl_base, "prefetch_data_hits", &arcstats.prefetch_data_hits);
+ arl_expect(arl_base, "prefetch_data_misses", &arcstats.prefetch_data_misses);
+ arl_expect(arl_base, "prefetch_metadata_hits", &arcstats.prefetch_metadata_hits);
+ arl_expect(arl_base, "prefetch_metadata_misses", &arcstats.prefetch_metadata_misses);
+ arl_expect(arl_base, "mru_hits", &arcstats.mru_hits);
+ arl_expect(arl_base, "mru_ghost_hits", &arcstats.mru_ghost_hits);
+ arl_expect(arl_base, "mfu_hits", &arcstats.mfu_hits);
+ arl_expect(arl_base, "mfu_ghost_hits", &arcstats.mfu_ghost_hits);
+ arl_expect(arl_base, "deleted", &arcstats.deleted);
+ arl_expect(arl_base, "mutex_miss", &arcstats.mutex_miss);
+ arl_expect(arl_base, "evict_skip", &arcstats.evict_skip);
+ arl_expect(arl_base, "evict_not_enough", &arcstats.evict_not_enough);
+ arl_expect(arl_base, "evict_l2_cached", &arcstats.evict_l2_cached);
+ arl_expect(arl_base, "evict_l2_eligible", &arcstats.evict_l2_eligible);
+ arl_expect(arl_base, "evict_l2_ineligible", &arcstats.evict_l2_ineligible);
+ arl_expect(arl_base, "evict_l2_skip", &arcstats.evict_l2_skip);
+ arl_expect(arl_base, "hash_elements", &arcstats.hash_elements);
+ arl_expect(arl_base, "hash_elements_max", &arcstats.hash_elements_max);
+ arl_expect(arl_base, "hash_collisions", &arcstats.hash_collisions);
+ arl_expect(arl_base, "hash_chains", &arcstats.hash_chains);
+ arl_expect(arl_base, "hash_chain_max", &arcstats.hash_chain_max);
+ arl_expect(arl_base, "p", &arcstats.p);
+ arl_expect(arl_base, "c", &arcstats.c);
+ arl_expect(arl_base, "c_min", &arcstats.c_min);
+ arl_expect(arl_base, "c_max", &arcstats.c_max);
+ arl_expect(arl_base, "size", &arcstats.size);
+ arl_expect(arl_base, "hdr_size", &arcstats.hdr_size);
+ arl_expect(arl_base, "data_size", &arcstats.data_size);
+ arl_expect(arl_base, "metadata_size", &arcstats.metadata_size);
+ arl_expect(arl_base, "other_size", &arcstats.other_size);
+ arl_expect(arl_base, "anon_size", &arcstats.anon_size);
+ arl_expect(arl_base, "anon_evictable_data", &arcstats.anon_evictable_data);
+ arl_expect(arl_base, "anon_evictable_metadata", &arcstats.anon_evictable_metadata);
+ arl_expect(arl_base, "mru_size", &arcstats.mru_size);
+ arl_expect(arl_base, "mru_evictable_data", &arcstats.mru_evictable_data);
+ arl_expect(arl_base, "mru_evictable_metadata", &arcstats.mru_evictable_metadata);
+ arl_expect(arl_base, "mru_ghost_size", &arcstats.mru_ghost_size);
+ arl_expect(arl_base, "mru_ghost_evictable_data", &arcstats.mru_ghost_evictable_data);
+ arl_expect(arl_base, "mru_ghost_evictable_metadata", &arcstats.mru_ghost_evictable_metadata);
+ arl_expect(arl_base, "mfu_size", &arcstats.mfu_size);
+ arl_expect(arl_base, "mfu_evictable_data", &arcstats.mfu_evictable_data);
+ arl_expect(arl_base, "mfu_evictable_metadata", &arcstats.mfu_evictable_metadata);
+ arl_expect(arl_base, "mfu_ghost_size", &arcstats.mfu_ghost_size);
+ arl_expect(arl_base, "mfu_ghost_evictable_data", &arcstats.mfu_ghost_evictable_data);
+ arl_expect(arl_base, "mfu_ghost_evictable_metadata", &arcstats.mfu_ghost_evictable_metadata);
+ arl_expect(arl_base, "l2_hits", &arcstats.l2_hits);
+ arl_expect(arl_base, "l2_misses", &arcstats.l2_misses);
+ arl_expect(arl_base, "l2_feeds", &arcstats.l2_feeds);
+ arl_expect(arl_base, "l2_rw_clash", &arcstats.l2_rw_clash);
+ arl_expect(arl_base, "l2_read_bytes", &arcstats.l2_read_bytes);
+ arl_expect(arl_base, "l2_write_bytes", &arcstats.l2_write_bytes);
+ arl_expect(arl_base, "l2_writes_sent", &arcstats.l2_writes_sent);
+ arl_expect(arl_base, "l2_writes_done", &arcstats.l2_writes_done);
+ arl_expect(arl_base, "l2_writes_error", &arcstats.l2_writes_error);
+ arl_expect(arl_base, "l2_writes_lock_retry", &arcstats.l2_writes_lock_retry);
+ arl_expect(arl_base, "l2_evict_lock_retry", &arcstats.l2_evict_lock_retry);
+ arl_expect(arl_base, "l2_evict_reading", &arcstats.l2_evict_reading);
+ arl_expect(arl_base, "l2_evict_l1cached", &arcstats.l2_evict_l1cached);
+ arl_expect(arl_base, "l2_free_on_write", &arcstats.l2_free_on_write);
+ arl_expect(arl_base, "l2_cdata_free_on_write", &arcstats.l2_cdata_free_on_write);
+ arl_expect(arl_base, "l2_abort_lowmem", &arcstats.l2_abort_lowmem);
+ arl_expect(arl_base, "l2_cksum_bad", &arcstats.l2_cksum_bad);
+ arl_expect(arl_base, "l2_io_error", &arcstats.l2_io_error);
+ arl_expect(arl_base, "l2_size", &arcstats.l2_size);
+ arl_expect(arl_base, "l2_asize", &arcstats.l2_asize);
+ arl_expect(arl_base, "l2_hdr_size", &arcstats.l2_hdr_size);
+ arl_expect(arl_base, "l2_compress_successes", &arcstats.l2_compress_successes);
+ arl_expect(arl_base, "l2_compress_zeros", &arcstats.l2_compress_zeros);
+ arl_expect(arl_base, "l2_compress_failures", &arcstats.l2_compress_failures);
+ arl_expect(arl_base, "memory_throttle_count", &arcstats.memory_throttle_count);
+ arl_expect(arl_base, "duplicate_buffers", &arcstats.duplicate_buffers);
+ arl_expect(arl_base, "duplicate_buffers_size", &arcstats.duplicate_buffers_size);
+ arl_expect(arl_base, "duplicate_reads", &arcstats.duplicate_reads);
+ arl_expect(arl_base, "memory_direct_count", &arcstats.memory_direct_count);
+ arl_expect(arl_base, "memory_indirect_count", &arcstats.memory_indirect_count);
+ arl_expect(arl_base, "arc_no_grow", &arcstats.arc_no_grow);
+ arl_expect(arl_base, "arc_tempreserve", &arcstats.arc_tempreserve);
+ arl_expect(arl_base, "arc_loaned_bytes", &arcstats.arc_loaned_bytes);
+ arl_expect(arl_base, "arc_prune", &arcstats.arc_prune);
+ arl_expect(arl_base, "arc_meta_used", &arcstats.arc_meta_used);
+ arl_expect(arl_base, "arc_meta_limit", &arcstats.arc_meta_limit);
+ arl_expect(arl_base, "arc_meta_max", &arcstats.arc_meta_max);
+ arl_expect(arl_base, "arc_meta_min", &arcstats.arc_meta_min);
+ arl_expect(arl_base, "arc_need_free", &arcstats.arc_need_free);
+ arl_expect(arl_base, "arc_sys_free", &arcstats.arc_sys_free);
+ }
+
+ if(unlikely(!ff)) {
+ char filename[FILENAME_MAX + 1];
+ snprintfz(filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, ZFS_PROC_ARCSTATS);
+ ff = procfile_open(config_get("plugin:proc:" ZFS_PROC_ARCSTATS, "filename to monitor", filename), " \t:", PROCFILE_FLAG_DEFAULT);
+ if(unlikely(!ff))
+ return 1;
+ }
+
+ ff = procfile_readall(ff);
+ if(unlikely(!ff))
+ return 0; // we return 0, so that we will retry to open it next time
+
+ size_t lines = procfile_lines(ff), l;
+
+ arl_begin(arl_base);
+
+ for(l = 0; l < lines ;l++) {
+ size_t words = procfile_linewords(ff, l);
+ if(unlikely(words < 3)) {
+ if(unlikely(words)) error("Cannot read " ZFS_PROC_ARCSTATS " line %zu. Expected 3 params, read %zu.", l, words);
+ continue;
+ }
+
+ const char *key = procfile_lineword(ff, l, 0);
+ const char *value = procfile_lineword(ff, l, 2);
+
+ if(unlikely(arcstats.l2exist == -1)) {
+ if(key[0] == 'l' && key[1] == '2' && key[2] == '_')
+ arcstats.l2exist = 1;
+ }
+
+ if(unlikely(arl_check(arl_base, key, value))) break;
+ }
+
+ if(unlikely(arcstats.l2exist == -1))
+ arcstats.l2exist = 0;
+
+ generate_charts_arcstats(PLUGIN_PROC_NAME, ZFS_PROC_ARCSTATS, update_every);
+ generate_charts_arc_summary(PLUGIN_PROC_NAME, ZFS_PROC_ARCSTATS, update_every);
+
+ return 0;
+}
diff --git a/collectors/proc.plugin/proc_stat.c b/collectors/proc.plugin/proc_stat.c
new file mode 100644
index 000000000..fb77df647
--- /dev/null
+++ b/collectors/proc.plugin/proc_stat.c
@@ -0,0 +1,570 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "plugin_proc.h"
+
+#define PLUGIN_PROC_MODULE_STAT_NAME "/proc/stat"
+
+struct per_core_single_number_file {
+ unsigned char found:1;
+ const char *filename;
+ int fd;
+ collected_number value;
+ RRDDIM *rd;
+};
+
+#define CORE_THROTTLE_COUNT_INDEX 0
+#define PACKAGE_THROTTLE_COUNT_INDEX 1
+#define SCALING_CUR_FREQ_INDEX 2
+#define PER_CORE_FILES 3
+
+struct cpu_chart {
+ const char *id;
+
+ RRDSET *st;
+ RRDDIM *rd_user;
+ RRDDIM *rd_nice;
+ RRDDIM *rd_system;
+ RRDDIM *rd_idle;
+ RRDDIM *rd_iowait;
+ RRDDIM *rd_irq;
+ RRDDIM *rd_softirq;
+ RRDDIM *rd_steal;
+ RRDDIM *rd_guest;
+ RRDDIM *rd_guest_nice;
+
+ struct per_core_single_number_file files[PER_CORE_FILES];
+};
+
+static int keep_per_core_fds_open = CONFIG_BOOLEAN_YES;
+
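+// read one small sysfs file per core for the given index (throttle counters or scaling_cur_freq);
+// returns -1 when no file could be read, otherwise the number of files with a non-zero value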
+static int read_per_core_files(struct cpu_chart *all_cpu_charts, size_t len, size_t index) {
+ char buf[50 + 1];
+ size_t x, files_read = 0, files_nonzero = 0;
+
+ for(x = 0; x < len ; x++) {
+ struct per_core_single_number_file *f = &all_cpu_charts[x].files[index];
+
+ f->found = 0;
+
+ if(unlikely(!f->filename))
+ continue;
+
+ if(unlikely(f->fd == -1)) {
+ f->fd = open(f->filename, O_RDONLY);
+ if (unlikely(f->fd == -1)) {
+ error("Cannot open file '%s'", f->filename);
+ continue;
+ }
+ }
+
+ ssize_t ret = read(f->fd, buf, 50);
+ if(unlikely(ret < 0)) {
+ // cannot read that file
+
+ error("Cannot read file '%s'", f->filename);
+ close(f->fd);
+ f->fd = -1;
+ continue;
+ }
+ else {
+ // successful read
+
+ // terminate the buffer
+ buf[ret] = '\0';
+
+ if(unlikely(keep_per_core_fds_open != CONFIG_BOOLEAN_YES)) {
+ close(f->fd);
+ f->fd = -1;
+ }
+ else if(lseek(f->fd, 0, SEEK_SET) == -1) {
+ error("Cannot seek in file '%s'", f->filename);
+ close(f->fd);
+ f->fd = -1;
+ }
+ }
+
+ files_read++;
+ f->found = 1;
+
+ f->value = str2ll(buf, NULL);
+ // info("read '%s', parsed as " COLLECTED_NUMBER_FORMAT, buf, f->value);
+ if(likely(f->value != 0))
+ files_nonzero++;
+ }
+
+ if(files_read == 0)
+ return -1;
+
+ if(files_nonzero == 0)
+ return 0;
+
+ return (int)files_nonzero;
+}
+
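+// add one dimension per core to the given chart,
+// using the values read by read_per_core_files()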
+static void chart_per_core_files(struct cpu_chart *all_cpu_charts, size_t len, size_t index, RRDSET *st, collected_number multiplier, collected_number divisor, RRD_ALGORITHM algorithm) {
+ size_t x;
+ for(x = 0; x < len ; x++) {
+ struct per_core_single_number_file *f = &all_cpu_charts[x].files[index];
+
+ if(unlikely(!f->found))
+ continue;
+
+ if(unlikely(!f->rd))
+ f->rd = rrddim_add(st, all_cpu_charts[x].id, NULL, multiplier, divisor, algorithm);
+
+ rrddim_set_by_pointer(st, f->rd, f->value);
+ }
+}
+
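+// collect /proc/stat: total and per core cpu utilization, interrupts, context switches,
+// forks, running/blocked processes, plus a few per core sysfs files
+// (thermal throttling counters, scaling_cur_freq)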
+int do_proc_stat(int update_every, usec_t dt) {
+ (void)dt;
+
+ static struct cpu_chart *all_cpu_charts = NULL;
+ static size_t all_cpu_charts_size = 0;
+ static procfile *ff = NULL;
+ static int do_cpu = -1, do_cpu_cores = -1, do_interrupts = -1, do_context = -1, do_forks = -1, do_processes = -1, do_core_throttle_count = -1, do_package_throttle_count = -1, do_scaling_cur_freq = -1;
+ static uint32_t hash_intr, hash_ctxt, hash_processes, hash_procs_running, hash_procs_blocked;
+ static char *core_throttle_count_filename = NULL, *package_throttle_count_filename = NULL, *scaling_cur_freq_filename = NULL;
+ static RRDVAR *cpus_var = NULL;
+ size_t cores_found = (size_t)processors;
+
+ if(unlikely(do_cpu == -1)) {
+ do_cpu = config_get_boolean("plugin:proc:/proc/stat", "cpu utilization", CONFIG_BOOLEAN_YES);
+ do_cpu_cores = config_get_boolean("plugin:proc:/proc/stat", "per cpu core utilization", CONFIG_BOOLEAN_YES);
+ do_interrupts = config_get_boolean("plugin:proc:/proc/stat", "cpu interrupts", CONFIG_BOOLEAN_YES);
+ do_context = config_get_boolean("plugin:proc:/proc/stat", "context switches", CONFIG_BOOLEAN_YES);
+ do_forks = config_get_boolean("plugin:proc:/proc/stat", "processes started", CONFIG_BOOLEAN_YES);
+ do_processes = config_get_boolean("plugin:proc:/proc/stat", "processes running", CONFIG_BOOLEAN_YES);
+
+ // give sane defaults based on the number of processors
+ if(processors > 50) {
+ // the system has too many processors
+ keep_per_core_fds_open = CONFIG_BOOLEAN_NO;
+ do_core_throttle_count = CONFIG_BOOLEAN_NO;
+ do_package_throttle_count = CONFIG_BOOLEAN_NO;
+ do_scaling_cur_freq = CONFIG_BOOLEAN_NO;
+ }
+ else {
+ // the system has a reasonable number of processors
+ keep_per_core_fds_open = CONFIG_BOOLEAN_YES;
+ do_core_throttle_count = CONFIG_BOOLEAN_AUTO;
+ do_package_throttle_count = CONFIG_BOOLEAN_NO;
+ do_scaling_cur_freq = CONFIG_BOOLEAN_NO;
+ }
+
+ keep_per_core_fds_open = config_get_boolean("plugin:proc:/proc/stat", "keep per core files open", keep_per_core_fds_open);
+ do_core_throttle_count = config_get_boolean_ondemand("plugin:proc:/proc/stat", "core_throttle_count", do_core_throttle_count);
+ do_package_throttle_count = config_get_boolean_ondemand("plugin:proc:/proc/stat", "package_throttle_count", do_package_throttle_count);
+ do_scaling_cur_freq = config_get_boolean_ondemand("plugin:proc:/proc/stat", "scaling_cur_freq", do_scaling_cur_freq);
+
+ hash_intr = simple_hash("intr");
+ hash_ctxt = simple_hash("ctxt");
+ hash_processes = simple_hash("processes");
+ hash_procs_running = simple_hash("procs_running");
+ hash_procs_blocked = simple_hash("procs_blocked");
+
+ char filename[FILENAME_MAX + 1];
+ snprintfz(filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/sys/devices/system/cpu/%s/thermal_throttle/core_throttle_count");
+ core_throttle_count_filename = config_get("plugin:proc:/proc/stat", "core_throttle_count filename to monitor", filename);
+
+ snprintfz(filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/sys/devices/system/cpu/%s/thermal_throttle/package_throttle_count");
+ package_throttle_count_filename = config_get("plugin:proc:/proc/stat", "package_throttle_count filename to monitor", filename);
+
+ snprintfz(filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/sys/devices/system/cpu/%s/cpufreq/scaling_cur_freq");
+ scaling_cur_freq_filename = config_get("plugin:proc:/proc/stat", "scaling_cur_freq filename to monitor", filename);
+ }
+
+ if(unlikely(!ff)) {
+ char filename[FILENAME_MAX + 1];
+ snprintfz(filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/proc/stat");
+ ff = procfile_open(config_get("plugin:proc:/proc/stat", "filename to monitor", filename), " \t:", PROCFILE_FLAG_DEFAULT);
+ if(unlikely(!ff)) return 1;
+ }
+
+ ff = procfile_readall(ff);
+ if(unlikely(!ff)) return 0; // we return 0, so that we will retry to open it next time
+
+ size_t lines = procfile_lines(ff), l;
+ size_t words;
+
+ unsigned long long processes = 0, running = 0 , blocked = 0;
+
+ for(l = 0; l < lines ;l++) {
+ char *row_key = procfile_lineword(ff, l, 0);
+ uint32_t hash = simple_hash(row_key);
+
+ // faster strncmp(row_key, "cpu", 3) == 0
+ if(likely(row_key[0] == 'c' && row_key[1] == 'p' && row_key[2] == 'u')) {
+ words = procfile_linewords(ff, l);
+ if(unlikely(words < 9)) {
+ error("Cannot read /proc/stat cpu line. Expected 9 params, read %zu.", words);
+ continue;
+ }
+
+ size_t core = (row_key[3] == '\0') ? 0 : str2ul(&row_key[3]) + 1;
+ if(core > 0) cores_found = core;
+
+ if(likely((core == 0 && do_cpu) || (core > 0 && do_cpu_cores))) {
+ char *id;
+ unsigned long long user = 0, nice = 0, system = 0, idle = 0, iowait = 0, irq = 0, softirq = 0, steal = 0, guest = 0, guest_nice = 0;
+
+ id = row_key;
+ user = str2ull(procfile_lineword(ff, l, 1));
+ nice = str2ull(procfile_lineword(ff, l, 2));
+ system = str2ull(procfile_lineword(ff, l, 3));
+ idle = str2ull(procfile_lineword(ff, l, 4));
+ iowait = str2ull(procfile_lineword(ff, l, 5));
+ irq = str2ull(procfile_lineword(ff, l, 6));
+ softirq = str2ull(procfile_lineword(ff, l, 7));
+ steal = str2ull(procfile_lineword(ff, l, 8));
+
+ guest = str2ull(procfile_lineword(ff, l, 9));
+ user -= guest;
+
+ guest_nice = str2ull(procfile_lineword(ff, l, 10));
+ nice -= guest_nice;
+
+ char *title, *type, *context, *family;
+ long priority;
+
+ if(core >= all_cpu_charts_size) {
+ size_t old_cpu_charts_size = all_cpu_charts_size;
+ all_cpu_charts_size = core + 1;
+ all_cpu_charts = reallocz(all_cpu_charts, sizeof(struct cpu_chart) * all_cpu_charts_size);
+ memset(&all_cpu_charts[old_cpu_charts_size], 0, sizeof(struct cpu_chart) * (all_cpu_charts_size - old_cpu_charts_size));
+ }
+ struct cpu_chart *cpu_chart = &all_cpu_charts[core];
+
+ if(unlikely(!cpu_chart->st)) {
+ cpu_chart->id = strdupz(id);
+
+ if(core == 0) {
+ title = "Total CPU utilization";
+ type = "system";
+ context = "system.cpu";
+ family = id;
+ priority = NETDATA_CHART_PRIO_SYSTEM_CPU;
+ }
+ else {
+ title = "Core utilization";
+ type = "cpu";
+ context = "cpu.cpu";
+ family = "utilization";
+ priority = NETDATA_CHART_PRIO_CPU_PER_CORE;
+
+ // TODO: check for /sys/devices/system/cpu/cpu*/cpufreq/scaling_cur_freq
+ // TODO: check for /sys/devices/system/cpu/cpu*/cpufreq/stats/time_in_state
+
+ char filename[FILENAME_MAX + 1];
+ struct stat stbuf;
+
+ if(do_core_throttle_count != CONFIG_BOOLEAN_NO) {
+ snprintfz(filename, FILENAME_MAX, core_throttle_count_filename, id);
+ if (stat(filename, &stbuf) == 0) {
+ cpu_chart->files[CORE_THROTTLE_COUNT_INDEX].filename = strdupz(filename);
+ cpu_chart->files[CORE_THROTTLE_COUNT_INDEX].fd = -1;
+ do_core_throttle_count = CONFIG_BOOLEAN_YES;
+ }
+ }
+
+ if(do_package_throttle_count != CONFIG_BOOLEAN_NO) {
+ snprintfz(filename, FILENAME_MAX, package_throttle_count_filename, id);
+ if (stat(filename, &stbuf) == 0) {
+ cpu_chart->files[PACKAGE_THROTTLE_COUNT_INDEX].filename = strdupz(filename);
+ cpu_chart->files[PACKAGE_THROTTLE_COUNT_INDEX].fd = -1;
+ do_package_throttle_count = CONFIG_BOOLEAN_YES;
+ }
+ }
+
+ if(do_scaling_cur_freq != CONFIG_BOOLEAN_NO) {
+ snprintfz(filename, FILENAME_MAX, scaling_cur_freq_filename, id);
+ if (stat(filename, &stbuf) == 0) {
+ cpu_chart->files[SCALING_CUR_FREQ_INDEX].filename = strdupz(filename);
+ cpu_chart->files[SCALING_CUR_FREQ_INDEX].fd = -1;
+ do_scaling_cur_freq = CONFIG_BOOLEAN_YES;
+ }
+ }
+ }
+
+ cpu_chart->st = rrdset_create_localhost(
+ type
+ , id
+ , NULL
+ , family
+ , context
+ , title
+ , "percentage"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_STAT_NAME
+ , priority + core
+ , update_every
+ , RRDSET_TYPE_STACKED
+ );
+
+ long multiplier = 1;
+ long divisor = 1; // sysconf(_SC_CLK_TCK);
+
+ cpu_chart->rd_guest_nice = rrddim_add(cpu_chart->st, "guest_nice", NULL, multiplier, divisor, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL);
+ cpu_chart->rd_guest = rrddim_add(cpu_chart->st, "guest", NULL, multiplier, divisor, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL);
+ cpu_chart->rd_steal = rrddim_add(cpu_chart->st, "steal", NULL, multiplier, divisor, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL);
+ cpu_chart->rd_softirq = rrddim_add(cpu_chart->st, "softirq", NULL, multiplier, divisor, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL);
+ cpu_chart->rd_irq = rrddim_add(cpu_chart->st, "irq", NULL, multiplier, divisor, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL);
+ cpu_chart->rd_user = rrddim_add(cpu_chart->st, "user", NULL, multiplier, divisor, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL);
+ cpu_chart->rd_system = rrddim_add(cpu_chart->st, "system", NULL, multiplier, divisor, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL);
+ cpu_chart->rd_nice = rrddim_add(cpu_chart->st, "nice", NULL, multiplier, divisor, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL);
+ cpu_chart->rd_iowait = rrddim_add(cpu_chart->st, "iowait", NULL, multiplier, divisor, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL);
+ cpu_chart->rd_idle = rrddim_add(cpu_chart->st, "idle", NULL, multiplier, divisor, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL);
+ rrddim_hide(cpu_chart->st, "idle");
+
+ if(unlikely(core == 0 && cpus_var == NULL))
+ cpus_var = rrdvar_custom_host_variable_create(localhost, "active_processors");
+ }
+ else rrdset_next(cpu_chart->st);
+
+ rrddim_set_by_pointer(cpu_chart->st, cpu_chart->rd_user, user);
+ rrddim_set_by_pointer(cpu_chart->st, cpu_chart->rd_nice, nice);
+ rrddim_set_by_pointer(cpu_chart->st, cpu_chart->rd_system, system);
+ rrddim_set_by_pointer(cpu_chart->st, cpu_chart->rd_idle, idle);
+ rrddim_set_by_pointer(cpu_chart->st, cpu_chart->rd_iowait, iowait);
+ rrddim_set_by_pointer(cpu_chart->st, cpu_chart->rd_irq, irq);
+ rrddim_set_by_pointer(cpu_chart->st, cpu_chart->rd_softirq, softirq);
+ rrddim_set_by_pointer(cpu_chart->st, cpu_chart->rd_steal, steal);
+ rrddim_set_by_pointer(cpu_chart->st, cpu_chart->rd_guest, guest);
+ rrddim_set_by_pointer(cpu_chart->st, cpu_chart->rd_guest_nice, guest_nice);
+ rrdset_done(cpu_chart->st);
+ }
+ }
+ else if(unlikely(hash == hash_intr && strcmp(row_key, "intr") == 0)) {
+ if(likely(do_interrupts)) {
+ static RRDSET *st_intr = NULL;
+ static RRDDIM *rd_interrupts = NULL;
+ unsigned long long value = str2ull(procfile_lineword(ff, l, 1));
+
+ if(unlikely(!st_intr)) {
+ st_intr = rrdset_create_localhost(
+ "system"
+ , "intr"
+ , NULL
+ , "interrupts"
+ , NULL
+ , "CPU Interrupts"
+ , "interrupts/s"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_STAT_NAME
+ , NETDATA_CHART_PRIO_SYSTEM_INTR
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ rrdset_flag_set(st_intr, RRDSET_FLAG_DETAIL);
+
+ rd_interrupts = rrddim_add(st_intr, "interrupts", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else rrdset_next(st_intr);
+
+ rrddim_set_by_pointer(st_intr, rd_interrupts, value);
+ rrdset_done(st_intr);
+ }
+ }
+ else if(unlikely(hash == hash_ctxt && strcmp(row_key, "ctxt") == 0)) {
+ if(likely(do_context)) {
+ static RRDSET *st_ctxt = NULL;
+ static RRDDIM *rd_switches = NULL;
+ unsigned long long value = str2ull(procfile_lineword(ff, l, 1));
+
+ if(unlikely(!st_ctxt)) {
+ st_ctxt = rrdset_create_localhost(
+ "system"
+ , "ctxt"
+ , NULL
+ , "processes"
+ , NULL
+ , "CPU Context Switches"
+ , "context switches/s"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_STAT_NAME
+ , NETDATA_CHART_PRIO_SYSTEM_CTXT
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ rd_switches = rrddim_add(st_ctxt, "switches", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else rrdset_next(st_ctxt);
+
+ rrddim_set_by_pointer(st_ctxt, rd_switches, value);
+ rrdset_done(st_ctxt);
+ }
+ }
+ else if(unlikely(hash == hash_processes && !processes && strcmp(row_key, "processes") == 0)) {
+ processes = str2ull(procfile_lineword(ff, l, 1));
+ }
+ else if(unlikely(hash == hash_procs_running && !running && strcmp(row_key, "procs_running") == 0)) {
+ running = str2ull(procfile_lineword(ff, l, 1));
+ }
+ else if(unlikely(hash == hash_procs_blocked && !blocked && strcmp(row_key, "procs_blocked") == 0)) {
+ blocked = str2ull(procfile_lineword(ff, l, 1));
+ }
+ }
+
+ // --------------------------------------------------------------------
+
+ if(likely(do_forks)) {
+ static RRDSET *st_forks = NULL;
+ static RRDDIM *rd_started = NULL;
+
+ if(unlikely(!st_forks)) {
+ st_forks = rrdset_create_localhost(
+ "system"
+ , "forks"
+ , NULL
+ , "processes"
+ , NULL
+ , "Started Processes"
+ , "processes/s"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_STAT_NAME
+ , NETDATA_CHART_PRIO_SYSTEM_FORKS
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+ rrdset_flag_set(st_forks, RRDSET_FLAG_DETAIL);
+
+ rd_started = rrddim_add(st_forks, "started", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else rrdset_next(st_forks);
+
+ rrddim_set_by_pointer(st_forks, rd_started, processes);
+ rrdset_done(st_forks);
+ }
+
+ // --------------------------------------------------------------------
+
+ if(likely(do_processes)) {
+ static RRDSET *st_processes = NULL;
+ static RRDDIM *rd_running = NULL;
+ static RRDDIM *rd_blocked = NULL;
+
+ if(unlikely(!st_processes)) {
+ st_processes = rrdset_create_localhost(
+ "system"
+ , "processes"
+ , NULL
+ , "processes"
+ , NULL
+ , "System Processes"
+ , "processes"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_STAT_NAME
+ , NETDATA_CHART_PRIO_SYSTEM_PROCESSES
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ rd_running = rrddim_add(st_processes, "running", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ rd_blocked = rrddim_add(st_processes, "blocked", NULL, -1, 1, RRD_ALGORITHM_ABSOLUTE);
+ }
+ else rrdset_next(st_processes);
+
+ rrddim_set_by_pointer(st_processes, rd_running, running);
+ rrddim_set_by_pointer(st_processes, rd_blocked, blocked);
+ rrdset_done(st_processes);
+ }
+
+ if(likely(all_cpu_charts_size > 1)) {
+ if(likely(do_core_throttle_count != CONFIG_BOOLEAN_NO)) {
+ int r = read_per_core_files(&all_cpu_charts[1], all_cpu_charts_size - 1, CORE_THROTTLE_COUNT_INDEX);
+ if(likely(r != -1 && (do_core_throttle_count == CONFIG_BOOLEAN_YES || r > 0))) {
+ do_core_throttle_count = CONFIG_BOOLEAN_YES;
+
+ static RRDSET *st_core_throttle_count = NULL;
+
+ if (unlikely(!st_core_throttle_count))
+ st_core_throttle_count = rrdset_create_localhost(
+ "cpu"
+ , "core_throttling"
+ , NULL
+ , "throttling"
+ , "cpu.core_throttling"
+ , "Core Thermal Throttling Events"
+ , "events/s"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_STAT_NAME
+ , NETDATA_CHART_PRIO_CORE_THROTTLING
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+ else
+ rrdset_next(st_core_throttle_count);
+
+ chart_per_core_files(&all_cpu_charts[1], all_cpu_charts_size - 1, CORE_THROTTLE_COUNT_INDEX, st_core_throttle_count, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rrdset_done(st_core_throttle_count);
+ }
+ }
+
+ if(likely(do_package_throttle_count != CONFIG_BOOLEAN_NO)) {
+ int r = read_per_core_files(&all_cpu_charts[1], all_cpu_charts_size - 1, PACKAGE_THROTTLE_COUNT_INDEX);
+ if(likely(r != -1 && (do_package_throttle_count == CONFIG_BOOLEAN_YES || r > 0))) {
+ do_package_throttle_count = CONFIG_BOOLEAN_YES;
+
+ static RRDSET *st_package_throttle_count = NULL;
+
+ if(unlikely(!st_package_throttle_count))
+ st_package_throttle_count = rrdset_create_localhost(
+ "cpu"
+ , "package_throttling"
+ , NULL
+ , "throttling"
+ , "cpu.package_throttling"
+ , "Package Thermal Throttling Events"
+ , "events/s"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_STAT_NAME
+ , NETDATA_CHART_PRIO_PACKAGE_THROTTLING
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+ else
+ rrdset_next(st_package_throttle_count);
+
+ chart_per_core_files(&all_cpu_charts[1], all_cpu_charts_size - 1, PACKAGE_THROTTLE_COUNT_INDEX, st_package_throttle_count, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rrdset_done(st_package_throttle_count);
+ }
+ }
+
+ if(likely(do_scaling_cur_freq != CONFIG_BOOLEAN_NO)) {
+ int r = read_per_core_files(&all_cpu_charts[1], all_cpu_charts_size - 1, SCALING_CUR_FREQ_INDEX);
+ if(likely(r != -1 && (do_scaling_cur_freq == CONFIG_BOOLEAN_YES || r > 0))) {
+ do_scaling_cur_freq = CONFIG_BOOLEAN_YES;
+
+ static RRDSET *st_scaling_cur_freq = NULL;
+
+ if(unlikely(!st_scaling_cur_freq))
+ st_scaling_cur_freq = rrdset_create_localhost(
+ "cpu"
+ , "scaling_cur_freq"
+ , NULL
+ , "cpufreq"
+ , "cpu.scaling_cur_freq"
+ , "Per CPU Core, Current CPU Scaling Frequency"
+ , "MHz"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_STAT_NAME
+ , 5003
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+ else
+ rrdset_next(st_scaling_cur_freq);
+
+ chart_per_core_files(&all_cpu_charts[1], all_cpu_charts_size - 1, SCALING_CUR_FREQ_INDEX, st_scaling_cur_freq, 1, 1000, RRD_ALGORITHM_ABSOLUTE);
+ rrdset_done(st_scaling_cur_freq);
+ }
+ }
+ }
+
+ if(cpus_var)
+ rrdvar_custom_host_variable_set(localhost, cpus_var, cores_found);
+
+ return 0;
+}
diff --git a/collectors/proc.plugin/proc_sys_kernel_random_entropy_avail.c b/collectors/proc.plugin/proc_sys_kernel_random_entropy_avail.c
new file mode 100644
index 000000000..20d2116ce
--- /dev/null
+++ b/collectors/proc.plugin/proc_sys_kernel_random_entropy_avail.c
@@ -0,0 +1,49 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "plugin_proc.h"
+
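+// chart the single value reported by /proc/sys/kernel/random/entropy_avail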
+int do_proc_sys_kernel_random_entropy_avail(int update_every, usec_t dt) {
+ (void)dt;
+
+ static procfile *ff = NULL;
+
+ if(unlikely(!ff)) {
+ char filename[FILENAME_MAX + 1];
+ snprintfz(filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/proc/sys/kernel/random/entropy_avail");
+ ff = procfile_open(config_get("plugin:proc:/proc/sys/kernel/random/entropy_avail", "filename to monitor", filename), "", PROCFILE_FLAG_DEFAULT);
+ if(unlikely(!ff)) return 1;
+ }
+
+ ff = procfile_readall(ff);
+ if(unlikely(!ff)) return 0; // we return 0, so that we will retry to open it next time
+
+ unsigned long long entropy = str2ull(procfile_lineword(ff, 0, 0));
+
+ static RRDSET *st = NULL;
+ static RRDDIM *rd = NULL;
+
+ if(unlikely(!st)) {
+ st = rrdset_create_localhost(
+ "system"
+ , "entropy"
+ , NULL
+ , "entropy"
+ , NULL
+ , "Available Entropy"
+ , "entropy"
+ , PLUGIN_PROC_NAME
+ , "/proc/sys/kernel/random/entropy_avail"
+ , NETDATA_CHART_PRIO_SYSTEM_ENTROPY
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ rd = rrddim_add(st, "entropy", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ }
+ else rrdset_next(st);
+
+ rrddim_set_by_pointer(st, rd, entropy);
+ rrdset_done(st);
+
+ return 0;
+}
diff --git a/collectors/proc.plugin/proc_uptime.c b/collectors/proc.plugin/proc_uptime.c
new file mode 100644
index 000000000..142ae2d0c
--- /dev/null
+++ b/collectors/proc.plugin/proc_uptime.c
@@ -0,0 +1,105 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "plugin_proc.h"
+
+static inline collected_number uptime_from_boottime(void) {
+#ifdef CLOCK_BOOTTIME_IS_AVAILABLE
+ return now_boottime_usec() / 1000;
+#else
+ error("uptime cannot be read from CLOCK_BOOTTIME on this system.");
+ return 0;
+#endif
+}
+
+static procfile *read_proc_uptime_ff = NULL;
+static inline collected_number read_proc_uptime(void) {
+ if(unlikely(!read_proc_uptime_ff)) {
+ char filename[FILENAME_MAX + 1];
+ snprintfz(filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/proc/uptime");
+
+ read_proc_uptime_ff = procfile_open(config_get("plugin:proc:/proc/uptime", "filename to monitor", filename), " \t", PROCFILE_FLAG_DEFAULT);
+ if(unlikely(!read_proc_uptime_ff)) return 0;
+ }
+
+ read_proc_uptime_ff = procfile_readall(read_proc_uptime_ff);
+ if(unlikely(!read_proc_uptime_ff)) return 0;
+
+ if(unlikely(procfile_lines(read_proc_uptime_ff) < 1)) {
+ error("/proc/uptime has no lines.");
+ return 0;
+ }
+ if(unlikely(procfile_linewords(read_proc_uptime_ff, 0) < 1)) {
+ error("/proc/uptime has less than 1 word in it.");
+ return 0;
+ }
+
+ return (collected_number)(strtold(procfile_lineword(read_proc_uptime_ff, 0, 0), NULL) * 1000.0);
+}
+
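+// on the first run, compare CLOCK_BOOTTIME with /proc/uptime and keep using whichever
+// source is available; CLOCK_BOOTTIME is preferred when both agree within 1 second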
+int do_proc_uptime(int update_every, usec_t dt) {
+ (void)dt;
+
+ static int use_boottime = -1;
+
+ if(unlikely(use_boottime == -1)) {
+ collected_number uptime_boottime = uptime_from_boottime();
+ collected_number uptime_proc = read_proc_uptime();
+
+ long long delta = (long long)uptime_boottime - (long long)uptime_proc;
+ if(delta < 0) delta = -delta;
+
+ if(delta <= 1000 && uptime_boottime != 0) {
+ procfile_close(read_proc_uptime_ff);
+ info("Using now_boottime_usec() for uptime (dt is %lld ms)", delta);
+ use_boottime = 1;
+ }
+ else if(uptime_proc != 0) {
+ info("Using /proc/uptime for uptime (dt is %lld ms)", delta);
+ use_boottime = 0;
+ }
+ else {
+ error("Cannot find any way to read uptime on this system.");
+ return 1;
+ }
+ }
+
+ collected_number uptime;
+ if(use_boottime)
+ uptime = uptime_from_boottime();
+ else
+ uptime = read_proc_uptime();
+
+
+ // --------------------------------------------------------------------
+
+ static RRDSET *st = NULL;
+ static RRDDIM *rd = NULL;
+
+ if(unlikely(!st)) {
+
+ st = rrdset_create_localhost(
+ "system"
+ , "uptime"
+ , NULL
+ , "uptime"
+ , NULL
+ , "System Uptime"
+ , "seconds"
+ , PLUGIN_PROC_NAME
+ , "/proc/uptime"
+ , NETDATA_CHART_PRIO_SYSTEM_UPTIME
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ rd = rrddim_add(st, "uptime", NULL, 1, 1000, RRD_ALGORITHM_ABSOLUTE);
+ }
+ else
+ rrdset_next(st);
+
+ rrddim_set_by_pointer(st, rd, uptime);
+
+ rrdset_done(st);
+
+ return 0;
+}
diff --git a/collectors/proc.plugin/proc_vmstat.c b/collectors/proc.plugin/proc_vmstat.c
new file mode 100644
index 000000000..f7c93c20a
--- /dev/null
+++ b/collectors/proc.plugin/proc_vmstat.c
@@ -0,0 +1,259 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "plugin_proc.h"
+
+#define PLUGIN_PROC_MODULE_VMSTAT_NAME "/proc/vmstat"
+
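+// collect /proc/vmstat: swap i/o, paged disk i/o, page faults and (when available) NUMA counters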
+int do_proc_vmstat(int update_every, usec_t dt) {
+ (void)dt;
+
+ static procfile *ff = NULL;
+ static int do_swapio = -1, do_io = -1, do_pgfaults = -1, do_numa = -1;
+ static int has_numa = -1;
+
+ static ARL_BASE *arl_base = NULL;
+ static unsigned long long numa_foreign = 0ULL;
+ static unsigned long long numa_hint_faults = 0ULL;
+ static unsigned long long numa_hint_faults_local = 0ULL;
+ static unsigned long long numa_huge_pte_updates = 0ULL;
+ static unsigned long long numa_interleave = 0ULL;
+ static unsigned long long numa_local = 0ULL;
+ static unsigned long long numa_other = 0ULL;
+ static unsigned long long numa_pages_migrated = 0ULL;
+ static unsigned long long numa_pte_updates = 0ULL;
+ static unsigned long long pgfault = 0ULL;
+ static unsigned long long pgmajfault = 0ULL;
+ static unsigned long long pgpgin = 0ULL;
+ static unsigned long long pgpgout = 0ULL;
+ static unsigned long long pswpin = 0ULL;
+ static unsigned long long pswpout = 0ULL;
+
+ if(unlikely(!arl_base)) {
+ do_swapio = config_get_boolean_ondemand("plugin:proc:/proc/vmstat", "swap i/o", CONFIG_BOOLEAN_AUTO);
+ do_io = config_get_boolean("plugin:proc:/proc/vmstat", "disk i/o", 1);
+ do_pgfaults = config_get_boolean("plugin:proc:/proc/vmstat", "memory page faults", 1);
+ do_numa = config_get_boolean_ondemand("plugin:proc:/proc/vmstat", "system-wide numa metric summary", CONFIG_BOOLEAN_AUTO);
+
+
+ arl_base = arl_create("vmstat", NULL, 60);
+ arl_expect(arl_base, "pgfault", &pgfault);
+ arl_expect(arl_base, "pgmajfault", &pgmajfault);
+ arl_expect(arl_base, "pgpgin", &pgpgin);
+ arl_expect(arl_base, "pgpgout", &pgpgout);
+ arl_expect(arl_base, "pswpin", &pswpin);
+ arl_expect(arl_base, "pswpout", &pswpout);
+
+ if(do_numa == CONFIG_BOOLEAN_YES || (do_numa == CONFIG_BOOLEAN_AUTO && get_numa_node_count() >= 2)) {
+ arl_expect(arl_base, "numa_foreign", &numa_foreign);
+ arl_expect(arl_base, "numa_hint_faults_local", &numa_hint_faults_local);
+ arl_expect(arl_base, "numa_hint_faults", &numa_hint_faults);
+ arl_expect(arl_base, "numa_huge_pte_updates", &numa_huge_pte_updates);
+ arl_expect(arl_base, "numa_interleave", &numa_interleave);
+ arl_expect(arl_base, "numa_local", &numa_local);
+ arl_expect(arl_base, "numa_other", &numa_other);
+ arl_expect(arl_base, "numa_pages_migrated", &numa_pages_migrated);
+ arl_expect(arl_base, "numa_pte_updates", &numa_pte_updates);
+ }
+ else {
+ // Do not expect numa metrics when they are not needed.
+ // By not adding them, the ARL will stop processing the file
+ // when all the expected metrics are collected.
+ // Also ARL will not parse their values.
+ has_numa = 0;
+ do_numa = CONFIG_BOOLEAN_NO;
+ }
+ }
+
+ if(unlikely(!ff)) {
+ char filename[FILENAME_MAX + 1];
+ snprintfz(filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/proc/vmstat");
+ ff = procfile_open(config_get("plugin:proc:/proc/vmstat", "filename to monitor", filename), " \t:", PROCFILE_FLAG_DEFAULT);
+ if(unlikely(!ff)) return 1;
+ }
+
+ ff = procfile_readall(ff);
+ if(unlikely(!ff)) return 0; // we return 0, so that we will retry to open it next time
+
+ size_t lines = procfile_lines(ff), l;
+
+ arl_begin(arl_base);
+ for(l = 0; l < lines ;l++) {
+ size_t words = procfile_linewords(ff, l);
+ if(unlikely(words < 2)) {
+ if(unlikely(words)) error("Cannot read /proc/vmstat line %zu. Expected 2 params, read %zu.", l, words);
+ continue;
+ }
+
+ if(unlikely(arl_check(arl_base,
+ procfile_lineword(ff, l, 0),
+ procfile_lineword(ff, l, 1)))) break;
+ }
+
+ // --------------------------------------------------------------------
+
+ if(pswpin || pswpout || do_swapio == CONFIG_BOOLEAN_YES) {
+ do_swapio = CONFIG_BOOLEAN_YES;
+
+ static RRDSET *st_swapio = NULL;
+ static RRDDIM *rd_in = NULL, *rd_out = NULL;
+
+ if(unlikely(!st_swapio)) {
+ st_swapio = rrdset_create_localhost(
+ "system"
+ , "swapio"
+ , NULL
+ , "swap"
+ , NULL
+ , "Swap I/O"
+ , "kilobytes/s"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_VMSTAT_NAME
+ , NETDATA_CHART_PRIO_SYSTEM_SWAPIO
+ , update_every
+ , RRDSET_TYPE_AREA
+ );
+
+ rd_in = rrddim_add(st_swapio, "in", NULL, sysconf(_SC_PAGESIZE), 1024, RRD_ALGORITHM_INCREMENTAL);
+ rd_out = rrddim_add(st_swapio, "out", NULL, -sysconf(_SC_PAGESIZE), 1024, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else rrdset_next(st_swapio);
+
+ rrddim_set_by_pointer(st_swapio, rd_in, pswpin);
+ rrddim_set_by_pointer(st_swapio, rd_out, pswpout);
+ rrdset_done(st_swapio);
+ }
+
+ // --------------------------------------------------------------------
+
+ if(do_io) {
+ static RRDSET *st_io = NULL;
+ static RRDDIM *rd_in = NULL, *rd_out = NULL;
+
+ if(unlikely(!st_io)) {
+ st_io = rrdset_create_localhost(
+ "system"
+ , "pgpgio"
+ , NULL
+ , "disk"
+ , NULL
+ , "Memory Paged from/to disk"
+ , "kilobytes/s"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_VMSTAT_NAME
+ , NETDATA_CHART_PRIO_SYSTEM_PGPGIO
+ , update_every
+ , RRDSET_TYPE_AREA
+ );
+
+ rd_in = rrddim_add(st_io, "in", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_out = rrddim_add(st_io, "out", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else rrdset_next(st_io);
+
+ rrddim_set_by_pointer(st_io, rd_in, pgpgin);
+ rrddim_set_by_pointer(st_io, rd_out, pgpgout);
+ rrdset_done(st_io);
+ }
+
+ // --------------------------------------------------------------------
+
+ if(do_pgfaults) {
+ static RRDSET *st_pgfaults = NULL;
+ static RRDDIM *rd_minor = NULL, *rd_major = NULL;
+
+ if(unlikely(!st_pgfaults)) {
+ st_pgfaults = rrdset_create_localhost(
+ "mem"
+ , "pgfaults"
+ , NULL
+ , "system"
+ , NULL
+ , "Memory Page Faults"
+ , "page faults/s"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_VMSTAT_NAME
+ , NETDATA_CHART_PRIO_MEM_SYSTEM_PGFAULTS
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ rrdset_flag_set(st_pgfaults, RRDSET_FLAG_DETAIL);
+
+ rd_minor = rrddim_add(st_pgfaults, "minor", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_major = rrddim_add(st_pgfaults, "major", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else rrdset_next(st_pgfaults);
+
+ rrddim_set_by_pointer(st_pgfaults, rd_minor, pgfault);
+ rrddim_set_by_pointer(st_pgfaults, rd_major, pgmajfault);
+ rrdset_done(st_pgfaults);
+ }
+
+ // --------------------------------------------------------------------
+
+ // Ondemand criteria for NUMA. Since this won't change at run time, we
+ // check it only once. We check whether the node count is >= 2 because
+ // single-node systems have uninteresting statistics (since all accesses
+ // are local).
+ if(unlikely(has_numa == -1))
+
+ has_numa = (numa_local || numa_foreign || numa_interleave || numa_other || numa_pte_updates ||
+ numa_huge_pte_updates || numa_hint_faults || numa_hint_faults_local || numa_pages_migrated) ? 1 : 0;
+
+ if(do_numa == CONFIG_BOOLEAN_YES || (do_numa == CONFIG_BOOLEAN_AUTO && has_numa)) {
+ do_numa = CONFIG_BOOLEAN_YES;
+
+ static RRDSET *st_numa = NULL;
+ static RRDDIM *rd_local = NULL, *rd_foreign = NULL, *rd_interleave = NULL, *rd_other = NULL, *rd_pte_updates = NULL, *rd_huge_pte_updates = NULL, *rd_hint_faults = NULL, *rd_hint_faults_local = NULL, *rd_pages_migrated = NULL;
+
+ if(unlikely(!st_numa)) {
+ st_numa = rrdset_create_localhost(
+ "mem"
+ , "numa"
+ , NULL
+ , "numa"
+ , NULL
+ , "NUMA events"
+ , "events/s"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_VMSTAT_NAME
+ , NETDATA_CHART_PRIO_MEM_NUMA
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ rrdset_flag_set(st_numa, RRDSET_FLAG_DETAIL);
+
+ // These depend on CONFIG_NUMA in the kernel.
+ rd_local = rrddim_add(st_numa, "local", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_foreign = rrddim_add(st_numa, "foreign", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_interleave = rrddim_add(st_numa, "interleave", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_other = rrddim_add(st_numa, "other", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+
+ // The following stats depend on CONFIG_NUMA_BALANCING in the
+ // kernel.
+ rd_pte_updates = rrddim_add(st_numa, "pte_updates", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_huge_pte_updates = rrddim_add(st_numa, "huge_pte_updates", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_hint_faults = rrddim_add(st_numa, "hint_faults", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_hint_faults_local = rrddim_add(st_numa, "hint_faults_local", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_pages_migrated = rrddim_add(st_numa, "pages_migrated", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else rrdset_next(st_numa);
+
+ rrddim_set_by_pointer(st_numa, rd_local, numa_local);
+ rrddim_set_by_pointer(st_numa, rd_foreign, numa_foreign);
+ rrddim_set_by_pointer(st_numa, rd_interleave, numa_interleave);
+ rrddim_set_by_pointer(st_numa, rd_other, numa_other);
+
+ rrddim_set_by_pointer(st_numa, rd_pte_updates, numa_pte_updates);
+ rrddim_set_by_pointer(st_numa, rd_huge_pte_updates, numa_huge_pte_updates);
+ rrddim_set_by_pointer(st_numa, rd_hint_faults, numa_hint_faults);
+ rrddim_set_by_pointer(st_numa, rd_hint_faults_local, numa_hint_faults_local);
+ rrddim_set_by_pointer(st_numa, rd_pages_migrated, numa_pages_migrated);
+
+ rrdset_done(st_numa);
+ }
+
+ return 0;
+}
+
diff --git a/collectors/proc.plugin/sys_devices_system_edac_mc.c b/collectors/proc.plugin/sys_devices_system_edac_mc.c
new file mode 100644
index 000000000..03cbfff83
--- /dev/null
+++ b/collectors/proc.plugin/sys_devices_system_edac_mc.c
@@ -0,0 +1,206 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "plugin_proc.h"
+
+struct mc {
+ char *name;
+ char ce_updated;
+ char ue_updated;
+
+ char *ce_count_filename;
+ char *ue_count_filename;
+
+ procfile *ce_ff;
+ procfile *ue_ff;
+
+ collected_number ce_count;
+ collected_number ue_count;
+
+ RRDDIM *ce_rd;
+ RRDDIM *ue_rd;
+
+ struct mc *next;
+};
+static struct mc *mc_root = NULL;
+
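+// scan the edac/mc directory for mcN memory controllers and remember
+// the ce_count / ue_count filenames of each one found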
+static void find_all_mc() {
+ char name[FILENAME_MAX + 1];
+ snprintfz(name, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/sys/devices/system/edac/mc");
+ char *dirname = config_get("plugin:proc:/sys/devices/system/edac/mc", "directory to monitor", name);
+
+ DIR *dir = opendir(dirname);
+ if(unlikely(!dir)) {
+ error("Cannot read ECC memory errors directory '%s'", dirname);
+ return;
+ }
+
+ struct dirent *de = NULL;
+ while((de = readdir(dir))) {
+ if(de->d_type == DT_DIR && de->d_name[0] == 'm' && de->d_name[1] == 'c' && isdigit(de->d_name[2])) {
+ struct mc *m = callocz(1, sizeof(struct mc));
+ m->name = strdupz(de->d_name);
+
+ struct stat st;
+
+ snprintfz(name, FILENAME_MAX, "%s/%s/ce_count", dirname, de->d_name);
+ if(stat(name, &st) != -1)
+ m->ce_count_filename = strdupz(name);
+
+ snprintfz(name, FILENAME_MAX, "%s/%s/ue_count", dirname, de->d_name);
+ if(stat(name, &st) != -1)
+ m->ue_count_filename = strdupz(name);
+
+ if(!m->ce_count_filename && !m->ue_count_filename) {
+ freez(m->name);
+ freez(m);
+ }
+ else {
+ m->next = mc_root;
+ mc_root = m;
+ }
+ }
+ }
+
+ closedir(dir);
+}
+
+int do_proc_sys_devices_system_edac_mc(int update_every, usec_t dt) {
+ (void)dt;
+
+ if(unlikely(mc_root == NULL)) {
+ find_all_mc();
+ if(unlikely(mc_root == NULL))
+ return 1;
+ }
+
+ static int do_ce = -1, do_ue = -1;
+ calculated_number ce_sum = 0, ue_sum = 0;
+ struct mc *m;
+
+ if(unlikely(do_ce == -1)) {
+ do_ce = config_get_boolean_ondemand("plugin:proc:/sys/devices/system/edac/mc", "enable ECC memory correctable errors", CONFIG_BOOLEAN_AUTO);
+ do_ue = config_get_boolean_ondemand("plugin:proc:/sys/devices/system/edac/mc", "enable ECC memory uncorrectable errors", CONFIG_BOOLEAN_AUTO);
+ }
+
+ if(do_ce != CONFIG_BOOLEAN_NO) {
+ for(m = mc_root; m; m = m->next) {
+ if(m->ce_count_filename) {
+ m->ce_updated = 0;
+
+ if(unlikely(!m->ce_ff)) {
+ m->ce_ff = procfile_open(m->ce_count_filename, " \t", PROCFILE_FLAG_DEFAULT);
+ if(unlikely(!m->ce_ff))
+ continue;
+ }
+
+ m->ce_ff = procfile_readall(m->ce_ff);
+ if(unlikely(!m->ce_ff || procfile_lines(m->ce_ff) < 1 || procfile_linewords(m->ce_ff, 0) < 1))
+ continue;
+
+ m->ce_count = str2ull(procfile_lineword(m->ce_ff, 0, 0));
+ ce_sum += m->ce_count;
+ m->ce_updated = 1;
+ }
+ }
+ }
+
+ if(do_ue != CONFIG_BOOLEAN_NO) {
+ for(m = mc_root; m; m = m->next) {
+ if(m->ue_count_filename) {
+ m->ue_updated = 0;
+
+ if(unlikely(!m->ue_ff)) {
+ m->ue_ff = procfile_open(m->ue_count_filename, " \t", PROCFILE_FLAG_DEFAULT);
+ if(unlikely(!m->ue_ff))
+ continue;
+ }
+
+ m->ue_ff = procfile_readall(m->ue_ff);
+ if(unlikely(!m->ue_ff || procfile_lines(m->ue_ff) < 1 || procfile_linewords(m->ue_ff, 0) < 1))
+ continue;
+
+ m->ue_count = str2ull(procfile_lineword(m->ue_ff, 0, 0));
+ ue_sum += m->ue_count;
+ m->ue_updated = 1;
+ }
+ }
+ }
+
+ // --------------------------------------------------------------------
+
+ if(do_ce == CONFIG_BOOLEAN_YES || (do_ce == CONFIG_BOOLEAN_AUTO && ce_sum > 0)) {
+ do_ce = CONFIG_BOOLEAN_YES;
+
+ static RRDSET *ce_st = NULL;
+
+ if(unlikely(!ce_st)) {
+ ce_st = rrdset_create_localhost(
+ "mem"
+ , "ecc_ce"
+ , NULL
+ , "ecc"
+ , NULL
+ , "ECC Memory Correctable Errors"
+ , "errors"
+ , PLUGIN_PROC_NAME
+ , "/sys/devices/system/edac/mc"
+ , NETDATA_CHART_PRIO_MEM_HW_ECC_CE
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+ }
+ else
+ rrdset_next(ce_st);
+
+ for(m = mc_root; m; m = m->next) {
+ if (m->ce_count_filename && m->ce_updated) {
+ if(unlikely(!m->ce_rd))
+ m->ce_rd = rrddim_add(ce_st, m->name, NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+
+ rrddim_set_by_pointer(ce_st, m->ce_rd, m->ce_count);
+ }
+ }
+
+ rrdset_done(ce_st);
+ }
+
+ // --------------------------------------------------------------------
+
+ if(do_ue == CONFIG_BOOLEAN_YES || (do_ue == CONFIG_BOOLEAN_AUTO && ue_sum > 0)) {
+ do_ue = CONFIG_BOOLEAN_YES;
+
+ static RRDSET *ue_st = NULL;
+
+ if(unlikely(!ue_st)) {
+ ue_st = rrdset_create_localhost(
+ "mem"
+ , "ecc_ue"
+ , NULL
+ , "ecc"
+ , NULL
+ , "ECC Memory Uncorrectable Errors"
+ , "errors"
+ , PLUGIN_PROC_NAME
+ , "/sys/devices/system/edac/mc"
+ , NETDATA_CHART_PRIO_MEM_HW_ECC_UE
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+ }
+ else
+ rrdset_next(ue_st);
+
+ for(m = mc_root; m; m = m->next) {
+ if (m->ue_count_filename && m->ue_updated) {
+ if(unlikely(!m->ue_rd))
+ m->ue_rd = rrddim_add(ue_st, m->name, NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+
+ rrddim_set_by_pointer(ue_st, m->ue_rd, m->ue_count);
+ }
+ }
+
+ rrdset_done(ue_st);
+ }
+
+ return 0;
+}
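The ce_count/ue_count files read above are one-line counters under /sys/devices/system/edac/mc/mc*/. A rough standalone equivalent of the correctable-error sum, sketched with POSIX glob() instead of the collector's directory walk, could be:

    /* Sketch only: sums correctable ECC errors across all memory controllers.
     * The glob pattern assumes the standard sysfs EDAC layout. */
    #include <glob.h>
    #include <stdio.h>

    int main(void) {
        glob_t g;
        unsigned long long total = 0;

        if(glob("/sys/devices/system/edac/mc/mc*/ce_count", 0, NULL, &g) != 0)
            return 1;                                // no EDAC controllers found

        for(size_t i = 0; i < g.gl_pathc; i++) {
            FILE *fp = fopen(g.gl_pathv[i], "r");
            if(!fp) continue;
            unsigned long long v = 0;
            if(fscanf(fp, "%llu", &v) == 1) total += v;
            fclose(fp);
        }

        globfree(&g);
        printf("correctable ECC errors: %llu\n", total);
        return 0;
    }
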
diff --git a/collectors/proc.plugin/sys_devices_system_node.c b/collectors/proc.plugin/sys_devices_system_node.c
new file mode 100644
index 000000000..6e6d0acca
--- /dev/null
+++ b/collectors/proc.plugin/sys_devices_system_node.c
@@ -0,0 +1,163 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "plugin_proc.h"
+
+struct node {
+ char *name;
+ char *numastat_filename;
+ procfile *numastat_ff;
+ RRDSET *numastat_st;
+ struct node *next;
+};
+static struct node *numa_root = NULL;
+
+static int find_all_nodes() {
+ int numa_node_count = 0;
+ char name[FILENAME_MAX + 1];
+ snprintfz(name, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/sys/devices/system/node");
+ char *dirname = config_get("plugin:proc:/sys/devices/system/node", "directory to monitor", name);
+
+ DIR *dir = opendir(dirname);
+ if(!dir) {
+ error("Cannot read NUMA node directory '%s'", dirname);
+ return 0;
+ }
+
+ struct dirent *de = NULL;
+ while((de = readdir(dir))) {
+ if(de->d_type != DT_DIR)
+ continue;
+
+ if(strncmp(de->d_name, "node", 4) != 0)
+ continue;
+
+ if(!isdigit(de->d_name[4]))
+ continue;
+
+ numa_node_count++;
+
+ struct node *m = callocz(1, sizeof(struct node));
+ m->name = strdupz(de->d_name);
+
+ struct stat st;
+
+ snprintfz(name, FILENAME_MAX, "%s/%s/numastat", dirname, de->d_name);
+ if(stat(name, &st) == -1) {
+ freez(m->name);
+ freez(m);
+ continue;
+ }
+
+ m->numastat_filename = strdupz(name);
+
+ m->next = numa_root;
+ numa_root = m;
+ }
+
+ closedir(dir);
+
+ return numa_node_count;
+}
+
+int do_proc_sys_devices_system_node(int update_every, usec_t dt) {
+ (void)dt;
+
+ static uint32_t hash_local_node = 0, hash_numa_foreign = 0, hash_interleave_hit = 0, hash_other_node = 0, hash_numa_hit = 0, hash_numa_miss = 0;
+ static int do_numastat = -1, numa_node_count = 0;
+ struct node *m;
+
+ if(unlikely(numa_root == NULL)) {
+ numa_node_count = find_all_nodes();
+ if(unlikely(numa_root == NULL))
+ return 1;
+ }
+
+ if(unlikely(do_numastat == -1)) {
+ do_numastat = config_get_boolean_ondemand("plugin:proc:/sys/devices/system/node", "enable per-node numa metrics", CONFIG_BOOLEAN_AUTO);
+
+ hash_local_node = simple_hash("local_node");
+ hash_numa_foreign = simple_hash("numa_foreign");
+ hash_interleave_hit = simple_hash("interleave_hit");
+ hash_other_node = simple_hash("other_node");
+ hash_numa_hit = simple_hash("numa_hit");
+ hash_numa_miss = simple_hash("numa_miss");
+ }
+
+ if(do_numastat == CONFIG_BOOLEAN_YES || (do_numastat == CONFIG_BOOLEAN_AUTO && numa_node_count >= 2)) {
+ for(m = numa_root; m; m = m->next) {
+ if(m->numastat_filename) {
+
+ if(unlikely(!m->numastat_ff)) {
+ m->numastat_ff = procfile_open(m->numastat_filename, " ", PROCFILE_FLAG_DEFAULT);
+
+ if(unlikely(!m->numastat_ff))
+ continue;
+ }
+
+ m->numastat_ff = procfile_readall(m->numastat_ff);
+ if(unlikely(!m->numastat_ff || procfile_lines(m->numastat_ff) < 1 || procfile_linewords(m->numastat_ff, 0) < 1))
+ continue;
+
+ if(unlikely(!m->numastat_st)) {
+ m->numastat_st = rrdset_create_localhost(
+ "mem"
+ , m->name
+ , NULL
+ , "numa"
+ , NULL
+ , "NUMA events"
+ , "events/s"
+ , PLUGIN_PROC_NAME
+ , "/sys/devices/system/node"
+ , NETDATA_CHART_PRIO_MEM_NUMA_NODES
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ rrdset_flag_set(m->numastat_st, RRDSET_FLAG_DETAIL);
+
+ rrddim_add(m->numastat_st, "numa_hit", "hit", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rrddim_add(m->numastat_st, "numa_miss", "miss", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rrddim_add(m->numastat_st, "local_node", "local", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rrddim_add(m->numastat_st, "numa_foreign", "foreign", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rrddim_add(m->numastat_st, "interleave_hit", "interleave", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rrddim_add(m->numastat_st, "other_node", "other", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+
+ }
+ else rrdset_next(m->numastat_st);
+
+ size_t lines = procfile_lines(m->numastat_ff), l;
+ for(l = 0; l < lines; l++) {
+ size_t words = procfile_linewords(m->numastat_ff, l);
+
+ if(unlikely(words < 2)) {
+ if(unlikely(words))
+ error("Cannot read %s numastat line %zu. Expected 2 params, read %zu.", m->name, l, words);
+ continue;
+ }
+
+ char *name = procfile_lineword(m->numastat_ff, l, 0);
+ char *value = procfile_lineword(m->numastat_ff, l, 1);
+
+ if (unlikely(!name || !*name || !value || !*value))
+ continue;
+
+ uint32_t hash = simple_hash(name);
+ if(likely(
+ (hash == hash_numa_hit && !strcmp(name, "numa_hit"))
+ || (hash == hash_numa_miss && !strcmp(name, "numa_miss"))
+ || (hash == hash_local_node && !strcmp(name, "local_node"))
+ || (hash == hash_numa_foreign && !strcmp(name, "numa_foreign"))
+ || (hash == hash_interleave_hit && !strcmp(name, "interleave_hit"))
+ || (hash == hash_other_node && !strcmp(name, "other_node"))
+ ))
+ rrddim_set(m->numastat_st, name, (collected_number)str2kernel_uint_t(value));
+ }
+
+ rrdset_done(m->numastat_st);
+ }
+ }
+ }
+
+ return 0;
+}
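Each nodeN/numastat file parsed above is a simple "name value" list, one counter per line. A minimal sketch that dumps node0's counters directly (the path is the default sysfs location, without netdata's host prefix handling):

    /* Sketch only: prints the per-node counters charted by
     * do_proc_sys_devices_system_node(), for node0. */
    #include <stdio.h>

    int main(void) {
        FILE *fp = fopen("/sys/devices/system/node/node0/numastat", "r");
        if(!fp) return 1;

        char name[64];
        unsigned long long value;
        while(fscanf(fp, "%63s %llu", name, &value) == 2)
            printf("%-16s %llu\n", name, value);     // e.g. numa_hit, numa_miss, local_node

        fclose(fp);
        return 0;
    }
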
diff --git a/collectors/proc.plugin/sys_fs_btrfs.c b/collectors/proc.plugin/sys_fs_btrfs.c
new file mode 100644
index 000000000..ed980cea5
--- /dev/null
+++ b/collectors/proc.plugin/sys_fs_btrfs.c
@@ -0,0 +1,722 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "plugin_proc.h"
+
+#define PLUGIN_PROC_MODULE_BTRFS_NAME "/sys/fs/btrfs"
+
+typedef struct btrfs_disk {
+ char *name;
+ uint32_t hash;
+ int exists;
+
+ char *size_filename;
+ char *hw_sector_size_filename;
+ unsigned long long size;
+ unsigned long long hw_sector_size;
+
+ struct btrfs_disk *next;
+} BTRFS_DISK;
+
+typedef struct btrfs_node {
+ int exists;
+ int logged_error;
+
+ char *id;
+ uint32_t hash;
+
+ char *label;
+
+ // unsigned long long int sectorsize;
+ // unsigned long long int nodesize;
+ // unsigned long long int quota_override;
+
+ #define declare_btrfs_allocation_section_field(SECTION, FIELD) \
+ char *allocation_ ## SECTION ## _ ## FIELD ## _filename; \
+ unsigned long long int allocation_ ## SECTION ## _ ## FIELD;
+
+ #define declare_btrfs_allocation_field(FIELD) \
+ char *allocation_ ## FIELD ## _filename; \
+ unsigned long long int allocation_ ## FIELD;
+
+ RRDSET *st_allocation_disks;
+ RRDDIM *rd_allocation_disks_unallocated;
+ RRDDIM *rd_allocation_disks_data_used;
+ RRDDIM *rd_allocation_disks_data_free;
+ RRDDIM *rd_allocation_disks_metadata_used;
+ RRDDIM *rd_allocation_disks_metadata_free;
+ RRDDIM *rd_allocation_disks_system_used;
+ RRDDIM *rd_allocation_disks_system_free;
+ unsigned long long all_disks_total;
+
+ RRDSET *st_allocation_data;
+ RRDDIM *rd_allocation_data_free;
+ RRDDIM *rd_allocation_data_used;
+ declare_btrfs_allocation_section_field(data, total_bytes)
+ declare_btrfs_allocation_section_field(data, bytes_used)
+ declare_btrfs_allocation_section_field(data, disk_total)
+ declare_btrfs_allocation_section_field(data, disk_used)
+
+ RRDSET *st_allocation_metadata;
+ RRDDIM *rd_allocation_metadata_free;
+ RRDDIM *rd_allocation_metadata_used;
+ RRDDIM *rd_allocation_metadata_reserved;
+ declare_btrfs_allocation_section_field(metadata, total_bytes)
+ declare_btrfs_allocation_section_field(metadata, bytes_used)
+ declare_btrfs_allocation_section_field(metadata, disk_total)
+ declare_btrfs_allocation_section_field(metadata, disk_used)
+ //declare_btrfs_allocation_field(global_rsv_reserved)
+ declare_btrfs_allocation_field(global_rsv_size)
+
+ RRDSET *st_allocation_system;
+ RRDDIM *rd_allocation_system_free;
+ RRDDIM *rd_allocation_system_used;
+ declare_btrfs_allocation_section_field(system, total_bytes)
+ declare_btrfs_allocation_section_field(system, bytes_used)
+ declare_btrfs_allocation_section_field(system, disk_total)
+ declare_btrfs_allocation_section_field(system, disk_used)
+
+ BTRFS_DISK *disks;
+
+ struct btrfs_node *next;
+} BTRFS_NODE;
+
+static BTRFS_NODE *nodes = NULL;
+
+static inline void btrfs_free_disk(BTRFS_DISK *d) {
+ freez(d->name);
+ freez(d->size_filename);
+ freez(d->hw_sector_size_filename);
+ freez(d);
+}
+
+static inline void btrfs_free_node(BTRFS_NODE *node) {
+ // info("BTRFS: destroying '%s'", node->id);
+
+ if(node->st_allocation_disks)
+ rrdset_is_obsolete(node->st_allocation_disks);
+
+ if(node->st_allocation_data)
+ rrdset_is_obsolete(node->st_allocation_data);
+
+ if(node->st_allocation_metadata)
+ rrdset_is_obsolete(node->st_allocation_metadata);
+
+ if(node->st_allocation_system)
+ rrdset_is_obsolete(node->st_allocation_system);
+
+ freez(node->allocation_data_bytes_used_filename);
+ freez(node->allocation_data_total_bytes_filename);
+
+ freez(node->allocation_metadata_bytes_used_filename);
+ freez(node->allocation_metadata_total_bytes_filename);
+
+ freez(node->allocation_system_bytes_used_filename);
+ freez(node->allocation_system_total_bytes_filename);
+
+ while(node->disks) {
+ BTRFS_DISK *d = node->disks;
+ node->disks = node->disks->next;
+ btrfs_free_disk(d);
+ }
+
+ freez(node->label);
+ freez(node->id);
+ freez(node);
+}
+
+static inline int find_btrfs_disks(BTRFS_NODE *node, const char *path) {
+ char filename[FILENAME_MAX + 1];
+
+ node->all_disks_total = 0;
+
+ BTRFS_DISK *d;
+ for(d = node->disks ; d ; d = d->next)
+ d->exists = 0;
+
+ DIR *dir = opendir(path);
+ if (!dir) {
+ if(!node->logged_error) {
+ error("BTRFS: Cannot open directory '%s'.", path);
+ node->logged_error = 1;
+ }
+ return 1;
+ }
+ node->logged_error = 0;
+
+ struct dirent *de = NULL;
+ while ((de = readdir(dir))) {
+ if (de->d_type != DT_LNK
+ || !strcmp(de->d_name, ".")
+ || !strcmp(de->d_name, "..")
+ ) {
+ // info("BTRFS: ignoring '%s'", de->d_name);
+ continue;
+ }
+
+ uint32_t hash = simple_hash(de->d_name);
+
+ // --------------------------------------------------------------------
+ // search for it
+
+ for(d = node->disks ; d ; d = d->next) {
+ if(hash == d->hash && !strcmp(de->d_name, d->name))
+ break;
+ }
+
+ // --------------------------------------------------------------------
+ // did we find it?
+
+ if(!d) {
+ d = callocz(sizeof(BTRFS_DISK), 1);
+
+ d->name = strdupz(de->d_name);
+ d->hash = simple_hash(d->name);
+
+ snprintfz(filename, FILENAME_MAX, "%s/%s/size", path, de->d_name);
+ d->size_filename = strdupz(filename);
+
+ // for bcache
+ snprintfz(filename, FILENAME_MAX, "%s/%s/bcache/../queue/hw_sector_size", path, de->d_name);
+ struct stat sb;
+ if(stat(filename, &sb) == -1) {
+ // for disks
+ snprintfz(filename, FILENAME_MAX, "%s/%s/queue/hw_sector_size", path, de->d_name);
+ if(stat(filename, &sb) == -1)
+ // for partitions
+ snprintfz(filename, FILENAME_MAX, "%s/%s/../queue/hw_sector_size", path, de->d_name);
+ }
+
+ d->hw_sector_size_filename = strdupz(filename);
+
+ // link it
+ d->next = node->disks;
+ node->disks = d;
+ }
+
+ d->exists = 1;
+
+
+ // --------------------------------------------------------------------
+ // update the values
+
+ if(read_single_number_file(d->size_filename, &d->size) != 0) {
+ error("BTRFS: failed to read '%s'", d->size_filename);
+ d->exists = 0;
+ continue;
+ }
+
+ if(read_single_number_file(d->hw_sector_size_filename, &d->hw_sector_size) != 0) {
+ error("BTRFS: failed to read '%s'", d->hw_sector_size_filename);
+ d->exists = 0;
+ continue;
+ }
+
+ node->all_disks_total += d->size * d->hw_sector_size;
+ }
+ closedir(dir);
+
+ // ------------------------------------------------------------------------
+ // cleanup
+
+ BTRFS_DISK *last = NULL;
+ d = node->disks;
+
+ while(d) {
+ if(unlikely(!d->exists)) {
+ if(unlikely(node->disks == d)) {
+ node->disks = d->next;
+ btrfs_free_disk(d);
+ d = node->disks;
+ last = NULL;
+ }
+ else {
+ last->next = d->next;
+ btrfs_free_disk(d);
+ d = last->next;
+ }
+
+ continue;
+ }
+
+ last = d;
+ d = d->next;
+ }
+
+ return 0;
+}
+
+
+static inline int find_all_btrfs_pools(const char *path) {
+ static int logged_error = 0;
+ char filename[FILENAME_MAX + 1];
+
+ BTRFS_NODE *node;
+ for(node = nodes ; node ; node = node->next)
+ node->exists = 0;
+
+ DIR *dir = opendir(path);
+ if (!dir) {
+ if(!logged_error) {
+ error("BTRFS: Cannot open directory '%s'.", path);
+ logged_error = 1;
+ }
+ return 1;
+ }
+ logged_error = 0;
+
+ struct dirent *de = NULL;
+ while ((de = readdir(dir))) {
+ if(de->d_type != DT_DIR
+ || !strcmp(de->d_name, ".")
+ || !strcmp(de->d_name, "..")
+ || !strcmp(de->d_name, "features")
+ ) {
+ // info("BTRFS: ignoring '%s'", de->d_name);
+ continue;
+ }
+
+ uint32_t hash = simple_hash(de->d_name);
+
+ // search for it
+ for(node = nodes ; node ; node = node->next) {
+ if(hash == node->hash && !strcmp(de->d_name, node->id))
+ break;
+ }
+
+ // did we find it?
+ if(node) {
+ // info("BTRFS: already exists '%s'", de->d_name);
+ node->exists = 1;
+
+ // update the disk sizes
+ snprintfz(filename, FILENAME_MAX, "%s/%s/devices", path, de->d_name);
+ find_btrfs_disks(node, filename);
+
+ continue;
+ }
+
+ // info("BTRFS: adding '%s'", de->d_name);
+
+ // not found, create it
+ node = callocz(sizeof(BTRFS_NODE), 1);
+
+ node->id = strdupz(de->d_name);
+ node->hash = simple_hash(node->id);
+ node->exists = 1;
+
+ {
+ char label[FILENAME_MAX + 1] = "";
+
+ snprintfz(filename, FILENAME_MAX, "%s/%s/label", path, de->d_name);
+ read_file(filename, label, FILENAME_MAX);
+
+ char *s = label;
+ if (s[0])
+ s = trim(label);
+
+ if(s && s[0])
+ node->label = strdupz(s);
+ else
+ node->label = strdupz(node->id);
+ }
+
+ //snprintfz(filename, FILENAME_MAX, "%s/%s/sectorsize", path, de->d_name);
+ //if(read_single_number_file(filename, &node->sectorsize) != 0) {
+ // error("BTRFS: failed to read '%s'", filename);
+ // btrfs_free_node(node);
+ // continue;
+ //}
+
+ //snprintfz(filename, FILENAME_MAX, "%s/%s/nodesize", path, de->d_name);
+ //if(read_single_number_file(filename, &node->nodesize) != 0) {
+ // error("BTRFS: failed to read '%s'", filename);
+ // btrfs_free_node(node);
+ // continue;
+ //}
+
+ //snprintfz(filename, FILENAME_MAX, "%s/%s/quota_override", path, de->d_name);
+ //if(read_single_number_file(filename, &node->quota_override) != 0) {
+ // error("BTRFS: failed to read '%s'", filename);
+ // btrfs_free_node(node);
+ // continue;
+ //}
+
+ // --------------------------------------------------------------------
+ // macros to simplify our life
+
+ #define init_btrfs_allocation_field(FIELD) {\
+ snprintfz(filename, FILENAME_MAX, "%s/%s/allocation/" #FIELD, path, de->d_name); \
+ if(read_single_number_file(filename, &node->allocation_ ## FIELD) != 0) {\
+ error("BTRFS: failed to read '%s'", filename);\
+ btrfs_free_node(node);\
+ continue;\
+ }\
+ if(!node->allocation_ ## FIELD ## _filename)\
+ node->allocation_ ## FIELD ## _filename = strdupz(filename);\
+ }
+
+ #define init_btrfs_allocation_section_field(SECTION, FIELD) {\
+ snprintfz(filename, FILENAME_MAX, "%s/%s/allocation/" #SECTION "/" #FIELD, path, de->d_name); \
+ if(read_single_number_file(filename, &node->allocation_ ## SECTION ## _ ## FIELD) != 0) {\
+ error("BTRFS: failed to read '%s'", filename);\
+ btrfs_free_node(node);\
+ continue;\
+ }\
+ if(!node->allocation_ ## SECTION ## _ ## FIELD ## _filename)\
+ node->allocation_ ## SECTION ## _ ## FIELD ## _filename = strdupz(filename);\
+ }
+
+ // --------------------------------------------------------------------
+ // allocation/data
+
+ init_btrfs_allocation_section_field(data, total_bytes);
+ init_btrfs_allocation_section_field(data, bytes_used);
+ init_btrfs_allocation_section_field(data, disk_total);
+ init_btrfs_allocation_section_field(data, disk_used);
+
+
+ // --------------------------------------------------------------------
+ // allocation/metadata
+
+ init_btrfs_allocation_section_field(metadata, total_bytes);
+ init_btrfs_allocation_section_field(metadata, bytes_used);
+ init_btrfs_allocation_section_field(metadata, disk_total);
+ init_btrfs_allocation_section_field(metadata, disk_used);
+
+ init_btrfs_allocation_field(global_rsv_size);
+ // init_btrfs_allocation_field(global_rsv_reserved);
+
+
+ // --------------------------------------------------------------------
+ // allocation/system
+
+ init_btrfs_allocation_section_field(system, total_bytes);
+ init_btrfs_allocation_section_field(system, bytes_used);
+ init_btrfs_allocation_section_field(system, disk_total);
+ init_btrfs_allocation_section_field(system, disk_used);
+
+
+ // --------------------------------------------------------------------
+ // find all disks related to this node
+ // and collect their sizes
+
+ snprintfz(filename, FILENAME_MAX, "%s/%s/devices", path, de->d_name);
+ find_btrfs_disks(node, filename);
+
+
+ // --------------------------------------------------------------------
+ // link it
+
+ // info("BTRFS: linking '%s'", node->id);
+ node->next = nodes;
+ nodes = node;
+ }
+ closedir(dir);
+
+
+ // ------------------------------------------------------------------------
+ // cleanup
+
+ BTRFS_NODE *last = NULL;
+ node = nodes;
+
+ while(node) {
+ if(unlikely(!node->exists)) {
+ if(unlikely(nodes == node)) {
+ nodes = node->next;
+ btrfs_free_node(node);
+ node = nodes;
+ last = NULL;
+ }
+ else {
+ last->next = node->next;
+ btrfs_free_node(node);
+ node = last->next;
+ }
+
+ continue;
+ }
+
+ last = node;
+ node = node->next;
+ }
+
+ return 0;
+}
+
+int do_sys_fs_btrfs(int update_every, usec_t dt) {
+ static int initialized = 0
+ , do_allocation_disks = CONFIG_BOOLEAN_AUTO
+ , do_allocation_system = CONFIG_BOOLEAN_AUTO
+ , do_allocation_data = CONFIG_BOOLEAN_AUTO
+ , do_allocation_metadata = CONFIG_BOOLEAN_AUTO;
+
+ static usec_t refresh_delta = 0, refresh_every = 60 * USEC_PER_SEC;
+ static char *btrfs_path = NULL;
+
+ (void)dt;
+
+ if(unlikely(!initialized)) {
+ initialized = 1;
+
+ char filename[FILENAME_MAX + 1];
+ snprintfz(filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/sys/fs/btrfs");
+ btrfs_path = config_get("plugin:proc:/sys/fs/btrfs", "path to monitor", filename);
+
+ refresh_every = config_get_number("plugin:proc:/sys/fs/btrfs", "check for btrfs changes every", refresh_every / USEC_PER_SEC) * USEC_PER_SEC;
+ refresh_delta = refresh_every;
+
+ do_allocation_disks = config_get_boolean_ondemand("plugin:proc:/sys/fs/btrfs", "physical disks allocation", do_allocation_disks);
+ do_allocation_data = config_get_boolean_ondemand("plugin:proc:/sys/fs/btrfs", "data allocation", do_allocation_data);
+ do_allocation_metadata = config_get_boolean_ondemand("plugin:proc:/sys/fs/btrfs", "metadata allocation", do_allocation_metadata);
+ do_allocation_system = config_get_boolean_ondemand("plugin:proc:/sys/fs/btrfs", "system allocation", do_allocation_system);
+ }
+
+ refresh_delta += dt;
+ if(refresh_delta >= refresh_every) {
+ refresh_delta = 0;
+ find_all_btrfs_pools(btrfs_path);
+ }
+
+ BTRFS_NODE *node;
+ for(node = nodes; node ; node = node->next) {
+ // --------------------------------------------------------------------
+ // allocation/system
+
+ #define collect_btrfs_allocation_field(FIELD) \
+ read_single_number_file(node->allocation_ ## FIELD ## _filename, &node->allocation_ ## FIELD)
+
+ #define collect_btrfs_allocation_section_field(SECTION, FIELD) \
+ read_single_number_file(node->allocation_ ## SECTION ## _ ## FIELD ## _filename, &node->allocation_ ## SECTION ## _ ## FIELD)
+
+ if(do_allocation_disks != CONFIG_BOOLEAN_NO) {
+ if( collect_btrfs_allocation_section_field(data, disk_total) != 0
+ || collect_btrfs_allocation_section_field(data, disk_used) != 0
+ || collect_btrfs_allocation_section_field(metadata, disk_total) != 0
+ || collect_btrfs_allocation_section_field(metadata, disk_used) != 0
+ || collect_btrfs_allocation_section_field(system, disk_total) != 0
+ || collect_btrfs_allocation_section_field(system, disk_used) != 0) {
+ error("BTRFS: failed to collect physical disks allocation for '%s'", node->id);
+ // make it refresh btrfs at the next iteration
+ refresh_delta = refresh_every;
+ continue;
+ }
+ }
+
+ if(do_allocation_data != CONFIG_BOOLEAN_NO) {
+ if (collect_btrfs_allocation_section_field(data, total_bytes) != 0
+ || collect_btrfs_allocation_section_field(data, bytes_used) != 0) {
+ error("BTRFS: failed to collect allocation/data for '%s'", node->id);
+ // make it refresh btrfs at the next iteration
+ refresh_delta = refresh_every;
+ continue;
+ }
+ }
+
+ if(do_allocation_metadata != CONFIG_BOOLEAN_NO) {
+ if (collect_btrfs_allocation_section_field(metadata, total_bytes) != 0
+ || collect_btrfs_allocation_section_field(metadata, bytes_used) != 0
+ || collect_btrfs_allocation_field(global_rsv_size) != 0
+ ) {
+ error("BTRFS: failed to collect allocation/metadata for '%s'", node->id);
+ // make it refresh btrfs at the next iteration
+ refresh_delta = refresh_every;
+ continue;
+ }
+ }
+
+ if(do_allocation_system != CONFIG_BOOLEAN_NO) {
+ if (collect_btrfs_allocation_section_field(system, total_bytes) != 0
+ || collect_btrfs_allocation_section_field(system, bytes_used) != 0) {
+ error("BTRFS: failed to collect allocation/system for '%s'", node->id);
+ // make it refresh btrfs at the next iteration
+ refresh_delta = refresh_every;
+ continue;
+ }
+ }
+
+ // --------------------------------------------------------------------
+ // allocation/disks
+
+ if(do_allocation_disks == CONFIG_BOOLEAN_YES || (do_allocation_disks == CONFIG_BOOLEAN_AUTO && node->all_disks_total && node->allocation_data_disk_total)) {
+ do_allocation_disks = CONFIG_BOOLEAN_YES;
+
+ if(unlikely(!node->st_allocation_disks)) {
+ char id[RRD_ID_LENGTH_MAX + 1], name[RRD_ID_LENGTH_MAX + 1], title[200 + 1];
+
+ snprintf(id, RRD_ID_LENGTH_MAX, "disk_%s", node->id);
+ snprintf(name, RRD_ID_LENGTH_MAX, "disk_%s", node->label);
+ snprintf(title, 200, "BTRFS Physical Disk Allocation for %s", node->label);
+
+ netdata_fix_chart_id(id);
+ netdata_fix_chart_name(name);
+
+ node->st_allocation_disks = rrdset_create_localhost(
+ "btrfs"
+ , id
+ , name
+ , node->label
+ , "btrfs.disk"
+ , title
+ , "MB"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_BTRFS_NAME
+ , NETDATA_CHART_PRIO_BTRFS_DISK
+ , update_every
+ , RRDSET_TYPE_STACKED
+ );
+
+ node->rd_allocation_disks_unallocated = rrddim_add(node->st_allocation_disks, "unallocated", NULL, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE);
+ node->rd_allocation_disks_data_free = rrddim_add(node->st_allocation_disks, "data_free", "data free", 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE);
+ node->rd_allocation_disks_data_used = rrddim_add(node->st_allocation_disks, "data_used", "data used", 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE);
+ node->rd_allocation_disks_metadata_free = rrddim_add(node->st_allocation_disks, "meta_free", "meta free", 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE);
+ node->rd_allocation_disks_metadata_used = rrddim_add(node->st_allocation_disks, "meta_used", "meta used", 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE);
+ node->rd_allocation_disks_system_free = rrddim_add(node->st_allocation_disks, "sys_free", "sys free", 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE);
+ node->rd_allocation_disks_system_used = rrddim_add(node->st_allocation_disks, "sys_used", "sys used", 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE);
+ }
+ else rrdset_next(node->st_allocation_disks);
+
+ // unsigned long long disk_used = node->allocation_data_disk_used + node->allocation_metadata_disk_used + node->allocation_system_disk_used;
+ unsigned long long disk_total = node->allocation_data_disk_total + node->allocation_metadata_disk_total + node->allocation_system_disk_total;
+ unsigned long long disk_unallocated = node->all_disks_total - disk_total;
+
+ rrddim_set_by_pointer(node->st_allocation_disks, node->rd_allocation_disks_unallocated, disk_unallocated);
+ rrddim_set_by_pointer(node->st_allocation_disks, node->rd_allocation_disks_data_used, node->allocation_data_disk_used);
+ rrddim_set_by_pointer(node->st_allocation_disks, node->rd_allocation_disks_data_free, node->allocation_data_disk_total - node->allocation_data_disk_used);
+ rrddim_set_by_pointer(node->st_allocation_disks, node->rd_allocation_disks_metadata_used, node->allocation_metadata_disk_used);
+ rrddim_set_by_pointer(node->st_allocation_disks, node->rd_allocation_disks_metadata_free, node->allocation_metadata_disk_total - node->allocation_metadata_disk_used);
+ rrddim_set_by_pointer(node->st_allocation_disks, node->rd_allocation_disks_system_used, node->allocation_system_disk_used);
+ rrddim_set_by_pointer(node->st_allocation_disks, node->rd_allocation_disks_system_free, node->allocation_system_disk_total - node->allocation_system_disk_used);
+ rrdset_done(node->st_allocation_disks);
+ }
+
+
+ // --------------------------------------------------------------------
+ // allocation/data
+
+ if(do_allocation_data == CONFIG_BOOLEAN_YES || (do_allocation_data == CONFIG_BOOLEAN_AUTO && node->allocation_data_total_bytes)) {
+ do_allocation_data = CONFIG_BOOLEAN_YES;
+
+ if(unlikely(!node->st_allocation_data)) {
+ char id[RRD_ID_LENGTH_MAX + 1], name[RRD_ID_LENGTH_MAX + 1], title[200 + 1];
+
+ snprintf(id, RRD_ID_LENGTH_MAX, "data_%s", node->id);
+ snprintf(name, RRD_ID_LENGTH_MAX, "data_%s", node->label);
+ snprintf(title, 200, "BTRFS Data Allocation for %s", node->label);
+
+ netdata_fix_chart_id(id);
+ netdata_fix_chart_name(name);
+
+ node->st_allocation_data = rrdset_create_localhost(
+ "btrfs"
+ , id
+ , name
+ , node->label
+ , "btrfs.data"
+ , title
+ , "MB"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_BTRFS_NAME
+ , NETDATA_CHART_PRIO_BTRFS_DATA
+ , update_every
+ , RRDSET_TYPE_STACKED
+ );
+
+ node->rd_allocation_data_free = rrddim_add(node->st_allocation_data, "free", NULL, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE);
+ node->rd_allocation_data_used = rrddim_add(node->st_allocation_data, "used", NULL, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE);
+ }
+ else rrdset_next(node->st_allocation_data);
+
+ rrddim_set_by_pointer(node->st_allocation_data, node->rd_allocation_data_free, node->allocation_data_total_bytes - node->allocation_data_bytes_used);
+ rrddim_set_by_pointer(node->st_allocation_data, node->rd_allocation_data_used, node->allocation_data_bytes_used);
+ rrdset_done(node->st_allocation_data);
+ }
+
+ // --------------------------------------------------------------------
+ // allocation/metadata
+
+ if(do_allocation_metadata == CONFIG_BOOLEAN_YES || (do_allocation_metadata == CONFIG_BOOLEAN_AUTO && node->allocation_metadata_total_bytes)) {
+ do_allocation_metadata = CONFIG_BOOLEAN_YES;
+
+ if(unlikely(!node->st_allocation_metadata)) {
+ char id[RRD_ID_LENGTH_MAX + 1], name[RRD_ID_LENGTH_MAX + 1], title[200 + 1];
+
+ snprintf(id, RRD_ID_LENGTH_MAX, "metadata_%s", node->id);
+ snprintf(name, RRD_ID_LENGTH_MAX, "metadata_%s", node->label);
+ snprintf(title, 200, "BTRFS Metadata Allocation for %s", node->label);
+
+ netdata_fix_chart_id(id);
+ netdata_fix_chart_name(name);
+
+ node->st_allocation_metadata = rrdset_create_localhost(
+ "btrfs"
+ , id
+ , name
+ , node->label
+ , "btrfs.metadata"
+ , title
+ , "MB"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_BTRFS_NAME
+ , NETDATA_CHART_PRIO_BTRFS_METADATA
+ , update_every
+ , RRDSET_TYPE_STACKED
+ );
+
+ node->rd_allocation_metadata_free = rrddim_add(node->st_allocation_metadata, "free", NULL, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE);
+ node->rd_allocation_metadata_used = rrddim_add(node->st_allocation_metadata, "used", NULL, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE);
+ node->rd_allocation_metadata_reserved = rrddim_add(node->st_allocation_metadata, "reserved", NULL, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE);
+ }
+ else rrdset_next(node->st_allocation_metadata);
+
+ rrddim_set_by_pointer(node->st_allocation_metadata, node->rd_allocation_metadata_free, node->allocation_metadata_total_bytes - node->allocation_metadata_bytes_used - node->allocation_global_rsv_size);
+ rrddim_set_by_pointer(node->st_allocation_metadata, node->rd_allocation_metadata_used, node->allocation_metadata_bytes_used);
+ rrddim_set_by_pointer(node->st_allocation_metadata, node->rd_allocation_metadata_reserved, node->allocation_global_rsv_size);
+ rrdset_done(node->st_allocation_metadata);
+ }
+
+ // --------------------------------------------------------------------
+ // allocation/system
+
+ if(do_allocation_system == CONFIG_BOOLEAN_YES || (do_allocation_system == CONFIG_BOOLEAN_AUTO && node->allocation_system_total_bytes)) {
+ do_allocation_system = CONFIG_BOOLEAN_YES;
+
+ if(unlikely(!node->st_allocation_system)) {
+ char id[RRD_ID_LENGTH_MAX + 1], name[RRD_ID_LENGTH_MAX + 1], title[200 + 1];
+
+ snprintf(id, RRD_ID_LENGTH_MAX, "system_%s", node->id);
+ snprintf(name, RRD_ID_LENGTH_MAX, "system_%s", node->label);
+ snprintf(title, 200, "BTRFS System Allocation for %s", node->label);
+
+ netdata_fix_chart_id(id);
+ netdata_fix_chart_name(name);
+
+ node->st_allocation_system = rrdset_create_localhost(
+ "btrfs"
+ , id
+ , name
+ , node->label
+ , "btrfs.system"
+ , title
+ , "MB"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_BTRFS_NAME
+ , NETDATA_CHART_PRIO_BTRFS_SYSTEM
+ , update_every
+ , RRDSET_TYPE_STACKED
+ );
+
+ node->rd_allocation_system_free = rrddim_add(node->st_allocation_system, "free", NULL, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE);
+ node->rd_allocation_system_used = rrddim_add(node->st_allocation_system, "used", NULL, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE);
+ }
+ else rrdset_next(node->st_allocation_system);
+
+ rrddim_set_by_pointer(node->st_allocation_system, node->rd_allocation_system_free, node->allocation_system_total_bytes - node->allocation_system_bytes_used);
+ rrddim_set_by_pointer(node->st_allocation_system, node->rd_allocation_system_used, node->allocation_system_bytes_used);
+ rrdset_done(node->st_allocation_system);
+ }
+ }
+
+ return 0;
+}
+
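The allocation charts above come from plain number files under /sys/fs/btrfs/<uuid>/allocation/. A rough standalone sketch of the data-allocation readout, assuming the default sysfs layout and skipping the "features" directory just as the collector does:

    /* Sketch only: reports data allocation per btrfs pool from the same
     * allocation/data/{total_bytes,bytes_used} files used above. */
    #include <dirent.h>
    #include <stdio.h>
    #include <string.h>

    static unsigned long long read_ull(const char *path) {
        unsigned long long v = 0;
        FILE *fp = fopen(path, "r");
        if(fp) { if(fscanf(fp, "%llu", &v) != 1) v = 0; fclose(fp); }
        return v;
    }

    int main(void) {
        DIR *dir = opendir("/sys/fs/btrfs");
        if(!dir) return 1;

        struct dirent *de;
        while((de = readdir(dir))) {
            if(de->d_name[0] == '.' || !strcmp(de->d_name, "features")) continue;

            char path[512];
            snprintf(path, sizeof(path), "/sys/fs/btrfs/%s/allocation/data/total_bytes", de->d_name);
            unsigned long long total = read_ull(path);
            snprintf(path, sizeof(path), "/sys/fs/btrfs/%s/allocation/data/bytes_used", de->d_name);
            unsigned long long used = read_ull(path);

            if(total)
                printf("%s: data %llu / %llu bytes used\n", de->d_name, used, total);
        }

        closedir(dir);
        return 0;
    }
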
diff --git a/collectors/proc.plugin/sys_kernel_mm_ksm.c b/collectors/proc.plugin/sys_kernel_mm_ksm.c
new file mode 100644
index 000000000..0f5c79c49
--- /dev/null
+++ b/collectors/proc.plugin/sys_kernel_mm_ksm.c
@@ -0,0 +1,201 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "plugin_proc.h"
+
+#define PLUGIN_PROC_MODULE_KSM_NAME "/sys/kernel/mm/ksm"
+
+typedef struct ksm_name_value {
+ char filename[FILENAME_MAX + 1];
+ unsigned long long value;
+} KSM_NAME_VALUE;
+
+#define PAGES_SHARED 0
+#define PAGES_SHARING 1
+#define PAGES_UNSHARED 2
+#define PAGES_VOLATILE 3
+#define PAGES_TO_SCAN 4
+
+KSM_NAME_VALUE values[] = {
+ [PAGES_SHARED] = { "/sys/kernel/mm/ksm/pages_shared", 0ULL },
+ [PAGES_SHARING] = { "/sys/kernel/mm/ksm/pages_sharing", 0ULL },
+ [PAGES_UNSHARED] = { "/sys/kernel/mm/ksm/pages_unshared", 0ULL },
+ [PAGES_VOLATILE] = { "/sys/kernel/mm/ksm/pages_volatile", 0ULL },
+ // [PAGES_TO_SCAN] = { "/sys/kernel/mm/ksm/pages_to_scan", 0ULL },
+};
+
+int do_sys_kernel_mm_ksm(int update_every, usec_t dt) {
+ (void)dt;
+ static procfile *ff_pages_shared = NULL, *ff_pages_sharing = NULL, *ff_pages_unshared = NULL, *ff_pages_volatile = NULL/*, *ff_pages_to_scan = NULL*/;
+ static unsigned long page_size = 0;
+
+ if(unlikely(page_size == 0))
+ page_size = (unsigned long)sysconf(_SC_PAGESIZE);
+
+ if(unlikely(!ff_pages_shared)) {
+ snprintfz(values[PAGES_SHARED].filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/sys/kernel/mm/ksm/pages_shared");
+ snprintfz(values[PAGES_SHARED].filename, FILENAME_MAX, "%s", config_get("plugin:proc:/sys/kernel/mm/ksm", "/sys/kernel/mm/ksm/pages_shared", values[PAGES_SHARED].filename));
+ ff_pages_shared = procfile_open(values[PAGES_SHARED].filename, " \t:", PROCFILE_FLAG_DEFAULT);
+ }
+
+ if(unlikely(!ff_pages_sharing)) {
+ snprintfz(values[PAGES_SHARING].filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/sys/kernel/mm/ksm/pages_sharing");
+ snprintfz(values[PAGES_SHARING].filename, FILENAME_MAX, "%s", config_get("plugin:proc:/sys/kernel/mm/ksm", "/sys/kernel/mm/ksm/pages_sharing", values[PAGES_SHARING].filename));
+ ff_pages_sharing = procfile_open(values[PAGES_SHARING].filename, " \t:", PROCFILE_FLAG_DEFAULT);
+ }
+
+ if(unlikely(!ff_pages_unshared)) {
+ snprintfz(values[PAGES_UNSHARED].filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/sys/kernel/mm/ksm/pages_unshared");
+ snprintfz(values[PAGES_UNSHARED].filename, FILENAME_MAX, "%s", config_get("plugin:proc:/sys/kernel/mm/ksm", "/sys/kernel/mm/ksm/pages_unshared", values[PAGES_UNSHARED].filename));
+ ff_pages_unshared = procfile_open(values[PAGES_UNSHARED].filename, " \t:", PROCFILE_FLAG_DEFAULT);
+ }
+
+ if(unlikely(!ff_pages_volatile)) {
+ snprintfz(values[PAGES_VOLATILE].filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/sys/kernel/mm/ksm/pages_volatile");
+ snprintfz(values[PAGES_VOLATILE].filename, FILENAME_MAX, "%s", config_get("plugin:proc:/sys/kernel/mm/ksm", "/sys/kernel/mm/ksm/pages_volatile", values[PAGES_VOLATILE].filename));
+ ff_pages_volatile = procfile_open(values[PAGES_VOLATILE].filename, " \t:", PROCFILE_FLAG_DEFAULT);
+ }
+
+ //if(unlikely(!ff_pages_to_scan)) {
+ // snprintfz(values[PAGES_TO_SCAN].filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/sys/kernel/mm/ksm/pages_to_scan");
+ // snprintfz(values[PAGES_TO_SCAN].filename, FILENAME_MAX, "%s", config_get("plugin:proc:/sys/kernel/mm/ksm", "/sys/kernel/mm/ksm/pages_to_scan", values[PAGES_TO_SCAN].filename));
+ // ff_pages_to_scan = procfile_open(values[PAGES_TO_SCAN].filename, " \t:", PROCFILE_FLAG_DEFAULT);
+ //}
+
+ if(unlikely(!ff_pages_shared || !ff_pages_sharing || !ff_pages_unshared || !ff_pages_volatile /*|| !ff_pages_to_scan */))
+ return 1;
+
+ unsigned long long pages_shared = 0, pages_sharing = 0, pages_unshared = 0, pages_volatile = 0, /*pages_to_scan = 0,*/ offered = 0, saved = 0;
+
+ ff_pages_shared = procfile_readall(ff_pages_shared);
+ if(unlikely(!ff_pages_shared)) return 0; // we return 0, so that we will retry to open it next time
+ pages_shared = str2ull(procfile_lineword(ff_pages_shared, 0, 0));
+
+ ff_pages_sharing = procfile_readall(ff_pages_sharing);
+ if(unlikely(!ff_pages_sharing)) return 0; // we return 0, so that we will retry to open it next time
+ pages_sharing = str2ull(procfile_lineword(ff_pages_sharing, 0, 0));
+
+ ff_pages_unshared = procfile_readall(ff_pages_unshared);
+ if(unlikely(!ff_pages_unshared)) return 0; // we return 0, so that we will retry to open it next time
+ pages_unshared = str2ull(procfile_lineword(ff_pages_unshared, 0, 0));
+
+ ff_pages_volatile = procfile_readall(ff_pages_volatile);
+ if(unlikely(!ff_pages_volatile)) return 0; // we return 0, so that we will retry to open it next time
+ pages_volatile = str2ull(procfile_lineword(ff_pages_volatile, 0, 0));
+
+ //ff_pages_to_scan = procfile_readall(ff_pages_to_scan);
+ //if(unlikely(!ff_pages_to_scan)) return 0; // we return 0, so that we will retry to open it next time
+ //pages_to_scan = str2ull(procfile_lineword(ff_pages_to_scan, 0, 0));
+
+ offered = pages_sharing + pages_shared + pages_unshared + pages_volatile;
+ saved = pages_sharing;
+
+ if(unlikely(!offered /*|| !pages_to_scan*/)) return 0;
+
+ // --------------------------------------------------------------------
+
+ {
+ static RRDSET *st_mem_ksm = NULL;
+ static RRDDIM *rd_shared = NULL, *rd_unshared = NULL, *rd_sharing = NULL, *rd_volatile = NULL/*, *rd_to_scan = NULL*/;
+
+ if (unlikely(!st_mem_ksm)) {
+ st_mem_ksm = rrdset_create_localhost(
+ "mem"
+ , "ksm"
+ , NULL
+ , "ksm"
+ , NULL
+ , "Kernel Same Page Merging"
+ , "MB"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_KSM_NAME
+ , NETDATA_CHART_PRIO_MEM_KSM
+ , update_every
+ , RRDSET_TYPE_AREA
+ );
+
+ rd_shared = rrddim_add(st_mem_ksm, "shared", NULL, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE);
+ rd_unshared = rrddim_add(st_mem_ksm, "unshared", NULL, -1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE);
+ rd_sharing = rrddim_add(st_mem_ksm, "sharing", NULL, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE);
+ rd_volatile = rrddim_add(st_mem_ksm, "volatile", NULL, -1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE);
+ //rd_to_scan = rrddim_add(st_mem_ksm, "to_scan", "to scan", -1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE);
+ }
+ else
+ rrdset_next(st_mem_ksm);
+
+ rrddim_set_by_pointer(st_mem_ksm, rd_shared, pages_shared * page_size);
+ rrddim_set_by_pointer(st_mem_ksm, rd_unshared, pages_unshared * page_size);
+ rrddim_set_by_pointer(st_mem_ksm, rd_sharing, pages_sharing * page_size);
+ rrddim_set_by_pointer(st_mem_ksm, rd_volatile, pages_volatile * page_size);
+ //rrddim_set_by_pointer(st_mem_ksm, rd_to_scan, pages_to_scan * page_size);
+
+ rrdset_done(st_mem_ksm);
+ }
+
+ // --------------------------------------------------------------------
+
+ {
+ static RRDSET *st_mem_ksm_savings = NULL;
+ static RRDDIM *rd_savings = NULL, *rd_offered = NULL;
+
+ if (unlikely(!st_mem_ksm_savings)) {
+ st_mem_ksm_savings = rrdset_create_localhost(
+ "mem"
+ , "ksm_savings"
+ , NULL
+ , "ksm"
+ , NULL
+ , "Kernel Same Page Merging Savings"
+ , "MB"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_KSM_NAME
+ , NETDATA_CHART_PRIO_MEM_KSM_SAVINGS
+ , update_every
+ , RRDSET_TYPE_AREA
+ );
+
+ rd_savings = rrddim_add(st_mem_ksm_savings, "savings", NULL, -1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE);
+ rd_offered = rrddim_add(st_mem_ksm_savings, "offered", NULL, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE);
+ }
+ else
+ rrdset_next(st_mem_ksm_savings);
+
+ rrddim_set_by_pointer(st_mem_ksm_savings, rd_savings, saved * page_size);
+ rrddim_set_by_pointer(st_mem_ksm_savings, rd_offered, offered * page_size);
+
+ rrdset_done(st_mem_ksm_savings);
+ }
+
+ // --------------------------------------------------------------------
+
+ {
+ static RRDSET *st_mem_ksm_ratios = NULL;
+ static RRDDIM *rd_savings = NULL;
+
+ if (unlikely(!st_mem_ksm_ratios)) {
+ st_mem_ksm_ratios = rrdset_create_localhost(
+ "mem"
+ , "ksm_ratios"
+ , NULL
+ , "ksm"
+ , NULL
+ , "Kernel Same Page Merging Effectiveness"
+ , "percentage"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_KSM_NAME
+ , NETDATA_CHART_PRIO_MEM_KSM_RATIOS
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ rd_savings = rrddim_add(st_mem_ksm_ratios, "savings", NULL, 1, 10000, RRD_ALGORITHM_ABSOLUTE);
+ }
+ else
+ rrdset_next(st_mem_ksm_ratios);
+
+ rrddim_set_by_pointer(st_mem_ksm_ratios, rd_savings, (saved * 1000000) / offered);
+
+ rrdset_done(st_mem_ksm_ratios);
+ }
+
+ return 0;
+}
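The savings and ratio charts above reduce to simple arithmetic over four KSM counters: everything offered to KSM is shared + sharing + unshared + volatile pages, and the saving is the sharing count. A standalone sketch of the same calculation (paths assume the stock /sys/kernel/mm/ksm layout):

    /* Sketch only: reproduces the savings/ratio numbers charted by
     * do_sys_kernel_mm_ksm(). */
    #include <stdio.h>
    #include <unistd.h>

    static unsigned long long read_counter(const char *name) {
        char path[256];
        snprintf(path, sizeof(path), "/sys/kernel/mm/ksm/%s", name);
        unsigned long long v = 0;
        FILE *fp = fopen(path, "r");
        if(fp) { if(fscanf(fp, "%llu", &v) != 1) v = 0; fclose(fp); }
        return v;
    }

    int main(void) {
        unsigned long long shared    = read_counter("pages_shared");
        unsigned long long sharing   = read_counter("pages_sharing");
        unsigned long long unshared  = read_counter("pages_unshared");
        unsigned long long volat     = read_counter("pages_volatile");

        unsigned long long offered = shared + sharing + unshared + volat;
        if(!offered) { printf("KSM idle\n"); return 0; }

        long page_size = sysconf(_SC_PAGESIZE);
        printf("saved:   %llu MB\n", sharing * page_size / (1024 * 1024));
        printf("offered: %llu MB\n", offered * page_size / (1024 * 1024));
        printf("ratio:   %.2f%%\n", 100.0 * (double)sharing / (double)offered);
        return 0;
    }
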
diff --git a/collectors/proc.plugin/zfs_common.c b/collectors/proc.plugin/zfs_common.c
new file mode 100644
index 000000000..1aaceb908
--- /dev/null
+++ b/collectors/proc.plugin/zfs_common.c
@@ -0,0 +1,714 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "zfs_common.h"
+
+struct arcstats arcstats = { 0 };
+
+void generate_charts_arcstats(const char *plugin, const char *module, int update_every) {
+
+ // ARC reads
+ unsigned long long aread = arcstats.hits + arcstats.misses;
+
+ // Demand reads
+ unsigned long long dhit = arcstats.demand_data_hits + arcstats.demand_metadata_hits;
+ unsigned long long dmiss = arcstats.demand_data_misses + arcstats.demand_metadata_misses;
+ unsigned long long dread = dhit + dmiss;
+
+ // Prefetch reads
+ unsigned long long phit = arcstats.prefetch_data_hits + arcstats.prefetch_metadata_hits;
+ unsigned long long pmiss = arcstats.prefetch_data_misses + arcstats.prefetch_metadata_misses;
+ unsigned long long pread = phit + pmiss;
+
+ // Metadata reads
+ unsigned long long mhit = arcstats.prefetch_metadata_hits + arcstats.demand_metadata_hits;
+ unsigned long long mmiss = arcstats.prefetch_metadata_misses + arcstats.demand_metadata_misses;
+ unsigned long long mread = mhit + mmiss;
+
+ // l2 reads
+ unsigned long long l2hit = arcstats.l2_hits;
+ unsigned long long l2miss = arcstats.l2_misses;
+ unsigned long long l2read = l2hit + l2miss;
+
+ // --------------------------------------------------------------------
+
+ {
+ static RRDSET *st_arc_size = NULL;
+ static RRDDIM *rd_arc_size = NULL;
+ static RRDDIM *rd_arc_target_size = NULL;
+ static RRDDIM *rd_arc_target_min_size = NULL;
+ static RRDDIM *rd_arc_target_max_size = NULL;
+
+ if (unlikely(!st_arc_size)) {
+ st_arc_size = rrdset_create_localhost(
+ "zfs"
+ , "arc_size"
+ , NULL
+ , ZFS_FAMILY_SIZE
+ , NULL
+ , "ZFS ARC Size"
+ , "MB"
+ , plugin
+ , module
+ , NETDATA_CHART_PRIO_ZFS_ARC_SIZE
+ , update_every
+ , RRDSET_TYPE_AREA
+ );
+
+ rd_arc_size = rrddim_add(st_arc_size, "size", "arcsz", 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE);
+ rd_arc_target_size = rrddim_add(st_arc_size, "target", NULL, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE);
+ rd_arc_target_min_size = rrddim_add(st_arc_size, "min", "min (hard limit)", 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE);
+ rd_arc_target_max_size = rrddim_add(st_arc_size, "max", "max (high water)", 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE);
+ }
+ else
+ rrdset_next(st_arc_size);
+
+ rrddim_set_by_pointer(st_arc_size, rd_arc_size, arcstats.size);
+ rrddim_set_by_pointer(st_arc_size, rd_arc_target_size, arcstats.c);
+ rrddim_set_by_pointer(st_arc_size, rd_arc_target_min_size, arcstats.c_min);
+ rrddim_set_by_pointer(st_arc_size, rd_arc_target_max_size, arcstats.c_max);
+ rrdset_done(st_arc_size);
+ }
+
+ // --------------------------------------------------------------------
+
+ if(likely(arcstats.l2exist)) {
+ static RRDSET *st_l2_size = NULL;
+ static RRDDIM *rd_l2_size = NULL;
+ static RRDDIM *rd_l2_asize = NULL;
+
+ if (unlikely(!st_l2_size)) {
+ st_l2_size = rrdset_create_localhost(
+ "zfs"
+ , "l2_size"
+ , NULL
+ , ZFS_FAMILY_SIZE
+ , NULL
+ , "ZFS L2 ARC Size"
+ , "MB"
+ , plugin
+ , module
+ , NETDATA_CHART_PRIO_ZFS_L2_SIZE
+ , update_every
+ , RRDSET_TYPE_AREA
+ );
+
+ rd_l2_asize = rrddim_add(st_l2_size, "actual", NULL, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE);
+ rd_l2_size = rrddim_add(st_l2_size, "size", NULL, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE);
+ }
+ else
+ rrdset_next(st_l2_size);
+
+ rrddim_set_by_pointer(st_l2_size, rd_l2_size, arcstats.l2_size);
+ rrddim_set_by_pointer(st_l2_size, rd_l2_asize, arcstats.l2_asize);
+ rrdset_done(st_l2_size);
+ }
+
+ // --------------------------------------------------------------------
+
+ {
+ static RRDSET *st_reads = NULL;
+ static RRDDIM *rd_aread = NULL;
+ static RRDDIM *rd_dread = NULL;
+ static RRDDIM *rd_pread = NULL;
+ static RRDDIM *rd_mread = NULL;
+ static RRDDIM *rd_l2read = NULL;
+
+ if (unlikely(!st_reads)) {
+ st_reads = rrdset_create_localhost(
+ "zfs"
+ , "reads"
+ , NULL
+ , ZFS_FAMILY_ACCESSES
+ , NULL
+ , "ZFS Reads"
+ , "reads/s"
+ , plugin
+ , module
+ , NETDATA_CHART_PRIO_ZFS_READS
+ , update_every
+ , RRDSET_TYPE_AREA
+ );
+
+ rd_aread = rrddim_add(st_reads, "areads", "arc", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_dread = rrddim_add(st_reads, "dreads", "demand", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_pread = rrddim_add(st_reads, "preads", "prefetch", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_mread = rrddim_add(st_reads, "mreads", "metadata", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+
+ if(arcstats.l2exist)
+ rd_l2read = rrddim_add(st_reads, "l2reads", "l2", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else
+ rrdset_next(st_reads);
+
+ rrddim_set_by_pointer(st_reads, rd_aread, aread);
+ rrddim_set_by_pointer(st_reads, rd_dread, dread);
+ rrddim_set_by_pointer(st_reads, rd_pread, pread);
+ rrddim_set_by_pointer(st_reads, rd_mread, mread);
+
+ if(arcstats.l2exist)
+ rrddim_set_by_pointer(st_reads, rd_l2read, l2read);
+
+ rrdset_done(st_reads);
+ }
+
+ // --------------------------------------------------------------------
+
+ if(likely(arcstats.l2exist)) {
+ static RRDSET *st_l2bytes = NULL;
+ static RRDDIM *rd_l2_read_bytes = NULL;
+ static RRDDIM *rd_l2_write_bytes = NULL;
+
+ if (unlikely(!st_l2bytes)) {
+ st_l2bytes = rrdset_create_localhost(
+ "zfs"
+ , "bytes"
+ , NULL
+ , ZFS_FAMILY_ACCESSES
+ , NULL
+ , "ZFS ARC L2 Read/Write Rate"
+ , "kilobytes/s"
+ , plugin
+ , module
+ , NETDATA_CHART_PRIO_ZFS_IO
+ , update_every
+ , RRDSET_TYPE_AREA
+ );
+
+ rd_l2_read_bytes = rrddim_add(st_l2bytes, "read", NULL, 1, 1024, RRD_ALGORITHM_INCREMENTAL);
+ rd_l2_write_bytes = rrddim_add(st_l2bytes, "write", NULL, -1, 1024, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else
+ rrdset_next(st_l2bytes);
+
+ rrddim_set_by_pointer(st_l2bytes, rd_l2_read_bytes, arcstats.l2_read_bytes);
+ rrddim_set_by_pointer(st_l2bytes, rd_l2_write_bytes, arcstats.l2_write_bytes);
+ rrdset_done(st_l2bytes);
+ }
+
+ // --------------------------------------------------------------------
+
+ {
+ static RRDSET *st_ahits = NULL;
+ static RRDDIM *rd_ahits = NULL;
+ static RRDDIM *rd_amisses = NULL;
+
+ if (unlikely(!st_ahits)) {
+ st_ahits = rrdset_create_localhost(
+ "zfs"
+ , "hits"
+ , NULL
+ , ZFS_FAMILY_EFFICIENCY
+ , NULL
+ , "ZFS ARC Hits"
+ , "percentage"
+ , plugin
+ , module
+ , NETDATA_CHART_PRIO_ZFS_HITS
+ , update_every
+ , RRDSET_TYPE_STACKED
+ );
+
+ rd_ahits = rrddim_add(st_ahits, "hits", NULL, 1, 1, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL);
+ rd_amisses = rrddim_add(st_ahits, "misses", NULL, 1, 1, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL);
+ }
+ else
+ rrdset_next(st_ahits);
+
+ rrddim_set_by_pointer(st_ahits, rd_ahits, arcstats.hits);
+ rrddim_set_by_pointer(st_ahits, rd_amisses, arcstats.misses);
+ rrdset_done(st_ahits);
+ }
+
+ // --------------------------------------------------------------------
+
+ {
+ static RRDSET *st_dhits = NULL;
+ static RRDDIM *rd_dhits = NULL;
+ static RRDDIM *rd_dmisses = NULL;
+
+ if (unlikely(!st_dhits)) {
+ st_dhits = rrdset_create_localhost(
+ "zfs"
+ , "dhits"
+ , NULL
+ , ZFS_FAMILY_EFFICIENCY
+ , NULL
+ , "ZFS Demand Hits"
+ , "percentage"
+ , plugin
+ , module
+ , NETDATA_CHART_PRIO_ZFS_DHITS
+ , update_every
+ , RRDSET_TYPE_STACKED
+ );
+
+ rd_dhits = rrddim_add(st_dhits, "hits", NULL, 1, 1, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL);
+ rd_dmisses = rrddim_add(st_dhits, "misses", NULL, 1, 1, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL);
+ }
+ else
+ rrdset_next(st_dhits);
+
+ rrddim_set_by_pointer(st_dhits, rd_dhits, dhit);
+ rrddim_set_by_pointer(st_dhits, rd_dmisses, dmiss);
+ rrdset_done(st_dhits);
+ }
+
+ // --------------------------------------------------------------------
+
+ {
+ static RRDSET *st_phits = NULL;
+ static RRDDIM *rd_phits = NULL;
+ static RRDDIM *rd_pmisses = NULL;
+
+ if (unlikely(!st_phits)) {
+ st_phits = rrdset_create_localhost(
+ "zfs"
+ , "phits"
+ , NULL
+ , ZFS_FAMILY_EFFICIENCY
+ , NULL
+ , "ZFS Prefetch Hits"
+ , "percentage"
+ , plugin
+ , module
+ , NETDATA_CHART_PRIO_ZFS_PHITS
+ , update_every
+ , RRDSET_TYPE_STACKED
+ );
+
+ rd_phits = rrddim_add(st_phits, "hits", NULL, 1, 1, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL);
+ rd_pmisses = rrddim_add(st_phits, "misses", NULL, 1, 1, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL);
+ }
+ else
+ rrdset_next(st_phits);
+
+ rrddim_set_by_pointer(st_phits, rd_phits, phit);
+ rrddim_set_by_pointer(st_phits, rd_pmisses, pmiss);
+ rrdset_done(st_phits);
+ }
+
+ // --------------------------------------------------------------------
+
+ {
+ static RRDSET *st_mhits = NULL;
+ static RRDDIM *rd_mhits = NULL;
+ static RRDDIM *rd_mmisses = NULL;
+
+ if (unlikely(!st_mhits)) {
+ st_mhits = rrdset_create_localhost(
+ "zfs"
+ , "mhits"
+ , NULL
+ , ZFS_FAMILY_EFFICIENCY
+ , NULL
+ , "ZFS Metadata Hits"
+ , "percentage"
+ , plugin
+ , module
+ , NETDATA_CHART_PRIO_ZFS_MHITS
+ , update_every
+ , RRDSET_TYPE_STACKED
+ );
+
+ rd_mhits = rrddim_add(st_mhits, "hits", NULL, 1, 1, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL);
+ rd_mmisses = rrddim_add(st_mhits, "misses", NULL, 1, 1, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL);
+ }
+ else
+ rrdset_next(st_mhits);
+
+ rrddim_set_by_pointer(st_mhits, rd_mhits, mhit);
+ rrddim_set_by_pointer(st_mhits, rd_mmisses, mmiss);
+ rrdset_done(st_mhits);
+ }
+
+ // --------------------------------------------------------------------
+
+ if(likely(arcstats.l2exist)) {
+ static RRDSET *st_l2hits = NULL;
+ static RRDDIM *rd_l2hits = NULL;
+ static RRDDIM *rd_l2misses = NULL;
+
+ if (unlikely(!st_l2hits)) {
+ st_l2hits = rrdset_create_localhost(
+ "zfs"
+ , "l2hits"
+ , NULL
+ , ZFS_FAMILY_EFFICIENCY
+ , NULL
+ , "ZFS L2 Hits"
+ , "percentage"
+ , plugin
+ , module
+ , NETDATA_CHART_PRIO_ZFS_L2HITS
+ , update_every
+ , RRDSET_TYPE_STACKED
+ );
+
+ rd_l2hits = rrddim_add(st_l2hits, "hits", NULL, 1, 1, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL);
+ rd_l2misses = rrddim_add(st_l2hits, "misses", NULL, 1, 1, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL);
+ }
+ else
+ rrdset_next(st_l2hits);
+
+ rrddim_set_by_pointer(st_l2hits, rd_l2hits, l2hit);
+ rrddim_set_by_pointer(st_l2hits, rd_l2misses, l2miss);
+ rrdset_done(st_l2hits);
+ }
+
+ // --------------------------------------------------------------------
+
+ {
+ static RRDSET *st_list_hits = NULL;
+ static RRDDIM *rd_mfu = NULL;
+ static RRDDIM *rd_mru = NULL;
+ static RRDDIM *rd_mfug = NULL;
+ static RRDDIM *rd_mrug = NULL;
+
+ if (unlikely(!st_list_hits)) {
+ st_list_hits = rrdset_create_localhost(
+ "zfs"
+ , "list_hits"
+ , NULL
+ , ZFS_FAMILY_EFFICIENCY
+ , NULL
+ , "ZFS List Hits"
+ , "hits/s"
+ , plugin
+ , module
+ , NETDATA_CHART_PRIO_ZFS_LIST_HITS
+ , update_every
+ , RRDSET_TYPE_AREA
+ );
+
+ rd_mfu = rrddim_add(st_list_hits, "mfu", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_mfug = rrddim_add(st_list_hits, "mfug", "mfu ghost", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_mru = rrddim_add(st_list_hits, "mru", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_mrug = rrddim_add(st_list_hits, "mrug", "mru ghost", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else
+ rrdset_next(st_list_hits);
+
+ rrddim_set_by_pointer(st_list_hits, rd_mfu, arcstats.mfu_hits);
+ rrddim_set_by_pointer(st_list_hits, rd_mru, arcstats.mru_hits);
+ rrddim_set_by_pointer(st_list_hits, rd_mfug, arcstats.mfu_ghost_hits);
+ rrddim_set_by_pointer(st_list_hits, rd_mrug, arcstats.mru_ghost_hits);
+ rrdset_done(st_list_hits);
+ }
+}
+
+void generate_charts_arc_summary(const char *plugin, const char *module, int update_every) {
+ unsigned long long arc_accesses_total = arcstats.hits + arcstats.misses;
+ unsigned long long real_hits = arcstats.mfu_hits + arcstats.mru_hits;
+ unsigned long long real_misses = arc_accesses_total - real_hits;
+
+ //unsigned long long anon_hits = arcstats.hits - (arcstats.mfu_hits + arcstats.mru_hits + arcstats.mfu_ghost_hits + arcstats.mru_ghost_hits);
+
+ unsigned long long arc_size = arcstats.size;
+ unsigned long long mru_size = arcstats.p;
+ //unsigned long long target_min_size = arcstats.c_min;
+ //unsigned long long target_max_size = arcstats.c_max;
+ unsigned long long target_size = arcstats.c;
+ //unsigned long long target_size_ratio = (target_max_size / target_min_size);
+
+ unsigned long long mfu_size;
+ if(arc_size > target_size)
+ mfu_size = arc_size - mru_size;
+ else
+ mfu_size = target_size - mru_size;
+
+ // --------------------------------------------------------------------
+
+ {
+ static RRDSET *st_arc_size_breakdown = NULL;
+ static RRDDIM *rd_most_recent = NULL;
+ static RRDDIM *rd_most_frequent = NULL;
+
+ if (unlikely(!st_arc_size_breakdown)) {
+ st_arc_size_breakdown = rrdset_create_localhost(
+ "zfs"
+ , "arc_size_breakdown"
+ , NULL
+ , ZFS_FAMILY_EFFICIENCY
+ , NULL
+ , "ZFS ARC Size Breakdown"
+ , "percentage"
+ , plugin
+ , module
+ , NETDATA_CHART_PRIO_ZFS_ARC_SIZE_BREAKDOWN
+ , update_every
+ , RRDSET_TYPE_STACKED
+ );
+
+ rd_most_recent = rrddim_add(st_arc_size_breakdown, "recent", NULL, 1, 1, RRD_ALGORITHM_PCENT_OVER_ROW_TOTAL);
+ rd_most_frequent = rrddim_add(st_arc_size_breakdown, "frequent", NULL, 1, 1, RRD_ALGORITHM_PCENT_OVER_ROW_TOTAL);
+ }
+ else
+ rrdset_next(st_arc_size_breakdown);
+
+ rrddim_set_by_pointer(st_arc_size_breakdown, rd_most_recent, mru_size);
+ rrddim_set_by_pointer(st_arc_size_breakdown, rd_most_frequent, mfu_size);
+ rrdset_done(st_arc_size_breakdown);
+ }
+
+ // --------------------------------------------------------------------
+
+ {
+ static RRDSET *st_memory = NULL;
+#ifndef __FreeBSD__
+ static RRDDIM *rd_direct = NULL;
+#endif
+ static RRDDIM *rd_throttled = NULL;
+#ifndef __FreeBSD__
+ static RRDDIM *rd_indirect = NULL;
+#endif
+
+ if (unlikely(!st_memory)) {
+ st_memory = rrdset_create_localhost(
+ "zfs"
+ , "memory_ops"
+ , NULL
+ , ZFS_FAMILY_OPERATIONS
+ , NULL
+ , "ZFS Memory Operations"
+ , "operations/s"
+ , plugin
+ , module
+ , NETDATA_CHART_PRIO_ZFS_MEMORY_OPS
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+
+#ifndef __FreeBSD__
+ rd_direct = rrddim_add(st_memory, "direct", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+#endif
+ rd_throttled = rrddim_add(st_memory, "throttled", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+#ifndef __FreeBSD__
+ rd_indirect = rrddim_add(st_memory, "indirect", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+#endif
+ }
+ else
+ rrdset_next(st_memory);
+
+#ifndef __FreeBSD__
+ rrddim_set_by_pointer(st_memory, rd_direct, arcstats.memory_direct_count);
+#endif
+ rrddim_set_by_pointer(st_memory, rd_throttled, arcstats.memory_throttle_count);
+#ifndef __FreeBSD__
+ rrddim_set_by_pointer(st_memory, rd_indirect, arcstats.memory_indirect_count);
+#endif
+ rrdset_done(st_memory);
+ }
+
+ // --------------------------------------------------------------------
+
+ {
+ static RRDSET *st_important_ops = NULL;
+ static RRDDIM *rd_deleted = NULL;
+ static RRDDIM *rd_mutex_misses = NULL;
+ static RRDDIM *rd_evict_skips = NULL;
+ static RRDDIM *rd_hash_collisions = NULL;
+
+ if (unlikely(!st_important_ops)) {
+ st_important_ops = rrdset_create_localhost(
+ "zfs"
+ , "important_ops"
+ , NULL
+ , ZFS_FAMILY_OPERATIONS
+ , NULL
+ , "ZFS Important Operations"
+ , "operations/s"
+ , plugin
+ , module
+ , NETDATA_CHART_PRIO_ZFS_IMPORTANT_OPS
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ rd_evict_skips = rrddim_add(st_important_ops, "eskip", "evict skip", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_deleted = rrddim_add(st_important_ops, "deleted", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_mutex_misses = rrddim_add(st_important_ops, "mtxmis", "mutex miss", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_hash_collisions = rrddim_add(st_important_ops, "hash_collisions", "hash collisions", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else
+ rrdset_next(st_important_ops);
+
+ rrddim_set_by_pointer(st_important_ops, rd_deleted, arcstats.deleted);
+ rrddim_set_by_pointer(st_important_ops, rd_evict_skips, arcstats.evict_skip);
+ rrddim_set_by_pointer(st_important_ops, rd_mutex_misses, arcstats.mutex_miss);
+ rrddim_set_by_pointer(st_important_ops, rd_hash_collisions, arcstats.hash_collisions);
+ rrdset_done(st_important_ops);
+ }
+
+ // --------------------------------------------------------------------
+
+ {
+ static RRDSET *st_actual_hits = NULL;
+ static RRDDIM *rd_actual_hits = NULL;
+ static RRDDIM *rd_actual_misses = NULL;
+
+ if (unlikely(!st_actual_hits)) {
+ st_actual_hits = rrdset_create_localhost(
+ "zfs"
+ , "actual_hits"
+ , NULL
+ , ZFS_FAMILY_EFFICIENCY
+ , NULL
+ , "ZFS Actual Cache Hits"
+ , "percentage"
+ , plugin
+ , module
+ , NETDATA_CHART_PRIO_ZFS_ACTUAL_HITS
+ , update_every
+ , RRDSET_TYPE_STACKED
+ );
+
+ rd_actual_hits = rrddim_add(st_actual_hits, "hits", NULL, 1, 1, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL);
+ rd_actual_misses = rrddim_add(st_actual_hits, "misses", NULL, 1, 1, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL);
+ }
+ else
+ rrdset_next(st_actual_hits);
+
+ rrddim_set_by_pointer(st_actual_hits, rd_actual_hits, real_hits);
+ rrddim_set_by_pointer(st_actual_hits, rd_actual_misses, real_misses);
+ rrdset_done(st_actual_hits);
+ }
+
+ // --------------------------------------------------------------------
+
+ {
+ static RRDSET *st_demand_data_hits = NULL;
+ static RRDDIM *rd_demand_data_hits = NULL;
+ static RRDDIM *rd_demand_data_misses = NULL;
+
+ if (unlikely(!st_demand_data_hits)) {
+ st_demand_data_hits = rrdset_create_localhost(
+ "zfs"
+ , "demand_data_hits"
+ , NULL
+ , ZFS_FAMILY_EFFICIENCY
+ , NULL
+ , "ZFS Data Demand Efficiency"
+ , "percentage"
+ , plugin
+ , module
+ , NETDATA_CHART_PRIO_ZFS_DEMAND_DATA_HITS
+ , update_every
+ , RRDSET_TYPE_STACKED
+ );
+
+ rd_demand_data_hits = rrddim_add(st_demand_data_hits, "hits", NULL, 1, 1, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL);
+ rd_demand_data_misses = rrddim_add(st_demand_data_hits, "misses", NULL, 1, 1, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL);
+ }
+ else
+ rrdset_next(st_demand_data_hits);
+
+ rrddim_set_by_pointer(st_demand_data_hits, rd_demand_data_hits, arcstats.demand_data_hits);
+ rrddim_set_by_pointer(st_demand_data_hits, rd_demand_data_misses, arcstats.demand_data_misses);
+ rrdset_done(st_demand_data_hits);
+ }
+
+ // --------------------------------------------------------------------
+
+ {
+ static RRDSET *st_prefetch_data_hits = NULL;
+ static RRDDIM *rd_prefetch_data_hits = NULL;
+ static RRDDIM *rd_prefetch_data_misses = NULL;
+
+ if (unlikely(!st_prefetch_data_hits)) {
+ st_prefetch_data_hits = rrdset_create_localhost(
+ "zfs"
+ , "prefetch_data_hits"
+ , NULL
+ , ZFS_FAMILY_EFFICIENCY
+ , NULL
+ , "ZFS Data Prefetch Efficiency"
+ , "percentage"
+ , plugin
+ , module
+ , NETDATA_CHART_PRIO_ZFS_PREFETCH_DATA_HITS
+ , update_every
+ , RRDSET_TYPE_STACKED
+ );
+
+ rd_prefetch_data_hits = rrddim_add(st_prefetch_data_hits, "hits", NULL, 1, 1, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL);
+ rd_prefetch_data_misses = rrddim_add(st_prefetch_data_hits, "misses", NULL, 1, 1, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL);
+ }
+ else
+ rrdset_next(st_prefetch_data_hits);
+
+ rrddim_set_by_pointer(st_prefetch_data_hits, rd_prefetch_data_hits, arcstats.prefetch_data_hits);
+ rrddim_set_by_pointer(st_prefetch_data_hits, rd_prefetch_data_misses, arcstats.prefetch_data_misses);
+ rrdset_done(st_prefetch_data_hits);
+ }
+
+ // --------------------------------------------------------------------
+
+ {
+ static RRDSET *st_hash_elements = NULL;
+ static RRDDIM *rd_hash_elements_current = NULL;
+ static RRDDIM *rd_hash_elements_max = NULL;
+
+ if (unlikely(!st_hash_elements)) {
+ st_hash_elements = rrdset_create_localhost(
+ "zfs"
+ , "hash_elements"
+ , NULL
+ , ZFS_FAMILY_HASH
+ , NULL
+ , "ZFS ARC Hash Elements"
+ , "elements"
+ , plugin
+ , module
+ , NETDATA_CHART_PRIO_ZFS_HASH_ELEMENTS
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ rd_hash_elements_current = rrddim_add(st_hash_elements, "current", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ rd_hash_elements_max = rrddim_add(st_hash_elements, "max", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ }
+ else
+ rrdset_next(st_hash_elements);
+
+ rrddim_set_by_pointer(st_hash_elements, rd_hash_elements_current, arcstats.hash_elements);
+ rrddim_set_by_pointer(st_hash_elements, rd_hash_elements_max, arcstats.hash_elements_max);
+ rrdset_done(st_hash_elements);
+ }
+
+ // --------------------------------------------------------------------
+
+ {
+ static RRDSET *st_hash_chains = NULL;
+ static RRDDIM *rd_hash_chains_current = NULL;
+ static RRDDIM *rd_hash_chains_max = NULL;
+
+ if (unlikely(!st_hash_chains)) {
+ st_hash_chains = rrdset_create_localhost(
+ "zfs"
+ , "hash_chains"
+ , NULL
+ , ZFS_FAMILY_HASH
+ , NULL
+ , "ZFS ARC Hash Chains"
+ , "chains"
+ , plugin
+ , module
+ , NETDATA_CHART_PRIO_ZFS_HASH_CHAINS
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ rd_hash_chains_current = rrddim_add(st_hash_chains, "current", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ rd_hash_chains_max = rrddim_add(st_hash_chains, "max", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ }
+ else
+ rrdset_next(st_hash_chains);
+
+ rrddim_set_by_pointer(st_hash_chains, rd_hash_chains_current, arcstats.hash_chains);
+ rrddim_set_by_pointer(st_hash_chains, rd_hash_chains_max, arcstats.hash_chain_max);
+ rrdset_done(st_hash_chains);
+ }
+
+ // --------------------------------------------------------------------
+
+}
\ No newline at end of file
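
Aside (not part of the patch): the summary derivations above are easy to lose inside the chart boilerplate. The following standalone C sketch, using invented kstat values, walks through the same arithmetic that feeds the "actual hits" and ARC size-breakdown dimensions.

    #include <stdio.h>

    /* Hypothetical kstat snapshot -- values invented for illustration only. */
    int main(void) {
        unsigned long long hits = 1000, misses = 200;
        unsigned long long mfu_hits = 700, mru_hits = 250;
        unsigned long long size = 8ULL << 30;   /* current ARC size    */
        unsigned long long c    = 10ULL << 30;  /* target ARC size     */
        unsigned long long p    = 6ULL << 30;   /* adaptive MRU target */

        unsigned long long real_hits   = mfu_hits + mru_hits;
        unsigned long long real_misses = (hits + misses) - real_hits;

        /* Same derivation as generate_charts_arc_summary(): the MFU share is
         * whatever of the larger of (size, c) is not claimed by the MRU target. */
        unsigned long long mru_size = p;
        unsigned long long mfu_size = (size > c) ? size - mru_size : c - mru_size;

        printf("real hits/misses: %llu/%llu\n", real_hits, real_misses);
        printf("mru/mfu bytes:    %llu/%llu\n", mru_size, mfu_size);
        return 0;
    }

With these numbers real_hits is 950 against 250 real misses, and the breakdown chart stacks 6 GiB of MRU against 4 GiB of MFU; the RRD_ALGORITHM_PCENT_* algorithms then turn both pairs into percentages.
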
diff --git a/collectors/proc.plugin/zfs_common.h b/collectors/proc.plugin/zfs_common.h
new file mode 100644
index 000000000..fab54f59a
--- /dev/null
+++ b/collectors/proc.plugin/zfs_common.h
@@ -0,0 +1,115 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_ZFS_COMMON_H
+#define NETDATA_ZFS_COMMON_H 1
+
+#include "../../daemon/common.h"
+
+#define ZFS_FAMILY_SIZE "size"
+#define ZFS_FAMILY_EFFICIENCY "efficiency"
+#define ZFS_FAMILY_ACCESSES "accesses"
+#define ZFS_FAMILY_OPERATIONS "operations"
+#define ZFS_FAMILY_HASH "hashes"
+
+struct arcstats {
+ // values
+ unsigned long long hits;
+ unsigned long long misses;
+ unsigned long long demand_data_hits;
+ unsigned long long demand_data_misses;
+ unsigned long long demand_metadata_hits;
+ unsigned long long demand_metadata_misses;
+ unsigned long long prefetch_data_hits;
+ unsigned long long prefetch_data_misses;
+ unsigned long long prefetch_metadata_hits;
+ unsigned long long prefetch_metadata_misses;
+ unsigned long long mru_hits;
+ unsigned long long mru_ghost_hits;
+ unsigned long long mfu_hits;
+ unsigned long long mfu_ghost_hits;
+ unsigned long long deleted;
+ unsigned long long mutex_miss;
+ unsigned long long evict_skip;
+ unsigned long long evict_not_enough;
+ unsigned long long evict_l2_cached;
+ unsigned long long evict_l2_eligible;
+ unsigned long long evict_l2_ineligible;
+ unsigned long long evict_l2_skip;
+ unsigned long long hash_elements;
+ unsigned long long hash_elements_max;
+ unsigned long long hash_collisions;
+ unsigned long long hash_chains;
+ unsigned long long hash_chain_max;
+ unsigned long long p;
+ unsigned long long c;
+ unsigned long long c_min;
+ unsigned long long c_max;
+ unsigned long long size;
+ unsigned long long hdr_size;
+ unsigned long long data_size;
+ unsigned long long metadata_size;
+ unsigned long long other_size;
+ unsigned long long anon_size;
+ unsigned long long anon_evictable_data;
+ unsigned long long anon_evictable_metadata;
+ unsigned long long mru_size;
+ unsigned long long mru_evictable_data;
+ unsigned long long mru_evictable_metadata;
+ unsigned long long mru_ghost_size;
+ unsigned long long mru_ghost_evictable_data;
+ unsigned long long mru_ghost_evictable_metadata;
+ unsigned long long mfu_size;
+ unsigned long long mfu_evictable_data;
+ unsigned long long mfu_evictable_metadata;
+ unsigned long long mfu_ghost_size;
+ unsigned long long mfu_ghost_evictable_data;
+ unsigned long long mfu_ghost_evictable_metadata;
+ unsigned long long l2_hits;
+ unsigned long long l2_misses;
+ unsigned long long l2_feeds;
+ unsigned long long l2_rw_clash;
+ unsigned long long l2_read_bytes;
+ unsigned long long l2_write_bytes;
+ unsigned long long l2_writes_sent;
+ unsigned long long l2_writes_done;
+ unsigned long long l2_writes_error;
+ unsigned long long l2_writes_lock_retry;
+ unsigned long long l2_evict_lock_retry;
+ unsigned long long l2_evict_reading;
+ unsigned long long l2_evict_l1cached;
+ unsigned long long l2_free_on_write;
+ unsigned long long l2_cdata_free_on_write;
+ unsigned long long l2_abort_lowmem;
+ unsigned long long l2_cksum_bad;
+ unsigned long long l2_io_error;
+ unsigned long long l2_size;
+ unsigned long long l2_asize;
+ unsigned long long l2_hdr_size;
+ unsigned long long l2_compress_successes;
+ unsigned long long l2_compress_zeros;
+ unsigned long long l2_compress_failures;
+ unsigned long long memory_throttle_count;
+ unsigned long long duplicate_buffers;
+ unsigned long long duplicate_buffers_size;
+ unsigned long long duplicate_reads;
+ unsigned long long memory_direct_count;
+ unsigned long long memory_indirect_count;
+ unsigned long long arc_no_grow;
+ unsigned long long arc_tempreserve;
+ unsigned long long arc_loaned_bytes;
+ unsigned long long arc_prune;
+ unsigned long long arc_meta_used;
+ unsigned long long arc_meta_limit;
+ unsigned long long arc_meta_max;
+ unsigned long long arc_meta_min;
+ unsigned long long arc_need_free;
+ unsigned long long arc_sys_free;
+
+ // flags
+ int l2exist;
+};
+
+void generate_charts_arcstats(const char *plugin, const char *module, int update_every);
+void generate_charts_arc_summary(const char *plugin, const char *module, int update_every);
+
+#endif //NETDATA_ZFS_COMMON_H
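
Note (editorial, not part of the patch): the header only declares the two chart generators; the caller sits outside this hunk. A hedged sketch of how a collector might drive this API follows -- the parser, the global definition, and the entry-point name below are assumptions for illustration, not code from this patch.

    /* Hypothetical caller, loosely modelled on a proc-style collector inside
     * the netdata tree.  read_zfs_arcstats() and zfs_collect_once() are
     * invented names; only generate_charts_*() come from zfs_common.h. */
    #include <stdio.h>
    #include <string.h>
    #include "zfs_common.h"

    /* The chart code reads a plugin-wide arcstats; defined here only so the
     * sketch is self-contained. */
    struct arcstats arcstats;

    static void read_zfs_arcstats(const char *path) {
        FILE *fp = fopen(path, "r");
        if (!fp) return;

        char line[256], name[64];
        unsigned long long type, value;
        while (fgets(line, sizeof(line), fp)) {
            /* data rows look like "hits  4  123456"; skip lines that don't match */
            if (sscanf(line, "%63s %llu %llu", name, &type, &value) != 3) continue;
            if (!strcmp(name, "hits"))          arcstats.hits = value;
            else if (!strcmp(name, "misses"))   arcstats.misses = value;
            else if (!strcmp(name, "mru_hits")) arcstats.mru_hits = value;
            else if (!strcmp(name, "mfu_hits")) arcstats.mfu_hits = value;
            /* ... remaining fields elided ... */
        }
        fclose(fp);
    }

    void zfs_collect_once(int update_every) {
        read_zfs_arcstats("/proc/spl/kstat/zfs/arcstats");
        generate_charts_arcstats("proc", "zfs", update_every);
        generate_charts_arc_summary("proc", "zfs", update_every);
    }

A real collector would keep the field mapping data-driven rather than an if/else chain; the point of the sketch is only the call order -- refresh arcstats, then emit both chart groups once per update interval.
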
diff --git a/collectors/python.d.plugin/Makefile.am b/collectors/python.d.plugin/Makefile.am
new file mode 100644
index 000000000..5f214e436
--- /dev/null
+++ b/collectors/python.d.plugin/Makefile.am
@@ -0,0 +1,244 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+CLEANFILES = \
+ python.d.plugin \
+ $(NULL)
+
+include $(top_srcdir)/build/subst.inc
+SUFFIXES = .in
+
+dist_libconfig_DATA = \
+ python.d.conf \
+ $(NULL)
+
+dist_plugins_SCRIPTS = \
+ python.d.plugin \
+ $(NULL)
+
+dist_noinst_DATA = \
+ python.d.plugin.in \
+ README.md \
+ $(NULL)
+
+dist_python_SCRIPTS = \
+ $(NULL)
+
+dist_python_DATA = \
+ $(NULL)
+
+userpythonconfigdir=$(configdir)/python.d
+dist_userpythonconfig_DATA = \
+ $(top_srcdir)/installer/.keep \
+ $(NULL)
+
+pythonconfigdir=$(libconfigdir)/python.d
+dist_pythonconfig_DATA = \
+ $(top_srcdir)/installer/.keep \
+ $(NULL)
+
+include adaptec_raid/Makefile.inc
+include apache/Makefile.inc
+include beanstalk/Makefile.inc
+include bind_rndc/Makefile.inc
+include boinc/Makefile.inc
+include ceph/Makefile.inc
+include chrony/Makefile.inc
+include couchdb/Makefile.inc
+include cpufreq/Makefile.inc
+include cpuidle/Makefile.inc
+include dnsdist/Makefile.inc
+include dns_query_time/Makefile.inc
+include dockerd/Makefile.inc
+include dovecot/Makefile.inc
+include elasticsearch/Makefile.inc
+include example/Makefile.inc
+include exim/Makefile.inc
+include fail2ban/Makefile.inc
+include freeradius/Makefile.inc
+include go_expvar/Makefile.inc
+include haproxy/Makefile.inc
+include hddtemp/Makefile.inc
+include httpcheck/Makefile.inc
+include icecast/Makefile.inc
+include ipfs/Makefile.inc
+include isc_dhcpd/Makefile.inc
+include linux_power_supply/Makefile.inc
+include litespeed/Makefile.inc
+include logind/Makefile.inc
+include mdstat/Makefile.inc
+include megacli/Makefile.inc
+include memcached/Makefile.inc
+include mongodb/Makefile.inc
+include monit/Makefile.inc
+include mysql/Makefile.inc
+include nginx/Makefile.inc
+include nginx_plus/Makefile.inc
+include nsd/Makefile.inc
+include ntpd/Makefile.inc
+include ovpn_status_log/Makefile.inc
+include phpfpm/Makefile.inc
+include portcheck/Makefile.inc
+include postfix/Makefile.inc
+include postgres/Makefile.inc
+include powerdns/Makefile.inc
+include proxysql/Makefile.inc
+include puppet/Makefile.inc
+include rabbitmq/Makefile.inc
+include redis/Makefile.inc
+include rethinkdbs/Makefile.inc
+include retroshare/Makefile.inc
+include samba/Makefile.inc
+include sensors/Makefile.inc
+include smartd_log/Makefile.inc
+include spigotmc/Makefile.inc
+include springboot/Makefile.inc
+include squid/Makefile.inc
+include tomcat/Makefile.inc
+include traefik/Makefile.inc
+include unbound/Makefile.inc
+include uwsgi/Makefile.inc
+include varnish/Makefile.inc
+include w1sensor/Makefile.inc
+include web_log/Makefile.inc
+
+pythonmodulesdir=$(pythondir)/python_modules
+dist_pythonmodules_DATA = \
+ python_modules/__init__.py \
+ $(NULL)
+
+basesdir=$(pythonmodulesdir)/bases
+dist_bases_DATA = \
+ python_modules/bases/__init__.py \
+ python_modules/bases/charts.py \
+ python_modules/bases/collection.py \
+ python_modules/bases/loaders.py \
+ python_modules/bases/loggers.py \
+ $(NULL)
+
+bases_framework_servicesdir=$(basesdir)/FrameworkServices
+dist_bases_framework_services_DATA = \
+ python_modules/bases/FrameworkServices/__init__.py \
+ python_modules/bases/FrameworkServices/ExecutableService.py \
+ python_modules/bases/FrameworkServices/LogService.py \
+ python_modules/bases/FrameworkServices/MySQLService.py \
+ python_modules/bases/FrameworkServices/SimpleService.py \
+ python_modules/bases/FrameworkServices/SocketService.py \
+ python_modules/bases/FrameworkServices/UrlService.py \
+ $(NULL)
+
+third_partydir=$(pythonmodulesdir)/third_party
+dist_third_party_DATA = \
+ python_modules/third_party/__init__.py \
+ python_modules/third_party/ordereddict.py \
+ python_modules/third_party/lm_sensors.py \
+ python_modules/third_party/mcrcon.py \
+ python_modules/third_party/boinc_client.py \
+ python_modules/third_party/monotonic.py \
+ $(NULL)
+
+pythonyaml2dir=$(pythonmodulesdir)/pyyaml2
+dist_pythonyaml2_DATA = \
+ python_modules/pyyaml2/__init__.py \
+ python_modules/pyyaml2/composer.py \
+ python_modules/pyyaml2/constructor.py \
+ python_modules/pyyaml2/cyaml.py \
+ python_modules/pyyaml2/dumper.py \
+ python_modules/pyyaml2/emitter.py \
+ python_modules/pyyaml2/error.py \
+ python_modules/pyyaml2/events.py \
+ python_modules/pyyaml2/loader.py \
+ python_modules/pyyaml2/nodes.py \
+ python_modules/pyyaml2/parser.py \
+ python_modules/pyyaml2/reader.py \
+ python_modules/pyyaml2/representer.py \
+ python_modules/pyyaml2/resolver.py \
+ python_modules/pyyaml2/scanner.py \
+ python_modules/pyyaml2/serializer.py \
+ python_modules/pyyaml2/tokens.py \
+ $(NULL)
+
+pythonyaml3dir=$(pythonmodulesdir)/pyyaml3
+dist_pythonyaml3_DATA = \
+ python_modules/pyyaml3/__init__.py \
+ python_modules/pyyaml3/composer.py \
+ python_modules/pyyaml3/constructor.py \
+ python_modules/pyyaml3/cyaml.py \
+ python_modules/pyyaml3/dumper.py \
+ python_modules/pyyaml3/emitter.py \
+ python_modules/pyyaml3/error.py \
+ python_modules/pyyaml3/events.py \
+ python_modules/pyyaml3/loader.py \
+ python_modules/pyyaml3/nodes.py \
+ python_modules/pyyaml3/parser.py \
+ python_modules/pyyaml3/reader.py \
+ python_modules/pyyaml3/representer.py \
+ python_modules/pyyaml3/resolver.py \
+ python_modules/pyyaml3/scanner.py \
+ python_modules/pyyaml3/serializer.py \
+ python_modules/pyyaml3/tokens.py \
+ $(NULL)
+
+python_urllib3dir=$(pythonmodulesdir)/urllib3
+dist_python_urllib3_DATA = \
+ python_modules/urllib3/__init__.py \
+ python_modules/urllib3/_collections.py \
+ python_modules/urllib3/connection.py \
+ python_modules/urllib3/connectionpool.py \
+ python_modules/urllib3/exceptions.py \
+ python_modules/urllib3/fields.py \
+ python_modules/urllib3/filepost.py \
+ python_modules/urllib3/response.py \
+ python_modules/urllib3/poolmanager.py \
+ python_modules/urllib3/request.py \
+ $(NULL)
+
+python_urllib3_utildir=$(python_urllib3dir)/util
+dist_python_urllib3_util_DATA = \
+ python_modules/urllib3/util/__init__.py \
+ python_modules/urllib3/util/connection.py \
+ python_modules/urllib3/util/request.py \
+ python_modules/urllib3/util/response.py \
+ python_modules/urllib3/util/retry.py \
+ python_modules/urllib3/util/selectors.py \
+ python_modules/urllib3/util/ssl_.py \
+ python_modules/urllib3/util/timeout.py \
+ python_modules/urllib3/util/url.py \
+ python_modules/urllib3/util/wait.py \
+ $(NULL)
+
+python_urllib3_packagesdir=$(python_urllib3dir)/packages
+dist_python_urllib3_packages_DATA = \
+ python_modules/urllib3/packages/__init__.py \
+ python_modules/urllib3/packages/ordered_dict.py \
+ python_modules/urllib3/packages/six.py \
+ $(NULL)
+
+python_urllib3_backportsdir=$(python_urllib3_packagesdir)/backports
+dist_python_urllib3_backports_DATA = \
+ python_modules/urllib3/packages/backports/__init__.py \
+ python_modules/urllib3/packages/backports/makefile.py \
+ $(NULL)
+
+python_urllib3_ssl_match_hostnamedir=$(python_urllib3_packagesdir)/ssl_match_hostname
+dist_python_urllib3_ssl_match_hostname_DATA = \
+ python_modules/urllib3/packages/ssl_match_hostname/__init__.py \
+ python_modules/urllib3/packages/ssl_match_hostname/_implementation.py \
+ $(NULL)
+
+python_urllib3_contribdir=$(python_urllib3dir)/contrib
+dist_python_urllib3_contrib_DATA = \
+ python_modules/urllib3/contrib/__init__.py \
+ python_modules/urllib3/contrib/appengine.py \
+ python_modules/urllib3/contrib/ntlmpool.py \
+ python_modules/urllib3/contrib/pyopenssl.py \
+ python_modules/urllib3/contrib/securetransport.py \
+ python_modules/urllib3/contrib/socks.py \
+ $(NULL)
+
+python_urllib3_securetransportdir=$(python_urllib3_contribdir)/_securetransport
+dist_python_urllib3_securetransport_DATA = \
+ python_modules/urllib3/contrib/_securetransport/__init__.py \
+ python_modules/urllib3/contrib/_securetransport/bindings.py \
+ python_modules/urllib3/contrib/_securetransport/low_level.py \
+ $(NULL)
diff --git a/collectors/python.d.plugin/Makefile.in b/collectors/python.d.plugin/Makefile.in
new file mode 100644
index 000000000..ca2743d58
--- /dev/null
+++ b/collectors/python.d.plugin/Makefile.in
@@ -0,0 +1,1987 @@
+# Makefile.in generated by automake 1.14.1 from Makefile.am.
+# @configure_input@
+
+# Copyright (C) 1994-2013 Free Software Foundation, Inc.
+
+# This Makefile.in is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
+# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE.
+
+@SET_MAKE@
+
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+
+VPATH = @srcdir@
+am__is_gnu_make = test -n '$(MAKEFILE_LIST)' && test -n '$(MAKELEVEL)'
+am__make_running_with_option = \
+ case $${target_option-} in \
+ ?) ;; \
+ *) echo "am__make_running_with_option: internal error: invalid" \
+ "target option '$${target_option-}' specified" >&2; \
+ exit 1;; \
+ esac; \
+ has_opt=no; \
+ sane_makeflags=$$MAKEFLAGS; \
+ if $(am__is_gnu_make); then \
+ sane_makeflags=$$MFLAGS; \
+ else \
+ case $$MAKEFLAGS in \
+ *\\[\ \ ]*) \
+ bs=\\; \
+ sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \
+ | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \
+ esac; \
+ fi; \
+ skip_next=no; \
+ strip_trailopt () \
+ { \
+ flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \
+ }; \
+ for flg in $$sane_makeflags; do \
+ test $$skip_next = yes && { skip_next=no; continue; }; \
+ case $$flg in \
+ *=*|--*) continue;; \
+ -*I) strip_trailopt 'I'; skip_next=yes;; \
+ -*I?*) strip_trailopt 'I';; \
+ -*O) strip_trailopt 'O'; skip_next=yes;; \
+ -*O?*) strip_trailopt 'O';; \
+ -*l) strip_trailopt 'l'; skip_next=yes;; \
+ -*l?*) strip_trailopt 'l';; \
+ -[dEDm]) skip_next=yes;; \
+ -[JT]) skip_next=yes;; \
+ esac; \
+ case $$flg in \
+ *$$target_option*) has_opt=yes; break;; \
+ esac; \
+ done; \
+ test $$has_opt = yes
+am__make_dryrun = (target_option=n; $(am__make_running_with_option))
+am__make_keepgoing = (target_option=k; $(am__make_running_with_option))
+pkgdatadir = $(datadir)/@PACKAGE@
+pkgincludedir = $(includedir)/@PACKAGE@
+pkglibdir = $(libdir)/@PACKAGE@
+pkglibexecdir = $(libexecdir)/@PACKAGE@
+am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd
+install_sh_DATA = $(install_sh) -c -m 644
+install_sh_PROGRAM = $(install_sh) -c
+install_sh_SCRIPT = $(install_sh) -c
+INSTALL_HEADER = $(INSTALL_DATA)
+transform = $(program_transform_name)
+NORMAL_INSTALL = :
+PRE_INSTALL = :
+POST_INSTALL = :
+NORMAL_UNINSTALL = :
+PRE_UNINSTALL = :
+POST_UNINSTALL = :
+build_triplet = @build@
+host_triplet = @host@
+DIST_COMMON = $(top_srcdir)/build/subst.inc \
+ $(srcdir)/adaptec_raid/Makefile.inc \
+ $(srcdir)/apache/Makefile.inc $(srcdir)/beanstalk/Makefile.inc \
+ $(srcdir)/bind_rndc/Makefile.inc $(srcdir)/boinc/Makefile.inc \
+ $(srcdir)/ceph/Makefile.inc $(srcdir)/chrony/Makefile.inc \
+ $(srcdir)/couchdb/Makefile.inc $(srcdir)/cpufreq/Makefile.inc \
+ $(srcdir)/cpuidle/Makefile.inc $(srcdir)/dnsdist/Makefile.inc \
+ $(srcdir)/dns_query_time/Makefile.inc \
+ $(srcdir)/dockerd/Makefile.inc $(srcdir)/dovecot/Makefile.inc \
+ $(srcdir)/elasticsearch/Makefile.inc \
+ $(srcdir)/example/Makefile.inc $(srcdir)/exim/Makefile.inc \
+ $(srcdir)/fail2ban/Makefile.inc \
+ $(srcdir)/freeradius/Makefile.inc \
+ $(srcdir)/go_expvar/Makefile.inc \
+ $(srcdir)/haproxy/Makefile.inc $(srcdir)/hddtemp/Makefile.inc \
+ $(srcdir)/httpcheck/Makefile.inc \
+ $(srcdir)/icecast/Makefile.inc $(srcdir)/ipfs/Makefile.inc \
+ $(srcdir)/isc_dhcpd/Makefile.inc \
+ $(srcdir)/linux_power_supply/Makefile.inc \
+ $(srcdir)/litespeed/Makefile.inc $(srcdir)/logind/Makefile.inc \
+ $(srcdir)/mdstat/Makefile.inc $(srcdir)/megacli/Makefile.inc \
+ $(srcdir)/memcached/Makefile.inc \
+ $(srcdir)/mongodb/Makefile.inc $(srcdir)/monit/Makefile.inc \
+ $(srcdir)/mysql/Makefile.inc $(srcdir)/nginx/Makefile.inc \
+ $(srcdir)/nginx_plus/Makefile.inc $(srcdir)/nsd/Makefile.inc \
+ $(srcdir)/ntpd/Makefile.inc \
+ $(srcdir)/ovpn_status_log/Makefile.inc \
+ $(srcdir)/phpfpm/Makefile.inc $(srcdir)/portcheck/Makefile.inc \
+ $(srcdir)/postfix/Makefile.inc $(srcdir)/postgres/Makefile.inc \
+ $(srcdir)/powerdns/Makefile.inc \
+ $(srcdir)/proxysql/Makefile.inc $(srcdir)/puppet/Makefile.inc \
+ $(srcdir)/rabbitmq/Makefile.inc $(srcdir)/redis/Makefile.inc \
+ $(srcdir)/rethinkdbs/Makefile.inc \
+ $(srcdir)/retroshare/Makefile.inc $(srcdir)/samba/Makefile.inc \
+ $(srcdir)/sensors/Makefile.inc \
+ $(srcdir)/smartd_log/Makefile.inc \
+ $(srcdir)/spigotmc/Makefile.inc \
+ $(srcdir)/springboot/Makefile.inc $(srcdir)/squid/Makefile.inc \
+ $(srcdir)/tomcat/Makefile.inc $(srcdir)/traefik/Makefile.inc \
+ $(srcdir)/unbound/Makefile.inc $(srcdir)/uwsgi/Makefile.inc \
+ $(srcdir)/varnish/Makefile.inc $(srcdir)/w1sensor/Makefile.inc \
+ $(srcdir)/web_log/Makefile.inc $(srcdir)/Makefile.in \
+ $(srcdir)/Makefile.am $(dist_plugins_SCRIPTS) \
+ $(dist_python_SCRIPTS) $(dist_bases_DATA) \
+ $(dist_bases_framework_services_DATA) $(dist_libconfig_DATA) \
+ $(dist_noinst_DATA) $(dist_python_DATA) \
+ $(dist_python_urllib3_DATA) \
+ $(dist_python_urllib3_backports_DATA) \
+ $(dist_python_urllib3_contrib_DATA) \
+ $(dist_python_urllib3_packages_DATA) \
+ $(dist_python_urllib3_securetransport_DATA) \
+ $(dist_python_urllib3_ssl_match_hostname_DATA) \
+ $(dist_python_urllib3_util_DATA) $(dist_pythonconfig_DATA) \
+ $(dist_pythonmodules_DATA) $(dist_pythonyaml2_DATA) \
+ $(dist_pythonyaml3_DATA) $(dist_third_party_DATA) \
+ $(dist_userpythonconfig_DATA)
+subdir = collectors/python.d.plugin
+ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
+am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \
+ $(top_srcdir)/build/m4/ax_c__generic.m4 \
+ $(top_srcdir)/build/m4/ax_c_lto.m4 \
+ $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \
+ $(top_srcdir)/build/m4/ax_c_mallopt.m4 \
+ $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \
+ $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \
+ $(top_srcdir)/build/m4/ax_pthread.m4 \
+ $(top_srcdir)/build/m4/jemalloc.m4 \
+ $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac
+am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
+ $(ACLOCAL_M4)
+mkinstalldirs = $(install_sh) -d
+CONFIG_HEADER = $(top_builddir)/config.h
+CONFIG_CLEAN_FILES =
+CONFIG_CLEAN_VPATH_FILES =
+am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`;
+am__vpath_adj = case $$p in \
+ $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \
+ *) f=$$p;; \
+ esac;
+am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`;
+am__install_max = 40
+am__nobase_strip_setup = \
+ srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'`
+am__nobase_strip = \
+ for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||"
+am__nobase_list = $(am__nobase_strip_setup); \
+ for p in $$list; do echo "$$p $$p"; done | \
+ sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \
+ $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \
+ if (++n[$$2] == $(am__install_max)) \
+ { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \
+ END { for (dir in files) print dir, files[dir] }'
+am__base_list = \
+ sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \
+ sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g'
+am__uninstall_files_from_dir = { \
+ test -z "$$files" \
+ || { test ! -d "$$dir" && test ! -f "$$dir" && test ! -r "$$dir"; } \
+ || { echo " ( cd '$$dir' && rm -f" $$files ")"; \
+ $(am__cd) "$$dir" && rm -f $$files; }; \
+ }
+am__installdirs = "$(DESTDIR)$(pluginsdir)" "$(DESTDIR)$(pythondir)" \
+ "$(DESTDIR)$(basesdir)" \
+ "$(DESTDIR)$(bases_framework_servicesdir)" \
+ "$(DESTDIR)$(libconfigdir)" "$(DESTDIR)$(pythondir)" \
+ "$(DESTDIR)$(python_urllib3dir)" \
+ "$(DESTDIR)$(python_urllib3_backportsdir)" \
+ "$(DESTDIR)$(python_urllib3_contribdir)" \
+ "$(DESTDIR)$(python_urllib3_packagesdir)" \
+ "$(DESTDIR)$(python_urllib3_securetransportdir)" \
+ "$(DESTDIR)$(python_urllib3_ssl_match_hostnamedir)" \
+ "$(DESTDIR)$(python_urllib3_utildir)" \
+ "$(DESTDIR)$(pythonconfigdir)" "$(DESTDIR)$(pythonmodulesdir)" \
+ "$(DESTDIR)$(pythonyaml2dir)" "$(DESTDIR)$(pythonyaml3dir)" \
+ "$(DESTDIR)$(third_partydir)" \
+ "$(DESTDIR)$(userpythonconfigdir)"
+SCRIPTS = $(dist_plugins_SCRIPTS) $(dist_python_SCRIPTS)
+AM_V_P = $(am__v_P_@AM_V@)
+am__v_P_ = $(am__v_P_@AM_DEFAULT_V@)
+am__v_P_0 = false
+am__v_P_1 = :
+AM_V_GEN = $(am__v_GEN_@AM_V@)
+am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@)
+am__v_GEN_0 = @echo " GEN " $@;
+am__v_GEN_1 =
+AM_V_at = $(am__v_at_@AM_V@)
+am__v_at_ = $(am__v_at_@AM_DEFAULT_V@)
+am__v_at_0 = @
+am__v_at_1 =
+SOURCES =
+DIST_SOURCES =
+am__can_run_installinfo = \
+ case $$AM_UPDATE_INFO_DIR in \
+ n|no|NO) false;; \
+ *) (install-info --version) >/dev/null 2>&1;; \
+ esac
+DATA = $(dist_bases_DATA) $(dist_bases_framework_services_DATA) \
+ $(dist_libconfig_DATA) $(dist_noinst_DATA) $(dist_python_DATA) \
+ $(dist_python_urllib3_DATA) \
+ $(dist_python_urllib3_backports_DATA) \
+ $(dist_python_urllib3_contrib_DATA) \
+ $(dist_python_urllib3_packages_DATA) \
+ $(dist_python_urllib3_securetransport_DATA) \
+ $(dist_python_urllib3_ssl_match_hostname_DATA) \
+ $(dist_python_urllib3_util_DATA) $(dist_pythonconfig_DATA) \
+ $(dist_pythonmodules_DATA) $(dist_pythonyaml2_DATA) \
+ $(dist_pythonyaml3_DATA) $(dist_third_party_DATA) \
+ $(dist_userpythonconfig_DATA)
+am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP)
+DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
+ACLOCAL = @ACLOCAL@
+AMTAR = @AMTAR@
+AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@
+AUTOCONF = @AUTOCONF@
+AUTOHEADER = @AUTOHEADER@
+AUTOMAKE = @AUTOMAKE@
+AWK = @AWK@
+CC = @CC@
+CCDEPMODE = @CCDEPMODE@
+CFLAGS = @CFLAGS@
+CPP = @CPP@
+CPPFLAGS = @CPPFLAGS@
+CYGPATH_W = @CYGPATH_W@
+DEFS = @DEFS@
+DEPDIR = @DEPDIR@
+ECHO_C = @ECHO_C@
+ECHO_N = @ECHO_N@
+ECHO_T = @ECHO_T@
+EGREP = @EGREP@
+EXEEXT = @EXEEXT@
+GREP = @GREP@
+INSTALL = @INSTALL@
+INSTALL_DATA = @INSTALL_DATA@
+INSTALL_PROGRAM = @INSTALL_PROGRAM@
+INSTALL_SCRIPT = @INSTALL_SCRIPT@
+INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@
+IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@
+LDFLAGS = @LDFLAGS@
+LIBCAP_CFLAGS = @LIBCAP_CFLAGS@
+LIBCAP_LIBS = @LIBCAP_LIBS@
+LIBMNL_CFLAGS = @LIBMNL_CFLAGS@
+LIBMNL_LIBS = @LIBMNL_LIBS@
+LIBOBJS = @LIBOBJS@
+LIBS = @LIBS@
+LTLIBOBJS = @LTLIBOBJS@
+MAINT = @MAINT@
+MAKEINFO = @MAKEINFO@
+MATH_CFLAGS = @MATH_CFLAGS@
+MATH_LIBS = @MATH_LIBS@
+MKDIR_P = @MKDIR_P@
+NFACCT_CFLAGS = @NFACCT_CFLAGS@
+NFACCT_LIBS = @NFACCT_LIBS@
+OBJEXT = @OBJEXT@
+OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@
+OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@
+OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@
+OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@
+OPTIONAL_MATH_CLFAGS = @OPTIONAL_MATH_CLFAGS@
+OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@
+OPTIONAL_NFACCT_CLFAGS = @OPTIONAL_NFACCT_CLFAGS@
+OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@
+OPTIONAL_UUID_CLFAGS = @OPTIONAL_UUID_CLFAGS@
+OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@
+OPTIONAL_ZLIB_CLFAGS = @OPTIONAL_ZLIB_CLFAGS@
+OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@
+PACKAGE = @PACKAGE@
+PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@
+PACKAGE_NAME = @PACKAGE_NAME@
+PACKAGE_RPM_RELEASE = @PACKAGE_RPM_RELEASE@
+PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@
+PACKAGE_STRING = @PACKAGE_STRING@
+PACKAGE_TARNAME = @PACKAGE_TARNAME@
+PACKAGE_URL = @PACKAGE_URL@
+PACKAGE_VERSION = @PACKAGE_VERSION@
+PATH_SEPARATOR = @PATH_SEPARATOR@
+PKG_CONFIG = @PKG_CONFIG@
+PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@
+PKG_CONFIG_PATH = @PKG_CONFIG_PATH@
+PTHREAD_CC = @PTHREAD_CC@
+PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
+PTHREAD_LIBS = @PTHREAD_LIBS@
+SET_MAKE = @SET_MAKE@
+SHELL = @SHELL@
+SSE_CANDIDATE = @SSE_CANDIDATE@
+STRIP = @STRIP@
+UUID_CFLAGS = @UUID_CFLAGS@
+UUID_LIBS = @UUID_LIBS@
+VERSION = @VERSION@
+ZLIB_CFLAGS = @ZLIB_CFLAGS@
+ZLIB_LIBS = @ZLIB_LIBS@
+abs_builddir = @abs_builddir@
+abs_srcdir = @abs_srcdir@
+abs_top_builddir = @abs_top_builddir@
+abs_top_srcdir = @abs_top_srcdir@
+ac_ct_CC = @ac_ct_CC@
+am__include = @am__include@
+am__leading_dot = @am__leading_dot@
+am__quote = @am__quote@
+am__tar = @am__tar@
+am__untar = @am__untar@
+ax_pthread_config = @ax_pthread_config@
+bindir = @bindir@
+build = @build@
+build_alias = @build_alias@
+build_cpu = @build_cpu@
+build_os = @build_os@
+build_target = @build_target@
+build_vendor = @build_vendor@
+builddir = @builddir@
+cachedir = @cachedir@
+chartsdir = @chartsdir@
+configdir = @configdir@
+datadir = @datadir@
+datarootdir = @datarootdir@
+docdir = @docdir@
+dvidir = @dvidir@
+exec_prefix = @exec_prefix@
+has_jemalloc = @has_jemalloc@
+has_tcmalloc = @has_tcmalloc@
+host = @host@
+host_alias = @host_alias@
+host_cpu = @host_cpu@
+host_os = @host_os@
+host_vendor = @host_vendor@
+htmldir = @htmldir@
+includedir = @includedir@
+infodir = @infodir@
+install_sh = @install_sh@
+libconfigdir = @libconfigdir@
+libdir = @libdir@
+libexecdir = @libexecdir@
+localedir = @localedir@
+localstatedir = @localstatedir@
+logdir = @logdir@
+mandir = @mandir@
+mkdir_p = @mkdir_p@
+nodedir = @nodedir@
+oldincludedir = @oldincludedir@
+pdfdir = @pdfdir@
+pluginsdir = @pluginsdir@
+prefix = @prefix@
+program_transform_name = @program_transform_name@
+psdir = @psdir@
+pythondir = @pythondir@
+registrydir = @registrydir@
+sbindir = @sbindir@
+sharedstatedir = @sharedstatedir@
+srcdir = @srcdir@
+sysconfdir = @sysconfdir@
+target_alias = @target_alias@
+top_build_prefix = @top_build_prefix@
+top_builddir = @top_builddir@
+top_srcdir = @top_srcdir@
+varlibdir = @varlibdir@
+webdir = @webdir@
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+CLEANFILES = \
+ python.d.plugin \
+ $(NULL)
+
+SUFFIXES = .in
+dist_libconfig_DATA = \
+ python.d.conf \
+ $(NULL)
+
+dist_plugins_SCRIPTS = \
+ python.d.plugin \
+ $(NULL)
+
+
+# do not install these files, but include them in the distribution
+
+# do not install these files, but include them in the distribution
+
+# do not install these files, but include them in the distribution
+
+# do not install these files, but include them in the distribution
+
+# do not install these files, but include them in the distribution
+
+# do not install these files, but include them in the distribution
+
+# do not install these files, but include them in the distribution
+
+# do not install these files, but include them in the distribution
+
+# do not install these files, but include them in the distribution
+
+# do not install these files, but include them in the distribution
+
+# do not install these files, but include them in the distribution
+
+# do not install these files, but include them in the distribution
+
+# do not install these files, but include them in the distribution
+
+# do not install these files, but include them in the distribution
+
+# do not install these files, but include them in the distribution
+
+# do not install these files, but include them in the distribution
+
+# do not install these files, but include them in the distribution
+
+# do not install these files, but include them in the distribution
+
+# do not install these files, but include them in the distribution
+
+# do not install these files, but include them in the distribution
+
+# do not install these files, but include them in the distribution
+
+# do not install these files, but include them in the distribution
+
+# do not install these files, but include them in the distribution
+
+# do not install these files, but include them in the distribution
+
+# do not install these files, but include them in the distribution
+
+# do not install these files, but include them in the distribution
+
+# do not install these files, but include them in the distribution
+
+# do not install these files, but include them in the distribution
+
+# do not install these files, but include them in the distribution
+
+# do not install these files, but include them in the distribution
+
+# do not install these files, but include them in the distribution
+
+# do not install these files, but include them in the distribution
+
+# do not install these files, but include them in the distribution
+
+# do not install these files, but include them in the distribution
+
+# do not install these files, but include them in the distribution
+
+# do not install these files, but include them in the distribution
+
+# do not install these files, but include them in the distribution
+
+# do not install these files, but include them in the distribution
+
+# do not install these files, but include them in the distribution
+
+# do not install these files, but include them in the distribution
+
+# do not install these files, but include them in the distribution
+
+# do not install these files, but include them in the distribution
+
+# do not install these files, but include them in the distribution
+
+# do not install these files, but include them in the distribution
+
+# do not install these files, but include them in the distribution
+
+# do not install these files, but include them in the distribution
+
+# do not install these files, but include them in the distribution
+
+# do not install these files, but include them in the distribution
+
+# do not install these files, but include them in the distribution
+
+# do not install these files, but include them in the distribution
+
+# do not install these files, but include them in the distribution
+
+# do not install these files, but include them in the distribution
+
+# do not install these files, but include them in the distribution
+
+# do not install these files, but include them in the distribution
+
+# do not install these files, but include them in the distribution
+
+# do not install these files, but include them in the distribution
+
+# do not install these files, but include them in the distribution
+
+# do not install these files, but include them in the distribution
+
+# do not install these files, but include them in the distribution
+
+# do not install these files, but include them in the distribution
+
+# do not install these files, but include them in the distribution
+
+# do not install these files, but include them in the distribution
+
+# do not install these files, but include them in the distribution
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA = python.d.plugin.in README.md $(NULL) \
+ adaptec_raid/README.md adaptec_raid/Makefile.inc \
+ apache/README.md apache/Makefile.inc beanstalk/README.md \
+ beanstalk/Makefile.inc bind_rndc/README.md \
+ bind_rndc/Makefile.inc boinc/README.md boinc/Makefile.inc \
+ ceph/README.md ceph/Makefile.inc chrony/README.md \
+ chrony/Makefile.inc couchdb/README.md couchdb/Makefile.inc \
+ cpufreq/README.md cpufreq/Makefile.inc cpuidle/README.md \
+ cpuidle/Makefile.inc dnsdist/README.md dnsdist/Makefile.inc \
+ dns_query_time/README.md dns_query_time/Makefile.inc \
+ dockerd/README.md dockerd/Makefile.inc dovecot/README.md \
+ dovecot/Makefile.inc elasticsearch/README.md \
+ elasticsearch/Makefile.inc example/README.md \
+ example/Makefile.inc exim/README.md exim/Makefile.inc \
+ fail2ban/README.md fail2ban/Makefile.inc freeradius/README.md \
+ freeradius/Makefile.inc go_expvar/README.md \
+ go_expvar/Makefile.inc haproxy/README.md haproxy/Makefile.inc \
+ hddtemp/README.md hddtemp/Makefile.inc httpcheck/README.md \
+ httpcheck/Makefile.inc icecast/README.md icecast/Makefile.inc \
+ ipfs/README.md ipfs/Makefile.inc isc_dhcpd/README.md \
+ isc_dhcpd/Makefile.inc linux_power_supply/README.md \
+ linux_power_supply/Makefile.inc litespeed/README.md \
+ litespeed/Makefile.inc logind/README.md logind/Makefile.inc \
+ mdstat/README.md mdstat/Makefile.inc megacli/README.md \
+ megacli/Makefile.inc memcached/README.md \
+ memcached/Makefile.inc mongodb/README.md mongodb/Makefile.inc \
+ monit/README.md monit/Makefile.inc mysql/README.md \
+ mysql/Makefile.inc nginx/README.md nginx/Makefile.inc \
+ nginx_plus/README.md nginx_plus/Makefile.inc nsd/README.md \
+ nsd/Makefile.inc ntpd/README.md ntpd/Makefile.inc \
+ ovpn_status_log/README.md ovpn_status_log/Makefile.inc \
+ phpfpm/README.md phpfpm/Makefile.inc portcheck/README.md \
+ portcheck/Makefile.inc postfix/README.md postfix/Makefile.inc \
+ postgres/README.md postgres/Makefile.inc powerdns/README.md \
+ powerdns/Makefile.inc proxysql/README.md proxysql/Makefile.inc \
+ puppet/README.md puppet/Makefile.inc rabbitmq/README.md \
+ rabbitmq/Makefile.inc redis/README.md redis/Makefile.inc \
+ rethinkdbs/README.md rethinkdbs/Makefile.inc \
+ retroshare/README.md retroshare/Makefile.inc samba/README.md \
+ samba/Makefile.inc sensors/README.md sensors/Makefile.inc \
+ smartd_log/README.md smartd_log/Makefile.inc \
+ spigotmc/README.md spigotmc/Makefile.inc springboot/README.md \
+ springboot/Makefile.inc squid/README.md squid/Makefile.inc \
+ tomcat/README.md tomcat/Makefile.inc traefik/README.md \
+ traefik/Makefile.inc unbound/README.md unbound/Makefile.inc \
+ uwsgi/README.md uwsgi/Makefile.inc varnish/README.md \
+ varnish/Makefile.inc w1sensor/README.md w1sensor/Makefile.inc \
+ web_log/README.md web_log/Makefile.inc
+dist_python_SCRIPTS = \
+ $(NULL)
+
+
+# install these files
+
+# install these files
+
+# install these files
+
+# install these files
+
+# install these files
+
+# install these files
+
+# install these files
+
+# install these files
+
+# install these files
+
+# install these files
+
+# install these files
+
+# install these files
+
+# install these files
+
+# install these files
+
+# install these files
+
+# install these files
+
+# install these files
+
+# install these files
+
+# install these files
+
+# install these files
+
+# install these files
+
+# install these files
+
+# install these files
+
+# install these files
+
+# install these files
+
+# install these files
+
+# install these files
+
+# install these files
+
+# install these files
+
+# install these files
+
+# install these files
+
+# install these files
+
+# install these files
+
+# install these files
+
+# install these files
+
+# install these files
+
+# install these files
+
+# install these files
+
+# install these files
+
+# install these files
+
+# install these files
+
+# install these files
+
+# install these files
+
+# install these files
+
+# install these files
+
+# install these files
+
+# install these files
+
+# install these files
+
+# install these files
+
+# install these files
+
+# install these files
+
+# install these files
+
+# install these files
+
+# install these files
+
+# install these files
+
+# install these files
+
+# install these files
+
+# install these files
+
+# install these files
+
+# install these files
+
+# install these files
+
+# install these files
+
+# install these files
+
+# install these files
+dist_python_DATA = $(NULL) adaptec_raid/adaptec_raid.chart.py \
+ apache/apache.chart.py beanstalk/beanstalk.chart.py \
+ bind_rndc/bind_rndc.chart.py boinc/boinc.chart.py \
+ ceph/ceph.chart.py chrony/chrony.chart.py \
+ couchdb/couchdb.chart.py cpufreq/cpufreq.chart.py \
+ cpuidle/cpuidle.chart.py dnsdist/dnsdist.chart.py \
+ dns_query_time/dns_query_time.chart.py \
+ dockerd/dockerd.chart.py dovecot/dovecot.chart.py \
+ elasticsearch/elasticsearch.chart.py example/example.chart.py \
+ exim/exim.chart.py fail2ban/fail2ban.chart.py \
+ freeradius/freeradius.chart.py go_expvar/go_expvar.chart.py \
+ haproxy/haproxy.chart.py hddtemp/hddtemp.chart.py \
+ httpcheck/httpcheck.chart.py icecast/icecast.chart.py \
+ ipfs/ipfs.chart.py isc_dhcpd/isc_dhcpd.chart.py \
+ linux_power_supply/linux_power_supply.chart.py \
+ litespeed/litespeed.chart.py logind/logind.chart.py \
+ mdstat/mdstat.chart.py megacli/megacli.chart.py \
+ memcached/memcached.chart.py mongodb/mongodb.chart.py \
+ monit/monit.chart.py mysql/mysql.chart.py nginx/nginx.chart.py \
+ nginx_plus/nginx_plus.chart.py nsd/nsd.chart.py \
+ ntpd/ntpd.chart.py ovpn_status_log/ovpn_status_log.chart.py \
+ phpfpm/phpfpm.chart.py portcheck/portcheck.chart.py \
+ postfix/postfix.chart.py postgres/postgres.chart.py \
+ powerdns/powerdns.chart.py proxysql/proxysql.chart.py \
+ puppet/puppet.chart.py rabbitmq/rabbitmq.chart.py \
+ redis/redis.chart.py rethinkdbs/rethinkdbs.chart.py \
+ retroshare/retroshare.chart.py samba/samba.chart.py \
+ sensors/sensors.chart.py smartd_log/smartd_log.chart.py \
+ spigotmc/spigotmc.chart.py springboot/springboot.chart.py \
+ squid/squid.chart.py tomcat/tomcat.chart.py \
+ traefik/traefik.chart.py unbound/unbound.chart.py \
+ uwsgi/uwsgi.chart.py varnish/varnish.chart.py \
+ w1sensor/w1sensor.chart.py web_log/web_log.chart.py
+userpythonconfigdir = $(configdir)/python.d
+dist_userpythonconfig_DATA = \
+ $(top_srcdir)/installer/.keep \
+ $(NULL)
+
+pythonconfigdir = $(libconfigdir)/python.d
+dist_pythonconfig_DATA = $(top_srcdir)/installer/.keep $(NULL) \
+ adaptec_raid/adaptec_raid.conf apache/apache.conf \
+ beanstalk/beanstalk.conf bind_rndc/bind_rndc.conf \
+ boinc/boinc.conf ceph/ceph.conf chrony/chrony.conf \
+ couchdb/couchdb.conf cpufreq/cpufreq.conf cpuidle/cpuidle.conf \
+ dnsdist/dnsdist.conf dns_query_time/dns_query_time.conf \
+ dockerd/dockerd.conf dovecot/dovecot.conf \
+ elasticsearch/elasticsearch.conf example/example.conf \
+ exim/exim.conf fail2ban/fail2ban.conf \
+ freeradius/freeradius.conf go_expvar/go_expvar.conf \
+ haproxy/haproxy.conf hddtemp/hddtemp.conf \
+ httpcheck/httpcheck.conf icecast/icecast.conf ipfs/ipfs.conf \
+ isc_dhcpd/isc_dhcpd.conf \
+ linux_power_supply/linux_power_supply.conf \
+ litespeed/litespeed.conf logind/logind.conf mdstat/mdstat.conf \
+ megacli/megacli.conf memcached/memcached.conf \
+ mongodb/mongodb.conf monit/monit.conf mysql/mysql.conf \
+ nginx/nginx.conf nginx_plus/nginx_plus.conf nsd/nsd.conf \
+ ntpd/ntpd.conf ovpn_status_log/ovpn_status_log.conf \
+ phpfpm/phpfpm.conf portcheck/portcheck.conf \
+ postfix/postfix.conf postgres/postgres.conf \
+ powerdns/powerdns.conf proxysql/proxysql.conf \
+ puppet/puppet.conf rabbitmq/rabbitmq.conf redis/redis.conf \
+ rethinkdbs/rethinkdbs.conf retroshare/retroshare.conf \
+ samba/samba.conf sensors/sensors.conf \
+ smartd_log/smartd_log.conf spigotmc/spigotmc.conf \
+ springboot/springboot.conf squid/squid.conf tomcat/tomcat.conf \
+ traefik/traefik.conf unbound/unbound.conf uwsgi/uwsgi.conf \
+ varnish/varnish.conf w1sensor/w1sensor.conf \
+ web_log/web_log.conf
+pythonmodulesdir = $(pythondir)/python_modules
+dist_pythonmodules_DATA = \
+ python_modules/__init__.py \
+ $(NULL)
+
+basesdir = $(pythonmodulesdir)/bases
+dist_bases_DATA = \
+ python_modules/bases/__init__.py \
+ python_modules/bases/charts.py \
+ python_modules/bases/collection.py \
+ python_modules/bases/loaders.py \
+ python_modules/bases/loggers.py \
+ $(NULL)
+
+bases_framework_servicesdir = $(basesdir)/FrameworkServices
+dist_bases_framework_services_DATA = \
+ python_modules/bases/FrameworkServices/__init__.py \
+ python_modules/bases/FrameworkServices/ExecutableService.py \
+ python_modules/bases/FrameworkServices/LogService.py \
+ python_modules/bases/FrameworkServices/MySQLService.py \
+ python_modules/bases/FrameworkServices/SimpleService.py \
+ python_modules/bases/FrameworkServices/SocketService.py \
+ python_modules/bases/FrameworkServices/UrlService.py \
+ $(NULL)
+
+third_partydir = $(pythonmodulesdir)/third_party
+dist_third_party_DATA = \
+ python_modules/third_party/__init__.py \
+ python_modules/third_party/ordereddict.py \
+ python_modules/third_party/lm_sensors.py \
+ python_modules/third_party/mcrcon.py \
+ python_modules/third_party/boinc_client.py \
+ python_modules/third_party/monotonic.py \
+ $(NULL)
+
+pythonyaml2dir = $(pythonmodulesdir)/pyyaml2
+dist_pythonyaml2_DATA = \
+ python_modules/pyyaml2/__init__.py \
+ python_modules/pyyaml2/composer.py \
+ python_modules/pyyaml2/constructor.py \
+ python_modules/pyyaml2/cyaml.py \
+ python_modules/pyyaml2/dumper.py \
+ python_modules/pyyaml2/emitter.py \
+ python_modules/pyyaml2/error.py \
+ python_modules/pyyaml2/events.py \
+ python_modules/pyyaml2/loader.py \
+ python_modules/pyyaml2/nodes.py \
+ python_modules/pyyaml2/parser.py \
+ python_modules/pyyaml2/reader.py \
+ python_modules/pyyaml2/representer.py \
+ python_modules/pyyaml2/resolver.py \
+ python_modules/pyyaml2/scanner.py \
+ python_modules/pyyaml2/serializer.py \
+ python_modules/pyyaml2/tokens.py \
+ $(NULL)
+
+pythonyaml3dir = $(pythonmodulesdir)/pyyaml3
+dist_pythonyaml3_DATA = \
+ python_modules/pyyaml3/__init__.py \
+ python_modules/pyyaml3/composer.py \
+ python_modules/pyyaml3/constructor.py \
+ python_modules/pyyaml3/cyaml.py \
+ python_modules/pyyaml3/dumper.py \
+ python_modules/pyyaml3/emitter.py \
+ python_modules/pyyaml3/error.py \
+ python_modules/pyyaml3/events.py \
+ python_modules/pyyaml3/loader.py \
+ python_modules/pyyaml3/nodes.py \
+ python_modules/pyyaml3/parser.py \
+ python_modules/pyyaml3/reader.py \
+ python_modules/pyyaml3/representer.py \
+ python_modules/pyyaml3/resolver.py \
+ python_modules/pyyaml3/scanner.py \
+ python_modules/pyyaml3/serializer.py \
+ python_modules/pyyaml3/tokens.py \
+ $(NULL)
+
+python_urllib3dir = $(pythonmodulesdir)/urllib3
+dist_python_urllib3_DATA = \
+ python_modules/urllib3/__init__.py \
+ python_modules/urllib3/_collections.py \
+ python_modules/urllib3/connection.py \
+ python_modules/urllib3/connectionpool.py \
+ python_modules/urllib3/exceptions.py \
+ python_modules/urllib3/fields.py \
+ python_modules/urllib3/filepost.py \
+ python_modules/urllib3/response.py \
+ python_modules/urllib3/poolmanager.py \
+ python_modules/urllib3/request.py \
+ $(NULL)
+
+python_urllib3_utildir = $(python_urllib3dir)/util
+dist_python_urllib3_util_DATA = \
+ python_modules/urllib3/util/__init__.py \
+ python_modules/urllib3/util/connection.py \
+ python_modules/urllib3/util/request.py \
+ python_modules/urllib3/util/response.py \
+ python_modules/urllib3/util/retry.py \
+ python_modules/urllib3/util/selectors.py \
+ python_modules/urllib3/util/ssl_.py \
+ python_modules/urllib3/util/timeout.py \
+ python_modules/urllib3/util/url.py \
+ python_modules/urllib3/util/wait.py \
+ $(NULL)
+
+python_urllib3_packagesdir = $(python_urllib3dir)/packages
+dist_python_urllib3_packages_DATA = \
+ python_modules/urllib3/packages/__init__.py \
+ python_modules/urllib3/packages/ordered_dict.py \
+ python_modules/urllib3/packages/six.py \
+ $(NULL)
+
+python_urllib3_backportsdir = $(python_urllib3_packagesdir)/backports
+dist_python_urllib3_backports_DATA = \
+ python_modules/urllib3/packages/backports/__init__.py \
+ python_modules/urllib3/packages/backports/makefile.py \
+ $(NULL)
+
+python_urllib3_ssl_match_hostnamedir = $(python_urllib3_packagesdir)/ssl_match_hostname
+dist_python_urllib3_ssl_match_hostname_DATA = \
+ python_modules/urllib3/packages/ssl_match_hostname/__init__.py \
+ python_modules/urllib3/packages/ssl_match_hostname/_implementation.py \
+ $(NULL)
+
+python_urllib3_contribdir = $(python_urllib3dir)/contrib
+dist_python_urllib3_contrib_DATA = \
+ python_modules/urllib3/contrib/__init__.py \
+ python_modules/urllib3/contrib/appengine.py \
+ python_modules/urllib3/contrib/ntlmpool.py \
+ python_modules/urllib3/contrib/pyopenssl.py \
+ python_modules/urllib3/contrib/securetransport.py \
+ python_modules/urllib3/contrib/socks.py \
+ $(NULL)
+
+python_urllib3_securetransportdir = $(python_urllib3_contribdir)/_securetransport
+dist_python_urllib3_securetransport_DATA = \
+ python_modules/urllib3/contrib/_securetransport/__init__.py \
+ python_modules/urllib3/contrib/_securetransport/bindings.py \
+ python_modules/urllib3/contrib/_securetransport/low_level.py \
+ $(NULL)
+
+all: all-am
+
+.SUFFIXES:
+.SUFFIXES: .in
+$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(top_srcdir)/build/subst.inc $(srcdir)/adaptec_raid/Makefile.inc $(srcdir)/apache/Makefile.inc $(srcdir)/beanstalk/Makefile.inc $(srcdir)/bind_rndc/Makefile.inc $(srcdir)/boinc/Makefile.inc $(srcdir)/ceph/Makefile.inc $(srcdir)/chrony/Makefile.inc $(srcdir)/couchdb/Makefile.inc $(srcdir)/cpufreq/Makefile.inc $(srcdir)/cpuidle/Makefile.inc $(srcdir)/dnsdist/Makefile.inc $(srcdir)/dns_query_time/Makefile.inc $(srcdir)/dockerd/Makefile.inc $(srcdir)/dovecot/Makefile.inc $(srcdir)/elasticsearch/Makefile.inc $(srcdir)/example/Makefile.inc $(srcdir)/exim/Makefile.inc $(srcdir)/fail2ban/Makefile.inc $(srcdir)/freeradius/Makefile.inc $(srcdir)/go_expvar/Makefile.inc $(srcdir)/haproxy/Makefile.inc $(srcdir)/hddtemp/Makefile.inc $(srcdir)/httpcheck/Makefile.inc $(srcdir)/icecast/Makefile.inc $(srcdir)/ipfs/Makefile.inc $(srcdir)/isc_dhcpd/Makefile.inc $(srcdir)/linux_power_supply/Makefile.inc $(srcdir)/litespeed/Makefile.inc $(srcdir)/logind/Makefile.inc $(srcdir)/mdstat/Makefile.inc $(srcdir)/megacli/Makefile.inc $(srcdir)/memcached/Makefile.inc $(srcdir)/mongodb/Makefile.inc $(srcdir)/monit/Makefile.inc $(srcdir)/mysql/Makefile.inc $(srcdir)/nginx/Makefile.inc $(srcdir)/nginx_plus/Makefile.inc $(srcdir)/nsd/Makefile.inc $(srcdir)/ntpd/Makefile.inc $(srcdir)/ovpn_status_log/Makefile.inc $(srcdir)/phpfpm/Makefile.inc $(srcdir)/portcheck/Makefile.inc $(srcdir)/postfix/Makefile.inc $(srcdir)/postgres/Makefile.inc $(srcdir)/powerdns/Makefile.inc $(srcdir)/proxysql/Makefile.inc $(srcdir)/puppet/Makefile.inc $(srcdir)/rabbitmq/Makefile.inc $(srcdir)/redis/Makefile.inc $(srcdir)/rethinkdbs/Makefile.inc $(srcdir)/retroshare/Makefile.inc $(srcdir)/samba/Makefile.inc $(srcdir)/sensors/Makefile.inc $(srcdir)/smartd_log/Makefile.inc $(srcdir)/spigotmc/Makefile.inc $(srcdir)/springboot/Makefile.inc $(srcdir)/squid/Makefile.inc $(srcdir)/tomcat/Makefile.inc $(srcdir)/traefik/Makefile.inc $(srcdir)/unbound/Makefile.inc $(srcdir)/uwsgi/Makefile.inc $(srcdir)/varnish/Makefile.inc $(srcdir)/w1sensor/Makefile.inc $(srcdir)/web_log/Makefile.inc $(am__configure_deps)
+ @for dep in $?; do \
+ case '$(am__configure_deps)' in \
+ *$$dep*) \
+ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \
+ && { if test -f $@; then exit 0; else break; fi; }; \
+ exit 1;; \
+ esac; \
+ done; \
+ echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu collectors/python.d.plugin/Makefile'; \
+ $(am__cd) $(top_srcdir) && \
+ $(AUTOMAKE) --gnu collectors/python.d.plugin/Makefile
+.PRECIOUS: Makefile
+Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
+ @case '$?' in \
+ *config.status*) \
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \
+ *) \
+ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \
+ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \
+ esac;
+$(top_srcdir)/build/subst.inc $(srcdir)/adaptec_raid/Makefile.inc $(srcdir)/apache/Makefile.inc $(srcdir)/beanstalk/Makefile.inc $(srcdir)/bind_rndc/Makefile.inc $(srcdir)/boinc/Makefile.inc $(srcdir)/ceph/Makefile.inc $(srcdir)/chrony/Makefile.inc $(srcdir)/couchdb/Makefile.inc $(srcdir)/cpufreq/Makefile.inc $(srcdir)/cpuidle/Makefile.inc $(srcdir)/dnsdist/Makefile.inc $(srcdir)/dns_query_time/Makefile.inc $(srcdir)/dockerd/Makefile.inc $(srcdir)/dovecot/Makefile.inc $(srcdir)/elasticsearch/Makefile.inc $(srcdir)/example/Makefile.inc $(srcdir)/exim/Makefile.inc $(srcdir)/fail2ban/Makefile.inc $(srcdir)/freeradius/Makefile.inc $(srcdir)/go_expvar/Makefile.inc $(srcdir)/haproxy/Makefile.inc $(srcdir)/hddtemp/Makefile.inc $(srcdir)/httpcheck/Makefile.inc $(srcdir)/icecast/Makefile.inc $(srcdir)/ipfs/Makefile.inc $(srcdir)/isc_dhcpd/Makefile.inc $(srcdir)/linux_power_supply/Makefile.inc $(srcdir)/litespeed/Makefile.inc $(srcdir)/logind/Makefile.inc $(srcdir)/mdstat/Makefile.inc $(srcdir)/megacli/Makefile.inc $(srcdir)/memcached/Makefile.inc $(srcdir)/mongodb/Makefile.inc $(srcdir)/monit/Makefile.inc $(srcdir)/mysql/Makefile.inc $(srcdir)/nginx/Makefile.inc $(srcdir)/nginx_plus/Makefile.inc $(srcdir)/nsd/Makefile.inc $(srcdir)/ntpd/Makefile.inc $(srcdir)/ovpn_status_log/Makefile.inc $(srcdir)/phpfpm/Makefile.inc $(srcdir)/portcheck/Makefile.inc $(srcdir)/postfix/Makefile.inc $(srcdir)/postgres/Makefile.inc $(srcdir)/powerdns/Makefile.inc $(srcdir)/proxysql/Makefile.inc $(srcdir)/puppet/Makefile.inc $(srcdir)/rabbitmq/Makefile.inc $(srcdir)/redis/Makefile.inc $(srcdir)/rethinkdbs/Makefile.inc $(srcdir)/retroshare/Makefile.inc $(srcdir)/samba/Makefile.inc $(srcdir)/sensors/Makefile.inc $(srcdir)/smartd_log/Makefile.inc $(srcdir)/spigotmc/Makefile.inc $(srcdir)/springboot/Makefile.inc $(srcdir)/squid/Makefile.inc $(srcdir)/tomcat/Makefile.inc $(srcdir)/traefik/Makefile.inc $(srcdir)/unbound/Makefile.inc $(srcdir)/uwsgi/Makefile.inc $(srcdir)/varnish/Makefile.inc $(srcdir)/w1sensor/Makefile.inc $(srcdir)/web_log/Makefile.inc:
+
+$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+
+$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(am__aclocal_m4_deps):
+install-dist_pluginsSCRIPTS: $(dist_plugins_SCRIPTS)
+ @$(NORMAL_INSTALL)
+ @list='$(dist_plugins_SCRIPTS)'; test -n "$(pluginsdir)" || list=; \
+ if test -n "$$list"; then \
+ echo " $(MKDIR_P) '$(DESTDIR)$(pluginsdir)'"; \
+ $(MKDIR_P) "$(DESTDIR)$(pluginsdir)" || exit 1; \
+ fi; \
+ for p in $$list; do \
+ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
+ if test -f "$$d$$p"; then echo "$$d$$p"; echo "$$p"; else :; fi; \
+ done | \
+ sed -e 'p;s,.*/,,;n' \
+ -e 'h;s|.*|.|' \
+ -e 'p;x;s,.*/,,;$(transform)' | sed 'N;N;N;s,\n, ,g' | \
+ $(AWK) 'BEGIN { files["."] = ""; dirs["."] = 1; } \
+ { d=$$3; if (dirs[d] != 1) { print "d", d; dirs[d] = 1 } \
+ if ($$2 == $$4) { files[d] = files[d] " " $$1; \
+ if (++n[d] == $(am__install_max)) { \
+ print "f", d, files[d]; n[d] = 0; files[d] = "" } } \
+ else { print "f", d "/" $$4, $$1 } } \
+ END { for (d in files) print "f", d, files[d] }' | \
+ while read type dir files; do \
+ if test "$$dir" = .; then dir=; else dir=/$$dir; fi; \
+ test -z "$$files" || { \
+ echo " $(INSTALL_SCRIPT) $$files '$(DESTDIR)$(pluginsdir)$$dir'"; \
+ $(INSTALL_SCRIPT) $$files "$(DESTDIR)$(pluginsdir)$$dir" || exit $$?; \
+ } \
+ ; done
+
+uninstall-dist_pluginsSCRIPTS:
+ @$(NORMAL_UNINSTALL)
+ @list='$(dist_plugins_SCRIPTS)'; test -n "$(pluginsdir)" || exit 0; \
+ files=`for p in $$list; do echo "$$p"; done | \
+ sed -e 's,.*/,,;$(transform)'`; \
+ dir='$(DESTDIR)$(pluginsdir)'; $(am__uninstall_files_from_dir)
+install-dist_pythonSCRIPTS: $(dist_python_SCRIPTS)
+ @$(NORMAL_INSTALL)
+ @list='$(dist_python_SCRIPTS)'; test -n "$(pythondir)" || list=; \
+ if test -n "$$list"; then \
+ echo " $(MKDIR_P) '$(DESTDIR)$(pythondir)'"; \
+ $(MKDIR_P) "$(DESTDIR)$(pythondir)" || exit 1; \
+ fi; \
+ for p in $$list; do \
+ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
+ if test -f "$$d$$p"; then echo "$$d$$p"; echo "$$p"; else :; fi; \
+ done | \
+ sed -e 'p;s,.*/,,;n' \
+ -e 'h;s|.*|.|' \
+ -e 'p;x;s,.*/,,;$(transform)' | sed 'N;N;N;s,\n, ,g' | \
+ $(AWK) 'BEGIN { files["."] = ""; dirs["."] = 1; } \
+ { d=$$3; if (dirs[d] != 1) { print "d", d; dirs[d] = 1 } \
+ if ($$2 == $$4) { files[d] = files[d] " " $$1; \
+ if (++n[d] == $(am__install_max)) { \
+ print "f", d, files[d]; n[d] = 0; files[d] = "" } } \
+ else { print "f", d "/" $$4, $$1 } } \
+ END { for (d in files) print "f", d, files[d] }' | \
+ while read type dir files; do \
+ if test "$$dir" = .; then dir=; else dir=/$$dir; fi; \
+ test -z "$$files" || { \
+ echo " $(INSTALL_SCRIPT) $$files '$(DESTDIR)$(pythondir)$$dir'"; \
+ $(INSTALL_SCRIPT) $$files "$(DESTDIR)$(pythondir)$$dir" || exit $$?; \
+ } \
+ ; done
+
+uninstall-dist_pythonSCRIPTS:
+ @$(NORMAL_UNINSTALL)
+ @list='$(dist_python_SCRIPTS)'; test -n "$(pythondir)" || exit 0; \
+ files=`for p in $$list; do echo "$$p"; done | \
+ sed -e 's,.*/,,;$(transform)'`; \
+ dir='$(DESTDIR)$(pythondir)'; $(am__uninstall_files_from_dir)
+install-dist_basesDATA: $(dist_bases_DATA)
+ @$(NORMAL_INSTALL)
+ @list='$(dist_bases_DATA)'; test -n "$(basesdir)" || list=; \
+ if test -n "$$list"; then \
+ echo " $(MKDIR_P) '$(DESTDIR)$(basesdir)'"; \
+ $(MKDIR_P) "$(DESTDIR)$(basesdir)" || exit 1; \
+ fi; \
+ for p in $$list; do \
+ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
+ echo "$$d$$p"; \
+ done | $(am__base_list) | \
+ while read files; do \
+ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(basesdir)'"; \
+ $(INSTALL_DATA) $$files "$(DESTDIR)$(basesdir)" || exit $$?; \
+ done
+
+uninstall-dist_basesDATA:
+ @$(NORMAL_UNINSTALL)
+ @list='$(dist_bases_DATA)'; test -n "$(basesdir)" || list=; \
+ files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
+ dir='$(DESTDIR)$(basesdir)'; $(am__uninstall_files_from_dir)
+install-dist_bases_framework_servicesDATA: $(dist_bases_framework_services_DATA)
+ @$(NORMAL_INSTALL)
+ @list='$(dist_bases_framework_services_DATA)'; test -n "$(bases_framework_servicesdir)" || list=; \
+ if test -n "$$list"; then \
+ echo " $(MKDIR_P) '$(DESTDIR)$(bases_framework_servicesdir)'"; \
+ $(MKDIR_P) "$(DESTDIR)$(bases_framework_servicesdir)" || exit 1; \
+ fi; \
+ for p in $$list; do \
+ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
+ echo "$$d$$p"; \
+ done | $(am__base_list) | \
+ while read files; do \
+ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(bases_framework_servicesdir)'"; \
+ $(INSTALL_DATA) $$files "$(DESTDIR)$(bases_framework_servicesdir)" || exit $$?; \
+ done
+
+uninstall-dist_bases_framework_servicesDATA:
+ @$(NORMAL_UNINSTALL)
+ @list='$(dist_bases_framework_services_DATA)'; test -n "$(bases_framework_servicesdir)" || list=; \
+ files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
+ dir='$(DESTDIR)$(bases_framework_servicesdir)'; $(am__uninstall_files_from_dir)
+install-dist_libconfigDATA: $(dist_libconfig_DATA)
+ @$(NORMAL_INSTALL)
+ @list='$(dist_libconfig_DATA)'; test -n "$(libconfigdir)" || list=; \
+ if test -n "$$list"; then \
+ echo " $(MKDIR_P) '$(DESTDIR)$(libconfigdir)'"; \
+ $(MKDIR_P) "$(DESTDIR)$(libconfigdir)" || exit 1; \
+ fi; \
+ for p in $$list; do \
+ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
+ echo "$$d$$p"; \
+ done | $(am__base_list) | \
+ while read files; do \
+ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(libconfigdir)'"; \
+ $(INSTALL_DATA) $$files "$(DESTDIR)$(libconfigdir)" || exit $$?; \
+ done
+
+uninstall-dist_libconfigDATA:
+ @$(NORMAL_UNINSTALL)
+ @list='$(dist_libconfig_DATA)'; test -n "$(libconfigdir)" || list=; \
+ files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
+ dir='$(DESTDIR)$(libconfigdir)'; $(am__uninstall_files_from_dir)
+install-dist_pythonDATA: $(dist_python_DATA)
+ @$(NORMAL_INSTALL)
+ @list='$(dist_python_DATA)'; test -n "$(pythondir)" || list=; \
+ if test -n "$$list"; then \
+ echo " $(MKDIR_P) '$(DESTDIR)$(pythondir)'"; \
+ $(MKDIR_P) "$(DESTDIR)$(pythondir)" || exit 1; \
+ fi; \
+ for p in $$list; do \
+ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
+ echo "$$d$$p"; \
+ done | $(am__base_list) | \
+ while read files; do \
+ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(pythondir)'"; \
+ $(INSTALL_DATA) $$files "$(DESTDIR)$(pythondir)" || exit $$?; \
+ done
+
+uninstall-dist_pythonDATA:
+ @$(NORMAL_UNINSTALL)
+ @list='$(dist_python_DATA)'; test -n "$(pythondir)" || list=; \
+ files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
+ dir='$(DESTDIR)$(pythondir)'; $(am__uninstall_files_from_dir)
+install-dist_python_urllib3DATA: $(dist_python_urllib3_DATA)
+ @$(NORMAL_INSTALL)
+ @list='$(dist_python_urllib3_DATA)'; test -n "$(python_urllib3dir)" || list=; \
+ if test -n "$$list"; then \
+ echo " $(MKDIR_P) '$(DESTDIR)$(python_urllib3dir)'"; \
+ $(MKDIR_P) "$(DESTDIR)$(python_urllib3dir)" || exit 1; \
+ fi; \
+ for p in $$list; do \
+ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
+ echo "$$d$$p"; \
+ done | $(am__base_list) | \
+ while read files; do \
+ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(python_urllib3dir)'"; \
+ $(INSTALL_DATA) $$files "$(DESTDIR)$(python_urllib3dir)" || exit $$?; \
+ done
+
+uninstall-dist_python_urllib3DATA:
+ @$(NORMAL_UNINSTALL)
+ @list='$(dist_python_urllib3_DATA)'; test -n "$(python_urllib3dir)" || list=; \
+ files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
+ dir='$(DESTDIR)$(python_urllib3dir)'; $(am__uninstall_files_from_dir)
+install-dist_python_urllib3_backportsDATA: $(dist_python_urllib3_backports_DATA)
+ @$(NORMAL_INSTALL)
+ @list='$(dist_python_urllib3_backports_DATA)'; test -n "$(python_urllib3_backportsdir)" || list=; \
+ if test -n "$$list"; then \
+ echo " $(MKDIR_P) '$(DESTDIR)$(python_urllib3_backportsdir)'"; \
+ $(MKDIR_P) "$(DESTDIR)$(python_urllib3_backportsdir)" || exit 1; \
+ fi; \
+ for p in $$list; do \
+ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
+ echo "$$d$$p"; \
+ done | $(am__base_list) | \
+ while read files; do \
+ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(python_urllib3_backportsdir)'"; \
+ $(INSTALL_DATA) $$files "$(DESTDIR)$(python_urllib3_backportsdir)" || exit $$?; \
+ done
+
+uninstall-dist_python_urllib3_backportsDATA:
+ @$(NORMAL_UNINSTALL)
+ @list='$(dist_python_urllib3_backports_DATA)'; test -n "$(python_urllib3_backportsdir)" || list=; \
+ files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
+ dir='$(DESTDIR)$(python_urllib3_backportsdir)'; $(am__uninstall_files_from_dir)
+install-dist_python_urllib3_contribDATA: $(dist_python_urllib3_contrib_DATA)
+ @$(NORMAL_INSTALL)
+ @list='$(dist_python_urllib3_contrib_DATA)'; test -n "$(python_urllib3_contribdir)" || list=; \
+ if test -n "$$list"; then \
+ echo " $(MKDIR_P) '$(DESTDIR)$(python_urllib3_contribdir)'"; \
+ $(MKDIR_P) "$(DESTDIR)$(python_urllib3_contribdir)" || exit 1; \
+ fi; \
+ for p in $$list; do \
+ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
+ echo "$$d$$p"; \
+ done | $(am__base_list) | \
+ while read files; do \
+ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(python_urllib3_contribdir)'"; \
+ $(INSTALL_DATA) $$files "$(DESTDIR)$(python_urllib3_contribdir)" || exit $$?; \
+ done
+
+uninstall-dist_python_urllib3_contribDATA:
+ @$(NORMAL_UNINSTALL)
+ @list='$(dist_python_urllib3_contrib_DATA)'; test -n "$(python_urllib3_contribdir)" || list=; \
+ files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
+ dir='$(DESTDIR)$(python_urllib3_contribdir)'; $(am__uninstall_files_from_dir)
+install-dist_python_urllib3_packagesDATA: $(dist_python_urllib3_packages_DATA)
+ @$(NORMAL_INSTALL)
+ @list='$(dist_python_urllib3_packages_DATA)'; test -n "$(python_urllib3_packagesdir)" || list=; \
+ if test -n "$$list"; then \
+ echo " $(MKDIR_P) '$(DESTDIR)$(python_urllib3_packagesdir)'"; \
+ $(MKDIR_P) "$(DESTDIR)$(python_urllib3_packagesdir)" || exit 1; \
+ fi; \
+ for p in $$list; do \
+ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
+ echo "$$d$$p"; \
+ done | $(am__base_list) | \
+ while read files; do \
+ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(python_urllib3_packagesdir)'"; \
+ $(INSTALL_DATA) $$files "$(DESTDIR)$(python_urllib3_packagesdir)" || exit $$?; \
+ done
+
+uninstall-dist_python_urllib3_packagesDATA:
+ @$(NORMAL_UNINSTALL)
+ @list='$(dist_python_urllib3_packages_DATA)'; test -n "$(python_urllib3_packagesdir)" || list=; \
+ files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
+ dir='$(DESTDIR)$(python_urllib3_packagesdir)'; $(am__uninstall_files_from_dir)
+install-dist_python_urllib3_securetransportDATA: $(dist_python_urllib3_securetransport_DATA)
+ @$(NORMAL_INSTALL)
+ @list='$(dist_python_urllib3_securetransport_DATA)'; test -n "$(python_urllib3_securetransportdir)" || list=; \
+ if test -n "$$list"; then \
+ echo " $(MKDIR_P) '$(DESTDIR)$(python_urllib3_securetransportdir)'"; \
+ $(MKDIR_P) "$(DESTDIR)$(python_urllib3_securetransportdir)" || exit 1; \
+ fi; \
+ for p in $$list; do \
+ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
+ echo "$$d$$p"; \
+ done | $(am__base_list) | \
+ while read files; do \
+ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(python_urllib3_securetransportdir)'"; \
+ $(INSTALL_DATA) $$files "$(DESTDIR)$(python_urllib3_securetransportdir)" || exit $$?; \
+ done
+
+uninstall-dist_python_urllib3_securetransportDATA:
+ @$(NORMAL_UNINSTALL)
+ @list='$(dist_python_urllib3_securetransport_DATA)'; test -n "$(python_urllib3_securetransportdir)" || list=; \
+ files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
+ dir='$(DESTDIR)$(python_urllib3_securetransportdir)'; $(am__uninstall_files_from_dir)
+install-dist_python_urllib3_ssl_match_hostnameDATA: $(dist_python_urllib3_ssl_match_hostname_DATA)
+ @$(NORMAL_INSTALL)
+ @list='$(dist_python_urllib3_ssl_match_hostname_DATA)'; test -n "$(python_urllib3_ssl_match_hostnamedir)" || list=; \
+ if test -n "$$list"; then \
+ echo " $(MKDIR_P) '$(DESTDIR)$(python_urllib3_ssl_match_hostnamedir)'"; \
+ $(MKDIR_P) "$(DESTDIR)$(python_urllib3_ssl_match_hostnamedir)" || exit 1; \
+ fi; \
+ for p in $$list; do \
+ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
+ echo "$$d$$p"; \
+ done | $(am__base_list) | \
+ while read files; do \
+ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(python_urllib3_ssl_match_hostnamedir)'"; \
+ $(INSTALL_DATA) $$files "$(DESTDIR)$(python_urllib3_ssl_match_hostnamedir)" || exit $$?; \
+ done
+
+uninstall-dist_python_urllib3_ssl_match_hostnameDATA:
+ @$(NORMAL_UNINSTALL)
+ @list='$(dist_python_urllib3_ssl_match_hostname_DATA)'; test -n "$(python_urllib3_ssl_match_hostnamedir)" || list=; \
+ files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
+ dir='$(DESTDIR)$(python_urllib3_ssl_match_hostnamedir)'; $(am__uninstall_files_from_dir)
+install-dist_python_urllib3_utilDATA: $(dist_python_urllib3_util_DATA)
+ @$(NORMAL_INSTALL)
+ @list='$(dist_python_urllib3_util_DATA)'; test -n "$(python_urllib3_utildir)" || list=; \
+ if test -n "$$list"; then \
+ echo " $(MKDIR_P) '$(DESTDIR)$(python_urllib3_utildir)'"; \
+ $(MKDIR_P) "$(DESTDIR)$(python_urllib3_utildir)" || exit 1; \
+ fi; \
+ for p in $$list; do \
+ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
+ echo "$$d$$p"; \
+ done | $(am__base_list) | \
+ while read files; do \
+ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(python_urllib3_utildir)'"; \
+ $(INSTALL_DATA) $$files "$(DESTDIR)$(python_urllib3_utildir)" || exit $$?; \
+ done
+
+uninstall-dist_python_urllib3_utilDATA:
+ @$(NORMAL_UNINSTALL)
+ @list='$(dist_python_urllib3_util_DATA)'; test -n "$(python_urllib3_utildir)" || list=; \
+ files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
+ dir='$(DESTDIR)$(python_urllib3_utildir)'; $(am__uninstall_files_from_dir)
+install-dist_pythonconfigDATA: $(dist_pythonconfig_DATA)
+ @$(NORMAL_INSTALL)
+ @list='$(dist_pythonconfig_DATA)'; test -n "$(pythonconfigdir)" || list=; \
+ if test -n "$$list"; then \
+ echo " $(MKDIR_P) '$(DESTDIR)$(pythonconfigdir)'"; \
+ $(MKDIR_P) "$(DESTDIR)$(pythonconfigdir)" || exit 1; \
+ fi; \
+ for p in $$list; do \
+ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
+ echo "$$d$$p"; \
+ done | $(am__base_list) | \
+ while read files; do \
+ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(pythonconfigdir)'"; \
+ $(INSTALL_DATA) $$files "$(DESTDIR)$(pythonconfigdir)" || exit $$?; \
+ done
+
+uninstall-dist_pythonconfigDATA:
+ @$(NORMAL_UNINSTALL)
+ @list='$(dist_pythonconfig_DATA)'; test -n "$(pythonconfigdir)" || list=; \
+ files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
+ dir='$(DESTDIR)$(pythonconfigdir)'; $(am__uninstall_files_from_dir)
+install-dist_pythonmodulesDATA: $(dist_pythonmodules_DATA)
+ @$(NORMAL_INSTALL)
+ @list='$(dist_pythonmodules_DATA)'; test -n "$(pythonmodulesdir)" || list=; \
+ if test -n "$$list"; then \
+ echo " $(MKDIR_P) '$(DESTDIR)$(pythonmodulesdir)'"; \
+ $(MKDIR_P) "$(DESTDIR)$(pythonmodulesdir)" || exit 1; \
+ fi; \
+ for p in $$list; do \
+ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
+ echo "$$d$$p"; \
+ done | $(am__base_list) | \
+ while read files; do \
+ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(pythonmodulesdir)'"; \
+ $(INSTALL_DATA) $$files "$(DESTDIR)$(pythonmodulesdir)" || exit $$?; \
+ done
+
+uninstall-dist_pythonmodulesDATA:
+ @$(NORMAL_UNINSTALL)
+ @list='$(dist_pythonmodules_DATA)'; test -n "$(pythonmodulesdir)" || list=; \
+ files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
+ dir='$(DESTDIR)$(pythonmodulesdir)'; $(am__uninstall_files_from_dir)
+install-dist_pythonyaml2DATA: $(dist_pythonyaml2_DATA)
+ @$(NORMAL_INSTALL)
+ @list='$(dist_pythonyaml2_DATA)'; test -n "$(pythonyaml2dir)" || list=; \
+ if test -n "$$list"; then \
+ echo " $(MKDIR_P) '$(DESTDIR)$(pythonyaml2dir)'"; \
+ $(MKDIR_P) "$(DESTDIR)$(pythonyaml2dir)" || exit 1; \
+ fi; \
+ for p in $$list; do \
+ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
+ echo "$$d$$p"; \
+ done | $(am__base_list) | \
+ while read files; do \
+ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(pythonyaml2dir)'"; \
+ $(INSTALL_DATA) $$files "$(DESTDIR)$(pythonyaml2dir)" || exit $$?; \
+ done
+
+uninstall-dist_pythonyaml2DATA:
+ @$(NORMAL_UNINSTALL)
+ @list='$(dist_pythonyaml2_DATA)'; test -n "$(pythonyaml2dir)" || list=; \
+ files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
+ dir='$(DESTDIR)$(pythonyaml2dir)'; $(am__uninstall_files_from_dir)
+install-dist_pythonyaml3DATA: $(dist_pythonyaml3_DATA)
+ @$(NORMAL_INSTALL)
+ @list='$(dist_pythonyaml3_DATA)'; test -n "$(pythonyaml3dir)" || list=; \
+ if test -n "$$list"; then \
+ echo " $(MKDIR_P) '$(DESTDIR)$(pythonyaml3dir)'"; \
+ $(MKDIR_P) "$(DESTDIR)$(pythonyaml3dir)" || exit 1; \
+ fi; \
+ for p in $$list; do \
+ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
+ echo "$$d$$p"; \
+ done | $(am__base_list) | \
+ while read files; do \
+ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(pythonyaml3dir)'"; \
+ $(INSTALL_DATA) $$files "$(DESTDIR)$(pythonyaml3dir)" || exit $$?; \
+ done
+
+uninstall-dist_pythonyaml3DATA:
+ @$(NORMAL_UNINSTALL)
+ @list='$(dist_pythonyaml3_DATA)'; test -n "$(pythonyaml3dir)" || list=; \
+ files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
+ dir='$(DESTDIR)$(pythonyaml3dir)'; $(am__uninstall_files_from_dir)
+install-dist_third_partyDATA: $(dist_third_party_DATA)
+ @$(NORMAL_INSTALL)
+ @list='$(dist_third_party_DATA)'; test -n "$(third_partydir)" || list=; \
+ if test -n "$$list"; then \
+ echo " $(MKDIR_P) '$(DESTDIR)$(third_partydir)'"; \
+ $(MKDIR_P) "$(DESTDIR)$(third_partydir)" || exit 1; \
+ fi; \
+ for p in $$list; do \
+ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
+ echo "$$d$$p"; \
+ done | $(am__base_list) | \
+ while read files; do \
+ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(third_partydir)'"; \
+ $(INSTALL_DATA) $$files "$(DESTDIR)$(third_partydir)" || exit $$?; \
+ done
+
+uninstall-dist_third_partyDATA:
+ @$(NORMAL_UNINSTALL)
+ @list='$(dist_third_party_DATA)'; test -n "$(third_partydir)" || list=; \
+ files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
+ dir='$(DESTDIR)$(third_partydir)'; $(am__uninstall_files_from_dir)
+install-dist_userpythonconfigDATA: $(dist_userpythonconfig_DATA)
+ @$(NORMAL_INSTALL)
+ @list='$(dist_userpythonconfig_DATA)'; test -n "$(userpythonconfigdir)" || list=; \
+ if test -n "$$list"; then \
+ echo " $(MKDIR_P) '$(DESTDIR)$(userpythonconfigdir)'"; \
+ $(MKDIR_P) "$(DESTDIR)$(userpythonconfigdir)" || exit 1; \
+ fi; \
+ for p in $$list; do \
+ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
+ echo "$$d$$p"; \
+ done | $(am__base_list) | \
+ while read files; do \
+ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(userpythonconfigdir)'"; \
+ $(INSTALL_DATA) $$files "$(DESTDIR)$(userpythonconfigdir)" || exit $$?; \
+ done
+
+uninstall-dist_userpythonconfigDATA:
+ @$(NORMAL_UNINSTALL)
+ @list='$(dist_userpythonconfig_DATA)'; test -n "$(userpythonconfigdir)" || list=; \
+ files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
+ dir='$(DESTDIR)$(userpythonconfigdir)'; $(am__uninstall_files_from_dir)
+tags TAGS:
+
+ctags CTAGS:
+
+cscope cscopelist:
+
+
+distdir: $(DISTFILES)
+ @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+ list='$(DISTFILES)'; \
+ dist_files=`for file in $$list; do echo $$file; done | \
+ sed -e "s|^$$srcdirstrip/||;t" \
+ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \
+ case $$dist_files in \
+ */*) $(MKDIR_P) `echo "$$dist_files" | \
+ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \
+ sort -u` ;; \
+ esac; \
+ for file in $$dist_files; do \
+ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \
+ if test -d $$d/$$file; then \
+ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \
+ if test -d "$(distdir)/$$file"; then \
+ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+ fi; \
+ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \
+ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \
+ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+ fi; \
+ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \
+ else \
+ test -f "$(distdir)/$$file" \
+ || cp -p $$d/$$file "$(distdir)/$$file" \
+ || exit 1; \
+ fi; \
+ done
+check-am: all-am
+check: check-am
+all-am: Makefile $(SCRIPTS) $(DATA)
+installdirs:
+ for dir in "$(DESTDIR)$(pluginsdir)" "$(DESTDIR)$(pythondir)" "$(DESTDIR)$(basesdir)" "$(DESTDIR)$(bases_framework_servicesdir)" "$(DESTDIR)$(libconfigdir)" "$(DESTDIR)$(pythondir)" "$(DESTDIR)$(python_urllib3dir)" "$(DESTDIR)$(python_urllib3_backportsdir)" "$(DESTDIR)$(python_urllib3_contribdir)" "$(DESTDIR)$(python_urllib3_packagesdir)" "$(DESTDIR)$(python_urllib3_securetransportdir)" "$(DESTDIR)$(python_urllib3_ssl_match_hostnamedir)" "$(DESTDIR)$(python_urllib3_utildir)" "$(DESTDIR)$(pythonconfigdir)" "$(DESTDIR)$(pythonmodulesdir)" "$(DESTDIR)$(pythonyaml2dir)" "$(DESTDIR)$(pythonyaml3dir)" "$(DESTDIR)$(third_partydir)" "$(DESTDIR)$(userpythonconfigdir)"; do \
+ test -z "$$dir" || $(MKDIR_P) "$$dir"; \
+ done
+install: install-am
+install-exec: install-exec-am
+install-data: install-data-am
+uninstall: uninstall-am
+
+install-am: all-am
+ @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
+
+installcheck: installcheck-am
+install-strip:
+ if test -z '$(STRIP)'; then \
+ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+ install; \
+ else \
+ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+ "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \
+ fi
+mostlyclean-generic:
+
+clean-generic:
+ -test -z "$(CLEANFILES)" || rm -f $(CLEANFILES)
+
+distclean-generic:
+ -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
+ -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES)
+
+maintainer-clean-generic:
+ @echo "This command is intended for maintainers to use"
+ @echo "it deletes files that may require special tools to rebuild."
+ -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES)
+clean: clean-am
+
+clean-am: clean-generic mostlyclean-am
+
+distclean: distclean-am
+ -rm -f Makefile
+distclean-am: clean-am distclean-generic
+
+dvi: dvi-am
+
+dvi-am:
+
+html: html-am
+
+html-am:
+
+info: info-am
+
+info-am:
+
+install-data-am: install-dist_basesDATA \
+ install-dist_bases_framework_servicesDATA \
+ install-dist_libconfigDATA install-dist_pluginsSCRIPTS \
+ install-dist_pythonDATA install-dist_pythonSCRIPTS \
+ install-dist_python_urllib3DATA \
+ install-dist_python_urllib3_backportsDATA \
+ install-dist_python_urllib3_contribDATA \
+ install-dist_python_urllib3_packagesDATA \
+ install-dist_python_urllib3_securetransportDATA \
+ install-dist_python_urllib3_ssl_match_hostnameDATA \
+ install-dist_python_urllib3_utilDATA \
+ install-dist_pythonconfigDATA install-dist_pythonmodulesDATA \
+ install-dist_pythonyaml2DATA install-dist_pythonyaml3DATA \
+ install-dist_third_partyDATA install-dist_userpythonconfigDATA
+
+install-dvi: install-dvi-am
+
+install-dvi-am:
+
+install-exec-am:
+
+install-html: install-html-am
+
+install-html-am:
+
+install-info: install-info-am
+
+install-info-am:
+
+install-man:
+
+install-pdf: install-pdf-am
+
+install-pdf-am:
+
+install-ps: install-ps-am
+
+install-ps-am:
+
+installcheck-am:
+
+maintainer-clean: maintainer-clean-am
+ -rm -f Makefile
+maintainer-clean-am: distclean-am maintainer-clean-generic
+
+mostlyclean: mostlyclean-am
+
+mostlyclean-am: mostlyclean-generic
+
+pdf: pdf-am
+
+pdf-am:
+
+ps: ps-am
+
+ps-am:
+
+uninstall-am: uninstall-dist_basesDATA \
+ uninstall-dist_bases_framework_servicesDATA \
+ uninstall-dist_libconfigDATA uninstall-dist_pluginsSCRIPTS \
+ uninstall-dist_pythonDATA uninstall-dist_pythonSCRIPTS \
+ uninstall-dist_python_urllib3DATA \
+ uninstall-dist_python_urllib3_backportsDATA \
+ uninstall-dist_python_urllib3_contribDATA \
+ uninstall-dist_python_urllib3_packagesDATA \
+ uninstall-dist_python_urllib3_securetransportDATA \
+ uninstall-dist_python_urllib3_ssl_match_hostnameDATA \
+ uninstall-dist_python_urllib3_utilDATA \
+ uninstall-dist_pythonconfigDATA \
+ uninstall-dist_pythonmodulesDATA \
+ uninstall-dist_pythonyaml2DATA uninstall-dist_pythonyaml3DATA \
+ uninstall-dist_third_partyDATA \
+ uninstall-dist_userpythonconfigDATA
+
+.MAKE: install-am install-strip
+
+.PHONY: all all-am check check-am clean clean-generic cscopelist-am \
+ ctags-am distclean distclean-generic distdir dvi dvi-am html \
+ html-am info info-am install install-am install-data \
+ install-data-am install-dist_basesDATA \
+ install-dist_bases_framework_servicesDATA \
+ install-dist_libconfigDATA install-dist_pluginsSCRIPTS \
+ install-dist_pythonDATA install-dist_pythonSCRIPTS \
+ install-dist_python_urllib3DATA \
+ install-dist_python_urllib3_backportsDATA \
+ install-dist_python_urllib3_contribDATA \
+ install-dist_python_urllib3_packagesDATA \
+ install-dist_python_urllib3_securetransportDATA \
+ install-dist_python_urllib3_ssl_match_hostnameDATA \
+ install-dist_python_urllib3_utilDATA \
+ install-dist_pythonconfigDATA install-dist_pythonmodulesDATA \
+ install-dist_pythonyaml2DATA install-dist_pythonyaml3DATA \
+ install-dist_third_partyDATA install-dist_userpythonconfigDATA \
+ install-dvi install-dvi-am install-exec install-exec-am \
+ install-html install-html-am install-info install-info-am \
+ install-man install-pdf install-pdf-am install-ps \
+ install-ps-am install-strip installcheck installcheck-am \
+ installdirs maintainer-clean maintainer-clean-generic \
+ mostlyclean mostlyclean-generic pdf pdf-am ps ps-am tags-am \
+ uninstall uninstall-am uninstall-dist_basesDATA \
+ uninstall-dist_bases_framework_servicesDATA \
+ uninstall-dist_libconfigDATA uninstall-dist_pluginsSCRIPTS \
+ uninstall-dist_pythonDATA uninstall-dist_pythonSCRIPTS \
+ uninstall-dist_python_urllib3DATA \
+ uninstall-dist_python_urllib3_backportsDATA \
+ uninstall-dist_python_urllib3_contribDATA \
+ uninstall-dist_python_urllib3_packagesDATA \
+ uninstall-dist_python_urllib3_securetransportDATA \
+ uninstall-dist_python_urllib3_ssl_match_hostnameDATA \
+ uninstall-dist_python_urllib3_utilDATA \
+ uninstall-dist_pythonconfigDATA \
+ uninstall-dist_pythonmodulesDATA \
+ uninstall-dist_pythonyaml2DATA uninstall-dist_pythonyaml3DATA \
+ uninstall-dist_third_partyDATA \
+ uninstall-dist_userpythonconfigDATA
+
+.in:
+ if sed \
+ -e 's#[@]localstatedir_POST@#$(localstatedir)#g' \
+ -e 's#[@]sbindir_POST@#$(sbindir)#g' \
+ -e 's#[@]sysconfdir_POST@#$(sysconfdir)#g' \
+ -e 's#[@]pythondir_POST@#$(pythondir)#g' \
+ -e 's#[@]configdir_POST@#$(configdir)#g' \
+ -e 's#[@]libconfigdir_POST@#$(libconfigdir)#g' \
+ -e 's#[@]cachedir_POST@#$(cachedir)#g' \
+ $< > $@.tmp; then \
+ mv "$@.tmp" "$@"; \
+ else \
+ rm -f "$@.tmp"; \
+ false; \
+ fi
+
+# Tell versions [3.59,3.63) of GNU make to not export all variables.
+# Otherwise a system limit (for SysV at least) may be exceeded.
+.NOEXPORT:
diff --git a/collectors/python.d.plugin/README.md b/collectors/python.d.plugin/README.md
new file mode 100644
index 000000000..df24cd18f
--- /dev/null
+++ b/collectors/python.d.plugin/README.md
@@ -0,0 +1,198 @@
+# python.d.plugin
+
+`python.d.plugin` is a netdata external plugin. It is an **orchestrator** for data collection modules written in `python`.
+
+1. It runs as an independent process (`ps fax` shows it)
+2. It is started and stopped automatically by netdata
+3. It communicates with netdata via a unidirectional pipe (sending data to the netdata daemon)
+4. Supports any number of data collection **modules**
+5. Allows each **module** to have one or more data collection **jobs**
+6. Each **job** collects one or more metrics from a single data source
+
+
+## Disclaimer
+
+Every module should be compatible with python2 and python3.
+All third party libraries should be installed system-wide or in `python_modules` directory.
+Module configurations are written in YAML and **pyYAML is required**.
+
+Every configuration file must have one of two formats:
+
+- Configuration for only one job:
+
+```yaml
+update_every : 2 # update frequency
+retries : 1 # how many failures in update() are tolerated
+priority : 20000 # where it is shown on dashboard
+
+other_var1 : bla # variables passed to module
+other_var2 : alb
+```
+
+- Configuration for many jobs (e.g. mysql):
+
+```yaml
+# module defaults:
+update_every : 2
+retries : 1
+priority : 20000
+
+local: # job name
+ update_every : 5 # job update frequency
+ other_var1 : some_val # module specific variable
+
+other_job:
+ priority : 5 # job position on dashboard
+ retries : 20 # job retries
+ other_var2 : val # module specific variable
+```
+
+`update_every`, `retries`, and `priority` are always optional.
+
+---
+
+## How to write a new module
+
+Writing a new python module is simple. You just need to remember to include 5 major things:
+- **ORDER** global list
+- **CHARTS** global dictionary
+- **Service** class
+- **_get_data** method
+- all code needs to be compatible with Python 2 (**≥ 2.7**) *and* 3 (**≥ 3.1**)
+
+If you plan to submit the module in a PR, make sure to go through the [PR checklist for new modules](https://github.com/netdata/netdata/wiki/New-Module-PR-Checklist) beforehand to make sure you have updated all the files you need to.
+
+### Global variables `ORDER` and `CHARTS`
+
+The `ORDER` list should contain the order of chart ids. Example:
+```py
+ORDER = ['first_chart', 'second_chart', 'third_chart']
+```
+
+The `CHARTS` dictionary is a little bit trickier. It should contain the chart definitions in the following format:
+```py
+CHARTS = {
+    id: {
+        'options': [name, title, units, family, context, charttype],
+        'lines': [
+            [unique_dimension_name, name, algorithm, multiplier, divisor]
+        ]
+    }
+}
+```
+
+All of these names are explained in more detail in the [External Plugins](../) section.
+Parameters like `priority` and `update_every` are handled by `python.d.plugin`.
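+
+For a concrete (and purely hypothetical) illustration, a module exposing a single chart with two dimensions could define them like this; all ids, names and values below are invented for the example:
+```py
+ORDER = ['requests']
+
+CHARTS = {
+    'requests': {
+        # name, title, units, family, context, charttype
+        'options': [None, 'Example Requests', 'requests/s', 'requests', 'example.requests', 'line'],
+        'lines': [
+            # unique_dimension_name, name, algorithm, multiplier, divisor
+            ['successful', 'ok', 'incremental', 1, 1],
+            ['failed', 'fail', 'incremental', 1, 1],
+        ]
+    }
+}
+```
+Here the `incremental` algorithm tells netdata to chart the per-second rate of change of the ever-growing counters instead of their raw values.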
+
+### `Service` class
+
+Every module needs to implement its own `Service` class. This class should inherit from one of the framework classes:
+
+- `SimpleService`
+- `UrlService`
+- `SocketService`
+- `LogService`
+- `ExecutableService`
+
+It also needs to invoke the parent class constructor in a specific way, as well as assign the global variables to class variables.
+
+Simple example:
+```py
+from bases.FrameworkServices.UrlService import UrlService
+class Service(UrlService):
+ def __init__(self, configuration=None, name=None):
+ UrlService.__init__(self, configuration=configuration, name=name)
+ self.order = ORDER
+ self.definitions = CHARTS
+```
+
+### `_get_data` collector/parser
+
+This method should grab raw data from `_get_raw_data`, parse it, and return a dictionary whose keys are unique dimension names, or `None` if no data is collected.
+
+Example:
+```py
+def _get_data(self):
+ try:
+ raw = self._get_raw_data().split(" ")
+ return {'active': int(raw[2])}
+ except (ValueError, AttributeError):
+ return None
+```
+
+## More about framework classes
+
+Every framework class has some user-configurable variables which are specific to this particular class. Those variables should have default values initialized in the child class constructor.
+
+If a module needs some additional user-configurable variable, it can be accessed from the `self.configuration` dictionary and assigned in the constructor or in a custom `check` method. Example:
+```py
+def __init__(self, configuration=None, name=None):
+ UrlService.__init__(self, configuration=configuration, name=name)
+ try:
+ self.baseurl = str(self.configuration['baseurl'])
+ except (KeyError, TypeError):
+ self.baseurl = "http://localhost:5001"
+```
+
+Classes implement `_get_raw_data` which should be used to grab raw data. This method usually returns a list of strings.
+
+### `SimpleService`
+
+_This is a last-resort class: if a new module cannot be written using any other framework class, this one can be used._
+
+_Example: `mysql`, `sensors`_
+
+It is the lowest-level class which implements most of the module logic, like the items below (a minimal sketch follows the list):
+- threading
+- handling run times
+- chart formatting
+- logging
+- chart creation and updating
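+
+For illustration, a minimal module built directly on `SimpleService` could look like the sketch below. The module name, chart and data source are invented, and it assumes the module runs inside `python.d.plugin` so the framework classes are importable:
+```py
+import random
+
+from bases.FrameworkServices.SimpleService import SimpleService
+
+ORDER = ['random']
+
+CHARTS = {
+    'random': {
+        'options': [None, 'A random number', 'number', 'random', 'example.random', 'line'],
+        'lines': [
+            ['random1']
+        ]
+    }
+}
+
+
+class Service(SimpleService):
+    def __init__(self, configuration=None, name=None):
+        SimpleService.__init__(self, configuration=configuration, name=name)
+        self.order = ORDER
+        self.definitions = CHARTS
+
+    def _get_data(self):
+        # return a dictionary of {unique_dimension_name: value}, or None on failure
+        return {'random1': random.randint(0, 100)}
+```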
+
+### `LogService`
+
+_Examples: `apache_cache`, `nginx_log`_
+
+_Variable from config file_: `log_path`.
+
+An object created from this class reads new lines from the file specified by the `log_path` variable. It checks whether the file exists and is readable. `_get_raw_data` returns a list of strings, where each string is one line from the file specified in `log_path`.
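+
+A minimal, hedged sketch of a `LogService` based module could look like this; the chart, the dimension and the default `log_path` are invented for the example:
+```py
+from bases.FrameworkServices.LogService import LogService
+
+ORDER = ['errors']
+
+CHARTS = {
+    'errors': {
+        'options': [None, 'New error lines in the log', 'errors', 'errors', 'examplelog.errors', 'line'],
+        'lines': [
+            ['errors']
+        ]
+    }
+}
+
+
+class Service(LogService):
+    def __init__(self, configuration=None, name=None):
+        LogService.__init__(self, configuration=configuration, name=name)
+        self.order = ORDER
+        self.definitions = CHARTS
+        # normally supplied by the job configuration
+        self.log_path = self.configuration.get('log_path', '/var/log/example.log')
+
+    def _get_data(self):
+        raw = self._get_raw_data()
+        if raw is None:
+            return None
+        # count the new lines containing the word ERROR since the previous run
+        return {'errors': sum(1 for line in raw if 'ERROR' in line)}
+```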
+
+### `ExecutableService`
+
+_Examples: `exim`, `postfix`_
+
+_Variable from config file_: `command`.
+
+This class allows a shell command to be executed in a secure way. It checks for invalid characters in the `command` variable and won't proceed if any of the following is present:
+- '&'
+- '|'
+- ';'
+- '>'
+- '<'
+
+For additional security it uses python `subprocess.Popen` (without the `shell=True` option) to execute the command. The command can be specified with an absolute or a relative name. When a relative name is used, it will try to find `command` in the `PATH` environment variable as well as in `/sbin` and `/usr/sbin`.
+
+`_get_raw_data` returns a list of decoded lines returned by `command`.
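+
+For illustration, here is a minimal sketch loosely modeled on the bundled `exim` module (the chart definition is simplified and should be treated as an approximation, not the exact shipped code):
+```py
+from bases.FrameworkServices.ExecutableService import ExecutableService
+
+ORDER = ['qemails']
+
+CHARTS = {
+    'qemails': {
+        'options': [None, 'Exim Queue Emails', 'emails', 'queue', 'exim.qemails', 'line'],
+        'lines': [
+            ['emails']
+        ]
+    }
+}
+
+
+class Service(ExecutableService):
+    def __init__(self, configuration=None, name=None):
+        ExecutableService.__init__(self, configuration=configuration, name=name)
+        self.order = ORDER
+        self.definitions = CHARTS
+        # the command whose output is collected (may also come from the job configuration)
+        self.command = 'exim -bpc'
+
+    def _get_data(self):
+        try:
+            # `exim -bpc` prints a single line with the number of queued emails
+            return {'emails': int(self._get_raw_data()[0])}
+        except (ValueError, AttributeError, IndexError, TypeError):
+            return None
+```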
+
+### `UrlService`
+
+_Examples: `apache`, `nginx`, `tomcat`_
+
+_Variables from config file_: `url`, `user`, `pass`.
+
+If data is grabbed by accessing a service over the HTTP protocol, this class can be used. It can handle HTTP Basic Auth when the `user` and `pass` credentials are specified.
+
+`_get_raw_data` returns a list of UTF-8 decoded strings (lines).
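+
+A minimal, hedged sketch of a `UrlService` based module; the status page format, the chart and the default `url` are invented for the example (and the parsing is written to work whether the raw data arrives as one decoded string or as a list of lines):
+```py
+from bases.FrameworkServices.UrlService import UrlService
+
+ORDER = ['active']
+
+CHARTS = {
+    'active': {
+        'options': [None, 'Active connections', 'connections', 'active', 'examplehttp.active', 'line'],
+        'lines': [
+            ['active']
+        ]
+    }
+}
+
+
+class Service(UrlService):
+    def __init__(self, configuration=None, name=None):
+        UrlService.__init__(self, configuration=configuration, name=name)
+        self.order = ORDER
+        self.definitions = CHARTS
+        # normally supplied by the job configuration (together with `user` and `pass`)
+        self.url = self.configuration.get('url', 'http://localhost/status')
+
+    def _get_data(self):
+        raw = self._get_raw_data()
+        if not raw:
+            return None
+        lines = raw.splitlines() if isinstance(raw, str) else raw
+        for line in lines:
+            # assume the status page contains a line like: "Active connections: 42"
+            if line.startswith('Active connections:'):
+                try:
+                    return {'active': int(line.split()[-1])}
+                except ValueError:
+                    return None
+        return None
+```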
+
+### `SocketService`
+
+_Examples: `dovecot`, `redis`_
+
+_Variables from config file_: `unix_socket`, `host`, `port`, `request`.
+
+The object will try to execute `request` using either a `unix_socket` or a TCP/IP socket with the combination of `host` and `port`. It can access unix sockets with the SOCK_STREAM or SOCK_DGRAM protocols, and TCP/IP sockets (IPv4 and IPv6) with the SOCK_STREAM setting.
+
+Sockets are accessed in non-blocking mode with a 15-second timeout.
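+
+As a hedged illustration, a minimal `SocketService` based module could look like the sketch below; the protocol (a plain-text `stats` command answered with `key value` lines), the chart and the default `host`/`port` are invented for the example:
+```py
+from bases.FrameworkServices.SocketService import SocketService
+
+ORDER = ['uptime']
+
+CHARTS = {
+    'uptime': {
+        'options': [None, 'Service uptime', 'seconds', 'uptime', 'exampletcp.uptime', 'line'],
+        'lines': [
+            ['uptime']
+        ]
+    }
+}
+
+
+class Service(SocketService):
+    def __init__(self, configuration=None, name=None):
+        SocketService.__init__(self, configuration=configuration, name=name)
+        self.order = ORDER
+        self.definitions = CHARTS
+        # normally supplied by the job configuration
+        self.host = self.configuration.get('host', '127.0.0.1')
+        self.port = self.configuration.get('port', 12345)
+        self.request = 'stats\n'
+
+    def _get_data(self):
+        raw = self._get_raw_data()
+        if not raw:
+            return None
+        lines = raw.splitlines() if isinstance(raw, str) else raw
+        for line in lines:
+            parts = line.split()
+            if len(parts) == 2 and parts[0] == 'uptime':
+                try:
+                    return {'uptime': int(parts[1])}
+                except ValueError:
+                    return None
+        return None
+```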
+
+After every execution of `_get_raw_data` the socket is closed. To prevent this, the module needs to set the `_keep_alive` variable to `True` and implement a custom `_check_raw_data` method.
+
+`_check_raw_data` should take the raw data and return `True` if all data has been received, otherwise it should return `False`. It should also do this in a fast and efficient way.
\ No newline at end of file
diff --git a/collectors/python.d.plugin/adaptec_raid/Makefile.inc b/collectors/python.d.plugin/adaptec_raid/Makefile.inc
new file mode 100644
index 000000000..716cdb235
--- /dev/null
+++ b/collectors/python.d.plugin/adaptec_raid/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += adaptec_raid/adaptec_raid.chart.py
+dist_pythonconfig_DATA += adaptec_raid/adaptec_raid.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += adaptec_raid/README.md adaptec_raid/Makefile.inc
+
diff --git a/collectors/python.d.plugin/adaptec_raid/README.md b/collectors/python.d.plugin/adaptec_raid/README.md
new file mode 100644
index 000000000..499dc9190
--- /dev/null
+++ b/collectors/python.d.plugin/adaptec_raid/README.md
@@ -0,0 +1,46 @@
+# adaptec raid
+
+This module collects health metrics for logical and physical devices.
+
+**Requirements:**
+* `arcconf` program
+* `sudo` program
+* the `netdata` user needs to be able to run the `arcconf` program via `sudo` without a password
+
+To grab stats it executes:
+ * `sudo -n arcconf GETCONFIG 1 LD`
+ * `sudo -n arcconf GETCONFIG 1 PD`
+
+
+It produces:
+
+1. **Logical Device Status**
+
+2. **Physical Device State**
+
+3. **Physical Device S.M.A.R.T warnings**
+
+4. **Physical Device Temperature**
+
+### prerequisite
+This module uses `arcconf` which can only be executed by root. It uses
+`sudo` and assumes that it is configured such that the `netdata` user can
+execute `arcconf` as root without a password.
+
+Add to `sudoers`:
+
+ netdata ALL=(root) NOPASSWD: /path/to/arcconf
+
+### configuration
+
+**adaptec_raid** is disabled by default. It should be explicitly enabled in `python.d.conf`.
+
+```yaml
+adaptec_raid: yes
+```
+
+#### Screenshot:
+
+![image](https://user-images.githubusercontent.com/22274335/47278133-6d306680-d601-11e8-87c2-cc9c0f42d686.png)
+
+---
diff --git a/collectors/python.d.plugin/adaptec_raid/adaptec_raid.chart.py b/collectors/python.d.plugin/adaptec_raid/adaptec_raid.chart.py
new file mode 100644
index 000000000..1fb1e4336
--- /dev/null
+++ b/collectors/python.d.plugin/adaptec_raid/adaptec_raid.chart.py
@@ -0,0 +1,247 @@
+# -*- coding: utf-8 -*-
+# Description: adaptec_raid netdata python.d module
+# Author: Ilya Mashchenko (l2isbad)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+
+import re
+
+from copy import deepcopy
+
+from bases.FrameworkServices.ExecutableService import ExecutableService
+from bases.collection import find_binary
+
+
+disabled_by_default = True
+
+update_every = 5
+
+ORDER = [
+ 'ld_status',
+ 'pd_state',
+ 'pd_smart_warnings',
+ 'pd_temperature',
+]
+
+CHARTS = {
+ 'ld_status': {
+ 'options': [None, 'Status Is Not OK', 'bool', 'logical devices', 'adapter_raid.ld_status', 'line'],
+ 'lines': []
+ },
+ 'pd_state': {
+ 'options': [None, 'State Is Not OK', 'bool', 'physical devices', 'adapter_raid.pd_state', 'line'],
+ 'lines': []
+ },
+ 'pd_smart_warnings': {
+ 'options': [None, 'S.M.A.R.T warnings', 'count', 'physical devices',
+ 'adapter_raid.smart_warnings', 'line'],
+ 'lines': []
+ },
+ 'pd_temperature': {
+ 'options': [None, 'Temperature', 'celsius', 'physical devices', 'adapter_raid.temperature', 'line'],
+ 'lines': []
+ },
+}
+
+SUDO = 'sudo'
+ARCCONF = 'arcconf'
+
+BAD_LD_STATUS = (
+ 'Degraded',
+ 'Failed',
+)
+
+GOOD_PD_STATUS = (
+ 'Online',
+)
+
+RE_LD = re.compile(
+ r'Logical device number\s+([0-9]+).*?'
+ r'Status of logical device\s+: ([a-zA-Z]+)'
+)
+
+
+def find_lds(d):
+ d = ' '.join(v.strip() for v in d)
+ return [LD(*v) for v in RE_LD.findall(d)]
+
+
+def find_pds(d):
+ pds = list()
+ pd = PD()
+
+ for row in d:
+ row = row.strip()
+ if row.startswith('Device #'):
+ pd = PD()
+ pd.id = row.split('#')[-1]
+ elif not pd.id:
+ continue
+
+ if row.startswith('State'):
+ v = row.split()[-1]
+ pd.state = v
+ elif row.startswith('S.M.A.R.T. warnings'):
+ v = row.split()[-1]
+ pd.smart_warnings = v
+ elif row.startswith('Temperature'):
+ v = row.split(':')[-1].split()[0]
+ pd.temperature = v
+ elif row.startswith('NCQ status'):
+ if pd.id and pd.state and pd.smart_warnings:
+ pds.append(pd)
+ pd = PD()
+
+ return pds
+
+
+class LD:
+ def __init__(self, ld_id, status):
+ self.id = ld_id
+ self.status = status
+
+ def data(self):
+ return {
+ 'ld_{0}_status'.format(self.id): int(self.status in BAD_LD_STATUS)
+ }
+
+
+class PD:
+ def __init__(self):
+ self.id = None
+ self.state = None
+ self.smart_warnings = None
+ self.temperature = None
+
+ def data(self):
+ data = {
+ 'pd_{0}_state'.format(self.id): int(self.state not in GOOD_PD_STATUS),
+ 'pd_{0}_smart_warnings'.format(self.id): self.smart_warnings,
+ }
+ if self.temperature and self.temperature.isdigit():
+ data['pd_{0}_temperature'.format(self.id)] = self.temperature
+
+ return data
+
+
+class Arcconf:
+ def __init__(self, arcconf):
+ self.arcconf = arcconf
+
+ def ld_info(self):
+ return [self.arcconf, 'GETCONFIG', '1', 'LD']
+
+ def pd_info(self):
+ return [self.arcconf, 'GETCONFIG', '1', 'PD']
+
+
+# TODO: hardcoded sudo...
+class SudoArcconf:
+ def __init__(self, arcconf, sudo):
+ self.arcconf = Arcconf(arcconf)
+ self.sudo = sudo
+
+ def ld_info(self):
+ return [self.sudo, '-n'] + self.arcconf.ld_info()
+
+ def pd_info(self):
+ return [self.sudo, '-n'] + self.arcconf.pd_info()
+
+
+class Service(ExecutableService):
+ def __init__(self, configuration=None, name=None):
+ ExecutableService.__init__(self, configuration=configuration, name=name)
+ self.order = ORDER
+ self.definitions = deepcopy(CHARTS)
+ self.use_sudo = self.configuration.get('use_sudo', True)
+ self.arcconf = None
+
+ def execute(self, command, stderr=False):
+ return self._get_raw_data(command=command, stderr=stderr)
+
+ def check(self):
+ arcconf = find_binary(ARCCONF)
+ if not arcconf:
+ self.error('can\'t locate "{0}" binary'.format(ARCCONF))
+ return False
+
+ sudo = find_binary(SUDO)
+ if self.use_sudo:
+ if not sudo:
+ self.error('can\'t locate "{0}" binary'.format(SUDO))
+ return False
+ err = self.execute([sudo, '-n', '-v'], True)
+ if err:
+ self.error(' '.join(err))
+ return False
+
+ if self.use_sudo:
+ self.arcconf = SudoArcconf(arcconf, sudo)
+ else:
+ self.arcconf = Arcconf(arcconf)
+
+ lds = self.get_lds()
+ if not lds:
+ return False
+
+ self.debug('discovered logical devices ids: {0}'.format([ld.id for ld in lds]))
+
+ pds = self.get_pds()
+ if not pds:
+ return False
+
+ self.debug('discovered physical devices ids: {0}'.format([pd.id for pd in pds]))
+
+ self.update_charts(lds, pds)
+ return True
+
+ def get_data(self):
+ data = dict()
+
+ for ld in self.get_lds():
+ data.update(ld.data())
+
+ for pd in self.get_pds():
+ data.update(pd.data())
+
+ return data
+
+ def get_lds(self):
+ raw_lds = self.execute(self.arcconf.ld_info())
+ if not raw_lds:
+ return None
+
+ lds = find_lds(raw_lds)
+ if not lds:
+ self.error('failed to parse "{0}" output'.format(' '.join(self.arcconf.ld_info())))
+ self.debug('output: {0}'.format(raw_lds))
+ return None
+ return lds
+
+ def get_pds(self):
+ raw_pds = self.execute(self.arcconf.pd_info())
+ if not raw_pds:
+ return None
+
+ pds = find_pds(raw_pds)
+ if not pds:
+ self.error('failed to parse "{0}" output'.format(' '.join(self.arcconf.pd_info())))
+ self.debug('output: {0}'.format(raw_pds))
+ return None
+ return pds
+
+ def update_charts(self, lds, pds):
+ charts = self.definitions
+ for ld in lds:
+ dim = ['ld_{0}_status'.format(ld.id), 'ld {0}'.format(ld.id)]
+ charts['ld_status']['lines'].append(dim)
+
+ for pd in pds:
+ dim = ['pd_{0}_state'.format(pd.id), 'pd {0}'.format(pd.id)]
+ charts['pd_state']['lines'].append(dim)
+
+ dim = ['pd_{0}_smart_warnings'.format(pd.id), 'pd {0}'.format(pd.id)]
+ charts['pd_smart_warnings']['lines'].append(dim)
+
+ dim = ['pd_{0}_temperature'.format(pd.id), 'pd {0}'.format(pd.id)]
+ charts['pd_temperature']['lines'].append(dim)
diff --git a/collectors/python.d.plugin/adaptec_raid/adaptec_raid.conf b/collectors/python.d.plugin/adaptec_raid/adaptec_raid.conf
new file mode 100644
index 000000000..253cbf5a9
--- /dev/null
+++ b/collectors/python.d.plugin/adaptec_raid/adaptec_raid.conf
@@ -0,0 +1,55 @@
+# netdata python.d.plugin configuration for adaptec raid
+#
+# This file is in YaML format. Generally the format is:
+#
+# name: value
+#
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 1
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# retries sets the number of retries to be made in case of failures.
+# If unset, the default for python.d.plugin is used.
+# Attempts to restore the service are made once every update_every
+# and only if the module has collected values in the past.
+# retries: 60
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# retries: 60 # the JOB's number of restoration attempts
+# autodetection_retry: 0 # the JOB's re-check interval in seconds
+# ----------------------------------------------------------------------
diff --git a/collectors/python.d.plugin/apache/Makefile.inc b/collectors/python.d.plugin/apache/Makefile.inc
new file mode 100644
index 000000000..70a421550
--- /dev/null
+++ b/collectors/python.d.plugin/apache/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += apache/apache.chart.py
+dist_pythonconfig_DATA += apache/apache.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += apache/README.md apache/Makefile.inc
+
diff --git a/collectors/python.d.plugin/apache/README.md b/collectors/python.d.plugin/apache/README.md
new file mode 100644
index 000000000..c6d1d126a
--- /dev/null
+++ b/collectors/python.d.plugin/apache/README.md
@@ -0,0 +1,59 @@
+# apache
+
+This module will monitor one or more Apache servers depending on configuration.
+
+**Requirements:**
+ * Apache with `mod_status` enabled
+
+It produces the following charts:
+
+1. **Requests** in requests/s
+ * requests
+
+2. **Connections**
+ * connections
+
+3. **Async Connections**
+ * keepalive
+ * closing
+ * writing
+
+4. **Bandwidth** in kilobits/s
+ * sent
+
+5. **Workers**
+ * idle
+ * busy
+
+6. **Lifetime Avg. Requests/s** in requests/s
+ * requests_sec
+
+7. **Lifetime Avg. Bandwidth/s** in kilobits/s
+ * size_sec
+
+8. **Lifetime Avg. Response Size** in bytes/request
+ * size_req
+
+### configuration
+
+Requires only the `url` of the server's `server-status?auto` page.
+
+Here is an example for 2 servers:
+
+```yaml
+update_every : 10
+priority : 90100
+
+local:
+ url : 'http://localhost/server-status?auto'
+ retries : 20
+
+remote:
+ url : 'http://www.apache.org/server-status?auto'
+ update_every : 5
+ retries : 4
+```
+
+Without configuration, the module attempts to connect to `http://localhost/server-status?auto`.
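+
+For reference, here is a rough standalone sketch of parsing such `?auto` output. It is only illustrative and not part of the module, which does this through its `UrlService` base class and a richer key mapping:
+
+```python
+# Illustrative sketch only -- not the module's code.
+try:
+    from urllib.request import urlopen   # Python 3
+except ImportError:
+    from urllib2 import urlopen          # Python 2
+
+
+def fetch_status(url='http://localhost/server-status?auto'):
+    """Return mod_status '?auto' output as a {key: value} dict."""
+    raw = urlopen(url).read().decode('utf-8')
+    data = {}
+    for line in raw.split('\n'):
+        key, _, value = line.partition(':')
+        if not value:
+            continue
+        try:
+            # e.g. 'Total Accesses: 12345' -> data['Total Accesses'] = 12345.0
+            data[key.strip()] = float(value)
+        except ValueError:
+            continue  # non-numeric fields (Scoreboard, ServerVersion, ...) are skipped
+    return data
+```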
+
+---
diff --git a/collectors/python.d.plugin/apache/apache.chart.py b/collectors/python.d.plugin/apache/apache.chart.py
new file mode 100644
index 000000000..d136274d0
--- /dev/null
+++ b/collectors/python.d.plugin/apache/apache.chart.py
@@ -0,0 +1,132 @@
+# -*- coding: utf-8 -*-
+# Description: apache netdata python.d module
+# Author: Pawel Krupa (paulfantom)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from bases.FrameworkServices.UrlService import UrlService
+
+# default module values (can be overridden per job in `config`)
+# update_every = 2
+priority = 60000
+retries = 60
+
+# default job configuration (overridden by python.d.plugin)
+# config = {'local': {
+# 'update_every': update_every,
+# 'retries': retries,
+# 'priority': priority,
+# 'url': 'http://www.apache.org/server-status?auto'
+# }}
+
+# charts order (can be overridden if you want less charts, or different order)
+ORDER = ['requests', 'connections', 'conns_async', 'net', 'workers', 'reqpersec', 'bytespersec', 'bytesperreq']
+
+CHARTS = {
+ 'bytesperreq': {
+ 'options': [None, 'apache Lifetime Avg. Response Size', 'bytes/request',
+ 'statistics', 'apache.bytesperreq', 'area'],
+ 'lines': [
+ ['size_req']
+ ]},
+ 'workers': {
+ 'options': [None, 'apache Workers', 'workers', 'workers', 'apache.workers', 'stacked'],
+ 'lines': [
+ ['idle'],
+ ['busy'],
+ ]},
+ 'reqpersec': {
+ 'options': [None, 'apache Lifetime Avg. Requests/s', 'requests/s', 'statistics',
+ 'apache.reqpersec', 'area'],
+ 'lines': [
+ ['requests_sec']
+ ]},
+ 'bytespersec': {
+ 'options': [None, 'apache Lifetime Avg. Bandwidth/s', 'kilobits/s', 'statistics',
+                    'apache.bytespersec', 'area'],
+ 'lines': [
+ ['size_sec', None, 'absolute', 8, 1000]
+ ]},
+ 'requests': {
+ 'options': [None, 'apache Requests', 'requests/s', 'requests', 'apache.requests', 'line'],
+ 'lines': [
+ ['requests', None, 'incremental']
+ ]},
+ 'net': {
+ 'options': [None, 'apache Bandwidth', 'kilobits/s', 'bandwidth', 'apache.net', 'area'],
+ 'lines': [
+ ['sent', None, 'incremental', 8, 1]
+ ]},
+ 'connections': {
+ 'options': [None, 'apache Connections', 'connections', 'connections', 'apache.connections', 'line'],
+ 'lines': [
+ ['connections']
+ ]},
+ 'conns_async': {
+ 'options': [None, 'apache Async Connections', 'connections', 'connections', 'apache.conns_async', 'stacked'],
+ 'lines': [
+ ['keepalive'],
+ ['closing'],
+ ['writing']
+ ]}
+}
+
+ASSIGNMENT = {
+ 'BytesPerReq': 'size_req',
+ 'IdleWorkers': 'idle',
+ 'IdleServers': 'idle_servers',
+ 'BusyWorkers': 'busy',
+ 'BusyServers': 'busy_servers',
+ 'ReqPerSec': 'requests_sec',
+ 'BytesPerSec': 'size_sec',
+ 'Total Accesses': 'requests',
+ 'Total kBytes': 'sent',
+ 'ConnsTotal': 'connections',
+ 'ConnsAsyncKeepAlive': 'keepalive',
+ 'ConnsAsyncClosing': 'closing',
+ 'ConnsAsyncWriting': 'writing'
+}
+
+
+class Service(UrlService):
+ def __init__(self, configuration=None, name=None):
+ UrlService.__init__(self, configuration=configuration, name=name)
+ self.order = ORDER
+ self.definitions = CHARTS
+ self.url = self.configuration.get('url', 'http://localhost/server-status?auto')
+
+ def check(self):
+ self._manager = self._build_manager()
+ data = self._get_data()
+ if not data:
+ return None
+
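+        # lighttpd's mod_status reports IdleServers/BusyServers instead of Apache's
+        # IdleWorkers/BusyWorkers, so their presence is used to relabel the charts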
+ if 'idle_servers' in data:
+ self.module_name = 'lighttpd'
+ for chart in self.definitions:
+ if chart == 'workers':
+ lines = self.definitions[chart]['lines']
+ lines[0] = ['idle_servers', 'idle']
+ lines[1] = ['busy_servers', 'busy']
+ opts = self.definitions[chart]['options']
+ opts[1] = opts[1].replace('apache', 'lighttpd')
+ opts[4] = opts[4].replace('apache', 'lighttpd')
+ return True
+
+ def _get_data(self):
+ """
+ Format data received from http request
+ :return: dict
+ """
+ raw_data = self._get_raw_data()
+ if not raw_data:
+ return None
+ data = dict()
+
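+        # mod_status '?auto' output is one 'Key: value' pair per line, e.g. 'Total Accesses: 12345'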
+ for row in raw_data.split('\n'):
+ tmp = row.split(':')
+ if tmp[0] in ASSIGNMENT:
+ try:
+ data[ASSIGNMENT[tmp[0]]] = int(float(tmp[1]))
+ except (IndexError, ValueError):
+ continue
+ return data or None
diff --git a/collectors/python.d.plugin/apache/apache.conf b/collectors/python.d.plugin/apache/apache.conf
new file mode 100644
index 000000000..8b606f7e0
--- /dev/null
+++ b/collectors/python.d.plugin/apache/apache.conf
@@ -0,0 +1,87 @@
+# netdata python.d.plugin configuration for apache
+#
+# This file is in YaML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 1
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# retries sets the number of retries to be made in case of failures.
+# If unset, the default for python.d.plugin is used.
+# Attempts to restore the service are made once every update_every
+# and only if the module has collected values in the past.
+# retries: 60
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# retries: 60 # the JOB's number of restoration attempts
+# autodetection_retry: 0 # the JOB's re-check interval in seconds
+#
+# Additionally to the above, apache also supports the following:
+#
+# url: 'URL' # the URL to fetch apache's mod_status stats
+#
+# if the URL is password protected, the following are supported:
+#
+# user: 'username'
+# pass: 'password'
+
+# ----------------------------------------------------------------------
+# AUTO-DETECTION JOBS
+# only one of them will run (they have the same name)
+
+localhost:
+ name : 'local'
+ url : 'http://localhost/server-status?auto'
+
+localipv4:
+ name : 'local'
+ url : 'http://127.0.0.1/server-status?auto'
+
+localipv6:
+ name : 'local'
+ url : 'http://[::1]/server-status?auto'
diff --git a/collectors/python.d.plugin/beanstalk/Makefile.inc b/collectors/python.d.plugin/beanstalk/Makefile.inc
new file mode 100644
index 000000000..4bbb7087d
--- /dev/null
+++ b/collectors/python.d.plugin/beanstalk/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += beanstalk/beanstalk.chart.py
+dist_pythonconfig_DATA += beanstalk/beanstalk.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += beanstalk/README.md beanstalk/Makefile.inc
+
diff --git a/collectors/python.d.plugin/beanstalk/README.md b/collectors/python.d.plugin/beanstalk/README.md
new file mode 100644
index 000000000..c2d7d5787
--- /dev/null
+++ b/collectors/python.d.plugin/beanstalk/README.md
@@ -0,0 +1,103 @@
+# beanstalk
+
+This module provides server- and tube-level statistics:
+
+**Requirements:**
+ * `python-beanstalkc`
+
+**Server statistics:**
+
+1. **Cpu usage** in cpu time
+ * user
+ * system
+
+2. **Jobs rate** in jobs/s
+ * total
+ * timeouts
+
+3. **Connections rate** in connections/s
+ * connections
+
+4. **Commands rate** in commands/s
+ * put
+ * peek
+ * peek-ready
+ * peek-delayed
+ * peek-buried
+ * reserve
+ * use
+ * watch
+ * ignore
+ * delete
+ * release
+ * bury
+ * kick
+ * stats
+ * stats-job
+ * stats-tube
+ * list-tubes
+ * list-tube-used
+ * list-tubes-watched
+ * pause-tube
+
+5. **Current tubes** in tubes
+ * tubes
+
+6. **Current jobs** in jobs
+ * urgent
+ * ready
+ * reserved
+ * delayed
+ * buried
+
+7. **Current connections** in connections
+ * written
+ * producers
+ * workers
+ * waiting
+
+8. **Binlog** in records/s
+ * written
+ * migrated
+
+9. **Uptime** in seconds
+ * uptime
+
+**Per tube statistics:**
+
+1. **Jobs rate** in jobs/s
+ * jobs
+
+2. **Jobs** in jobs
+ * using
+ * ready
+ * reserved
+ * delayed
+ * buried
+
+3. **Connections** in connections
+ * using
+ * waiting
+ * watching
+
+4. **Commands** in commands/s
+ * deletes
+ * pauses
+
+5. **Pause** in seconds
+ * since
+ * left
+
+
+### configuration
+
+Sample:
+
+```yaml
+host : '127.0.0.1'
+port : 11300
+```
+
+If no configuration is given, the module will attempt to connect to beanstalkd at `127.0.0.1:11300`.
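+
+As a quick, illustrative sanity check that the `python-beanstalkc` dependency and the daemon are reachable (this snippet is not part of the module, which wraps the same calls in its Service class):
+
+```python
+# Illustrative snippet; assumes beanstalkd listens on the default address.
+import beanstalkc
+
+conn = beanstalkc.Connection(host='127.0.0.1', port=11300)
+print(conn.stats()['current-tubes'])                           # server-wide stats
+for tube in conn.tubes():
+    print(tube, conn.stats_tube(tube)['current-jobs-ready'])   # per-tube stats
+conn.close()
+```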
+
+---
diff --git a/collectors/python.d.plugin/beanstalk/beanstalk.chart.py b/collectors/python.d.plugin/beanstalk/beanstalk.chart.py
new file mode 100644
index 000000000..1472b4e1a
--- /dev/null
+++ b/collectors/python.d.plugin/beanstalk/beanstalk.chart.py
@@ -0,0 +1,247 @@
+# -*- coding: utf-8 -*-
+# Description: beanstalk netdata python.d module
+# Author: l2isbad
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+try:
+ import beanstalkc
+ BEANSTALKC = True
+except ImportError:
+ BEANSTALKC = False
+
+from bases.FrameworkServices.SimpleService import SimpleService
+from bases.loaders import safe_load
+
+# default module values (can be overridden per job in `config`)
+# update_every = 2
+priority = 60000
+retries = 60
+
+ORDER = ['cpu_usage', 'jobs_rate', 'connections_rate', 'commands_rate', 'current_tubes', 'current_jobs',
+ 'current_connections', 'binlog', 'uptime']
+
+CHARTS = {
+ 'cpu_usage': {
+ 'options': [None, 'Cpu Usage', 'cpu time', 'server statistics', 'beanstalk.cpu_usage', 'area'],
+ 'lines': [
+ ['rusage-utime', 'user', 'incremental'],
+ ['rusage-stime', 'system', 'incremental']
+ ]
+ },
+ 'jobs_rate': {
+ 'options': [None, 'Jobs Rate', 'jobs/s', 'server statistics', 'beanstalk.jobs_rate', 'line'],
+ 'lines': [
+ ['total-jobs', 'total', 'incremental'],
+ ['job-timeouts', 'timeouts', 'incremental']
+ ]
+ },
+ 'connections_rate': {
+ 'options': [None, 'Connections Rate', 'connections/s', 'server statistics', 'beanstalk.connections_rate',
+ 'area'],
+ 'lines': [
+ ['total-connections', 'connections', 'incremental']
+ ]
+ },
+ 'commands_rate': {
+ 'options': [None, 'Commands Rate', 'commands/s', 'server statistics', 'beanstalk.commands_rate', 'stacked'],
+ 'lines': [
+ ['cmd-put', 'put', 'incremental'],
+ ['cmd-peek', 'peek', 'incremental'],
+ ['cmd-peek-ready', 'peek-ready', 'incremental'],
+ ['cmd-peek-delayed', 'peek-delayed', 'incremental'],
+ ['cmd-peek-buried', 'peek-buried', 'incremental'],
+ ['cmd-reserve', 'reserve', 'incremental'],
+ ['cmd-use', 'use', 'incremental'],
+ ['cmd-watch', 'watch', 'incremental'],
+ ['cmd-ignore', 'ignore', 'incremental'],
+ ['cmd-delete', 'delete', 'incremental'],
+ ['cmd-release', 'release', 'incremental'],
+ ['cmd-bury', 'bury', 'incremental'],
+ ['cmd-kick', 'kick', 'incremental'],
+ ['cmd-stats', 'stats', 'incremental'],
+ ['cmd-stats-job', 'stats-job', 'incremental'],
+ ['cmd-stats-tube', 'stats-tube', 'incremental'],
+ ['cmd-list-tubes', 'list-tubes', 'incremental'],
+ ['cmd-list-tube-used', 'list-tube-used', 'incremental'],
+ ['cmd-list-tubes-watched', 'list-tubes-watched', 'incremental'],
+ ['cmd-pause-tube', 'pause-tube', 'incremental']
+ ]
+ },
+ 'current_tubes': {
+ 'options': [None, 'Current Tubes', 'tubes', 'server statistics', 'beanstalk.current_tubes', 'area'],
+ 'lines': [
+ ['current-tubes', 'tubes']
+ ]
+ },
+ 'current_jobs': {
+ 'options': [None, 'Current Jobs', 'jobs', 'server statistics', 'beanstalk.current_jobs', 'stacked'],
+ 'lines': [
+ ['current-jobs-urgent', 'urgent'],
+ ['current-jobs-ready', 'ready'],
+ ['current-jobs-reserved', 'reserved'],
+ ['current-jobs-delayed', 'delayed'],
+ ['current-jobs-buried', 'buried']
+ ]
+ },
+ 'current_connections': {
+ 'options': [None, 'Current Connections', 'connections', 'server statistics',
+ 'beanstalk.current_connections', 'line'],
+ 'lines': [
+ ['current-connections', 'written'],
+ ['current-producers', 'producers'],
+ ['current-workers', 'workers'],
+ ['current-waiting', 'waiting']
+ ]
+ },
+ 'binlog': {
+ 'options': [None, 'Binlog', 'records/s', 'server statistics', 'beanstalk.binlog', 'line'],
+ 'lines': [
+ ['binlog-records-written', 'written', 'incremental'],
+ ['binlog-records-migrated', 'migrated', 'incremental']
+ ]
+ },
+ 'uptime': {
+ 'options': [None, 'Uptime', 'seconds', 'server statistics', 'beanstalk.uptime', 'line'],
+ 'lines': [
+ ['uptime'],
+ ]
+ }
+}
+
+
+def tube_chart_template(name):
+ order = [
+ '{0}_jobs_rate'.format(name),
+ '{0}_jobs'.format(name),
+ '{0}_connections'.format(name),
+ '{0}_commands'.format(name),
+ '{0}_pause'.format(name)
+ ]
+ family = 'tube {0}'.format(name)
+
+ charts = {
+ order[0]: {
+ 'options': [None, 'Job Rate', 'jobs/s', family, 'beanstalk.jobs_rate', 'area'],
+ 'lines': [
+ ['_'.join([name, 'total-jobs']), 'jobs', 'incremental']
+ ]
+ },
+ order[1]: {
+ 'options': [None, 'Jobs', 'jobs', family, 'beanstalk.jobs', 'stacked'],
+ 'lines': [
+ ['_'.join([name, 'current-jobs-urgent']), 'urgent'],
+ ['_'.join([name, 'current-jobs-ready']), 'ready'],
+ ['_'.join([name, 'current-jobs-reserved']), 'reserved'],
+ ['_'.join([name, 'current-jobs-delayed']), 'delayed'],
+ ['_'.join([name, 'current-jobs-buried']), 'buried']
+ ]
+ },
+ order[2]: {
+ 'options': [None, 'Connections', 'connections', family, 'beanstalk.connections', 'stacked'],
+ 'lines': [
+ ['_'.join([name, 'current-using']), 'using'],
+ ['_'.join([name, 'current-waiting']), 'waiting'],
+ ['_'.join([name, 'current-watching']), 'watching']
+ ]
+ },
+ order[3]: {
+ 'options': [None, 'Commands', 'commands/s', family, 'beanstalk.commands', 'stacked'],
+ 'lines': [
+ ['_'.join([name, 'cmd-delete']), 'deletes', 'incremental'],
+ ['_'.join([name, 'cmd-pause-tube']), 'pauses', 'incremental']
+ ]
+ },
+ order[4]: {
+ 'options': [None, 'Pause', 'seconds', family, 'beanstalk.pause', 'stacked'],
+ 'lines': [
+ ['_'.join([name, 'pause']), 'since'],
+ ['_'.join([name, 'pause-time-left']), 'left']
+ ]
+ }
+ }
+
+ return order, charts
+
+
+class Service(SimpleService):
+ def __init__(self, configuration=None, name=None):
+ SimpleService.__init__(self, configuration=configuration, name=name)
+ self.configuration = configuration
+ self.order = list(ORDER)
+ self.definitions = dict(CHARTS)
+ self.conn = None
+ self.alive = True
+
+ def check(self):
+ if not BEANSTALKC:
+ self.error("'beanstalkc' module is needed to use beanstalk.chart.py")
+ return False
+
+ self.conn = self.connect()
+
+ return True if self.conn else False
+
+ def get_data(self):
+ """
+ :return: dict
+ """
+ if not self.is_alive():
+ return None
+
+ active_charts = self.charts.active_charts()
+ data = dict()
+
+ try:
+ data.update(self.conn.stats())
+
+ for tube in self.conn.tubes():
+ stats = self.conn.stats_tube(tube)
+
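+                # tubes can appear at runtime; add a per-tube chart group the first time a new tube is seen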
+ if tube + '_jobs_rate' not in active_charts:
+ self.create_new_tube_charts(tube)
+
+ for stat in stats:
+ data['_'.join([tube, stat])] = stats[stat]
+
+ except beanstalkc.SocketError:
+ self.alive = False
+ return None
+
+ return data or None
+
+ def create_new_tube_charts(self, tube):
+ order, charts = tube_chart_template(tube)
+
+ for chart_name in order:
+ params = [chart_name] + charts[chart_name]['options']
+ dimensions = charts[chart_name]['lines']
+
+ new_chart = self.charts.add_chart(params)
+ for dimension in dimensions:
+ new_chart.add_dimension(dimension)
+
+ def connect(self):
+ host = self.configuration.get('host', '127.0.0.1')
+ port = self.configuration.get('port', 11300)
+ timeout = self.configuration.get('timeout', 1)
+ try:
+ return beanstalkc.Connection(host=host,
+ port=port,
+ connect_timeout=timeout,
+ parse_yaml=safe_load)
+ except beanstalkc.SocketError as error:
+ self.error('Connection to {0}:{1} failed: {2}'.format(host, port, error))
+ return None
+
+ def reconnect(self):
+ try:
+ self.conn.reconnect()
+ self.alive = True
+ return True
+ except beanstalkc.SocketError:
+ return False
+
+ def is_alive(self):
+ if not self.alive:
+ return self.reconnect()
+ return True
diff --git a/collectors/python.d.plugin/beanstalk/beanstalk.conf b/collectors/python.d.plugin/beanstalk/beanstalk.conf
new file mode 100644
index 000000000..940801877
--- /dev/null
+++ b/collectors/python.d.plugin/beanstalk/beanstalk.conf
@@ -0,0 +1,80 @@
+# netdata python.d.plugin configuration for beanstalk
+#
+# This file is in YaML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 1
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# retries sets the number of retries to be made in case of failures.
+# If unset, the default for python.d.plugin is used.
+# Attempts to restore the service are made once every update_every
+# and only if the module has collected values in the past.
+# retries: 60
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
+
+# chart_cleanup sets the default chart cleanup interval in iterations.
+# A chart is marked as obsolete if it has not been updated
+# 'chart_cleanup' iterations in a row.
+# When a plugin sends the obsolete flag, the charts are not deleted
+# from netdata immediately.
+# They will be hidden immediately (not offered to dashboard viewer,
+# streamed upstream and archived to backends) and deleted one hour
+# later (configurable from netdata.conf).
+# chart_cleanup: 10
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# retries: 60 # the JOB's number of restoration attempts
+# autodetection_retry: 0 # the JOB's re-check interval in seconds
+# chart_cleanup: 10 # the JOB's chart cleanup interval in iterations
+#
+# Additionally to the above, beanstalk also supports the following:
+#
+# host: 'host' # Server ip address or hostname. Default: 127.0.0.1
+# port: port # Beanstalkd port. Default: 11300
+#
+# ----------------------------------------------------------------------
diff --git a/collectors/python.d.plugin/bind_rndc/Makefile.inc b/collectors/python.d.plugin/bind_rndc/Makefile.inc
new file mode 100644
index 000000000..72f391492
--- /dev/null
+++ b/collectors/python.d.plugin/bind_rndc/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += bind_rndc/bind_rndc.chart.py
+dist_pythonconfig_DATA += bind_rndc/bind_rndc.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += bind_rndc/README.md bind_rndc/Makefile.inc
+
diff --git a/collectors/python.d.plugin/bind_rndc/README.md b/collectors/python.d.plugin/bind_rndc/README.md
new file mode 100644
index 000000000..688297ab3
--- /dev/null
+++ b/collectors/python.d.plugin/bind_rndc/README.md
@@ -0,0 +1,60 @@
+# bind_rndc
+
+This module parses the BIND statistics dump file to collect real-time performance metrics.
+
+**Requirements:**
+ * BIND version must be 9.6 or later
+ * Netdata must have permissions to run `rndc stats`
+
+It produces:
+
+1. **Name server statistics**
+ * requests
+ * responses
+ * success
+ * auth_answer
+ * nonauth_answer
+ * nxrrset
+ * failure
+ * nxdomain
+ * recursion
+ * duplicate
+ * rejections
+
+2. **Incoming queries**
+ * RESERVED0
+ * A
+ * NS
+ * CNAME
+ * SOA
+ * PTR
+ * MX
+ * TXT
+ * X25
+ * AAAA
+ * SRV
+ * NAPTR
+ * A6
+ * DS
+ * RRSIG
+ * DNSKEY
+ * SPF
+ * ANY
+ * DLV
+
+3. **Outgoing queries**
+ * Same as Incoming queries
+
+
+### configuration
+
+Sample:
+
+```yaml
+local:
+ named_stats_path : '/var/log/bind/named.stats'
+```
+
+If no configuration is given, the module will attempt to read the named.stats file at `/var/log/bind/named.stats`.
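+
+The collection cycle is roughly the following (illustrative sketch only; the module adds permission checks, error handling and parsing of the dump):
+
+```python
+# Illustrative sketch -- assumes 'rndc' is in PATH and the stats file is readable.
+import os
+from subprocess import Popen
+
+STATS = '/var/log/bind/named.stats'
+
+size_before = os.path.getsize(STATS)   # remember where the file ends now
+Popen(['rndc', 'stats']).wait()         # ask named to append a fresh dump
+with open(STATS) as f:
+    f.seek(size_before)                 # skip old dumps, read only the new one
+    print(f.read())
+```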
+
+---
diff --git a/collectors/python.d.plugin/bind_rndc/bind_rndc.chart.py b/collectors/python.d.plugin/bind_rndc/bind_rndc.chart.py
new file mode 100644
index 000000000..423232f65
--- /dev/null
+++ b/collectors/python.d.plugin/bind_rndc/bind_rndc.chart.py
@@ -0,0 +1,240 @@
+# -*- coding: utf-8 -*-
+# Description: bind rndc netdata python.d module
+# Author: l2isbad
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+import os
+
+from collections import defaultdict
+from subprocess import Popen
+
+from bases.collection import find_binary
+from bases.FrameworkServices.SimpleService import SimpleService
+
+priority = 60000
+retries = 60
+update_every = 30
+
+ORDER = ['name_server_statistics', 'incoming_queries', 'outgoing_queries', 'named_stats_size']
+
+CHARTS = {
+ 'name_server_statistics': {
+ 'options': [None, 'Name Server Statistics', 'stats', 'name server statistics',
+ 'bind_rndc.name_server_statistics', 'line'],
+ 'lines': [
+ ['nms_requests', 'requests', 'incremental'],
+ ['nms_rejected_queries', 'rejected_queries', 'incremental'],
+ ['nms_success', 'success', 'incremental'],
+ ['nms_failure', 'failure', 'incremental'],
+ ['nms_responses', 'responses', 'incremental'],
+ ['nms_duplicate', 'duplicate', 'incremental'],
+ ['nms_recursion', 'recursion', 'incremental'],
+ ['nms_nxrrset', 'nxrrset', 'incremental'],
+ ['nms_nxdomain', 'nxdomain', 'incremental'],
+ ['nms_non_auth_answer', 'non_auth_answer', 'incremental'],
+ ['nms_auth_answer', 'auth_answer', 'incremental'],
+ ['nms_dropped_queries', 'dropped_queries', 'incremental'],
+ ]},
+ 'incoming_queries': {
+ 'options': [None, 'Incoming Queries', 'queries', 'incoming queries', 'bind_rndc.incoming_queries', 'line'],
+ 'lines': [
+ ]},
+ 'outgoing_queries': {
+ 'options': [None, 'Outgoing Queries', 'queries', 'outgoing queries', 'bind_rndc.outgoing_queries', 'line'],
+ 'lines': [
+ ]},
+ 'named_stats_size': {
+ 'options': [None, 'Named Stats File Size', 'MB', 'file size', 'bind_rndc.stats_size', 'line'],
+ 'lines': [
+ ['stats_size', None, 'absolute', 1, 1 << 20]
+ ]
+ }
+}
+
+NMS = {
+ 'nms_requests': [
+ 'IPv4 requests received',
+ 'IPv6 requests received',
+ 'TCP requests received',
+ 'requests with EDNS(0) receive'
+ ],
+ 'nms_responses': [
+ 'responses sent',
+ 'truncated responses sent',
+ 'responses with EDNS(0) sent',
+ 'requests with unsupported EDNS version received'
+ ],
+ 'nms_failure': [
+ 'other query failures',
+ 'queries resulted in SERVFAIL'
+ ],
+ 'nms_auth_answer': ['queries resulted in authoritative answer'],
+ 'nms_non_auth_answer': ['queries resulted in non authoritative answer'],
+ 'nms_nxrrset': ['queries resulted in nxrrset'],
+ 'nms_success': ['queries resulted in successful answer'],
+ 'nms_nxdomain': ['queries resulted in NXDOMAIN'],
+ 'nms_recursion': ['queries caused recursion'],
+ 'nms_duplicate': ['duplicate queries received'],
+ 'nms_rejected_queries': [
+ 'auth queries rejected',
+ 'recursive queries rejected'
+ ],
+ 'nms_dropped_queries': ['queries dropped']
+}
+
+STATS = ['Name Server Statistics', 'Incoming Queries', 'Outgoing Queries']
+
+
+class Service(SimpleService):
+ def __init__(self, configuration=None, name=None):
+ SimpleService.__init__(self, configuration=configuration, name=name)
+ self.order = ORDER
+ self.definitions = CHARTS
+ self.named_stats_path = self.configuration.get('named_stats_path', '/var/log/bind/named.stats')
+ self.rndc = find_binary('rndc')
+ self.data = dict(nms_requests=0, nms_responses=0, nms_failure=0, nms_auth=0,
+ nms_non_auth=0, nms_nxrrset=0, nms_success=0, nms_nxdomain=0,
+ nms_recursion=0, nms_duplicate=0, nms_rejected_queries=0,
+ nms_dropped_queries=0)
+
+ def check(self):
+ if not self.rndc:
+ self.error('Can\'t locate "rndc" binary or binary is not executable by netdata')
+ return False
+
+ if not (os.path.isfile(self.named_stats_path) and os.access(self.named_stats_path, os.R_OK)):
+ self.error('Cannot access file %s' % self.named_stats_path)
+ return False
+
+ run_rndc = Popen([self.rndc, 'stats'], shell=False)
+ run_rndc.wait()
+
+ if not run_rndc.returncode:
+ return True
+ self.error('Not enough permissions to run "%s stats"' % self.rndc)
+ return False
+
+ def _get_raw_data(self):
+ """
+ Run 'rndc stats' and read last dump from named.stats
+ :return: dict
+ """
+ result = dict()
+ try:
+ current_size = os.path.getsize(self.named_stats_path)
+ run_rndc = Popen([self.rndc, 'stats'], shell=False)
+ run_rndc.wait()
+
+ if run_rndc.returncode:
+ return None
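+            # 'rndc stats' appends a fresh dump to named.stats; seek past the
+            # pre-existing content so only the newly appended dump is read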
+ with open(self.named_stats_path) as named_stats:
+ named_stats.seek(current_size)
+ result['stats'] = named_stats.readlines()
+ result['size'] = current_size
+ return result
+ except (OSError, IOError):
+ return None
+
+ def _get_data(self):
+ """
+ Parse data from _get_raw_data()
+ :return: dict
+ """
+
+ raw_data = self._get_raw_data()
+
+ if raw_data is None:
+ return None
+ parsed = dict()
+ for stat in STATS:
+ parsed[stat] = parse_stats(field=stat,
+ named_stats=raw_data['stats'])
+
+ self.data.update(nms_mapper(data=parsed['Name Server Statistics']))
+
+ for elem in zip(['Incoming Queries', 'Outgoing Queries'], ['incoming_queries', 'outgoing_queries']):
+ parsed_key, chart_name = elem[0], elem[1]
+ for dimension_id, value in queries_mapper(data=parsed[parsed_key],
+ add=chart_name[:9]).items():
+
+ if dimension_id not in self.data:
+ dimension = dimension_id.replace(chart_name[:9], '')
+ if dimension_id not in self.charts[chart_name]:
+ self.charts[chart_name].add_dimension([dimension_id, dimension, 'incremental'])
+
+ self.data[dimension_id] = value
+
+ self.data['stats_size'] = raw_data['size']
+ return self.data
+
+
+def parse_stats(field, named_stats):
+ """
+ :param field: str:
+ :param named_stats: list:
+ :return: dict
+
+ Example:
+    field: 'Incoming Queries'
+    named_stats (list of lines):
+ ++ Incoming Requests ++
+ 1405660 QUERY
+ 3 NOTIFY
+ ++ Incoming Queries ++
+ 1214961 A
+ 75 NS
+ 2 CNAME
+ 2897 SOA
+ 35544 PTR
+ 14 MX
+ 5822 TXT
+ 145974 AAAA
+ 371 SRV
+ ++ Outgoing Queries ++
+ ...
+
+ result:
+    {'A': 1214961, 'NS': 75, 'CNAME': 2, 'SOA': 2897, ...}
+ """
+ data = dict()
+ ns = iter(named_stats)
+ for line in ns:
+ if field not in line:
+ continue
+ while True:
+ try:
+ line = next(ns)
+ except StopIteration:
+ break
+ if '++' not in line:
+ if '[' in line:
+ continue
+ v, k = line.strip().split(' ', 1)
+ if k not in data:
+ data[k] = 0
+ data[k] += int(v)
+ continue
+ break
+ break
+ return data
+
+
+def nms_mapper(data):
+ """
+ :param data: dict
+ :return: dict(defaultdict)
+ """
+ result = defaultdict(int)
+ for k, v in NMS.items():
+ for elem in v:
+ result[k] += data.get(elem, 0)
+ return result
+
+
+def queries_mapper(data, add):
+ """
+ :param data: dict
+ :param add: str
+ :return: dict
+ """
+ return dict([(add + k, v) for k, v in data.items()])
diff --git a/collectors/python.d.plugin/bind_rndc/bind_rndc.conf b/collectors/python.d.plugin/bind_rndc/bind_rndc.conf
new file mode 100644
index 000000000..71958ff98
--- /dev/null
+++ b/collectors/python.d.plugin/bind_rndc/bind_rndc.conf
@@ -0,0 +1,112 @@
+# netdata python.d.plugin configuration for bind_rndc
+#
+# This file is in YaML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 1
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# retries sets the number of retries to be made in case of failures.
+# If unset, the default for python.d.plugin is used.
+# Attempts to restore the service are made once every update_every
+# and only if the module has collected values in the past.
+# retries: 60
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# retries: 60 # the JOB's number of restoration attempts
+# autodetection_retry: 0 # the JOB's re-check interval in seconds
+#
+# Additionally to the above, bind_rndc also supports the following:
+#
+# named_stats_path: 'path to named.stats' # Default: '/var/log/bind/named.stats'
+#------------------------------------------------------------------------------------------------------------------
+# IMPORTANT Information
+#
+# BIND APPENDS a new dump to the stats file at EVERY RUN. It is NOT RECOMMENDED to set update_every below 30 sec.
+# It is STRONGLY RECOMMENDED to create a bind-rndc conf file for logrotate.
+#
+# To set up your BIND to dump stats do the following:
+#
+# 1. add to 'named.conf.options' options {}:
+# statistics-file "/var/log/bind/named.stats";
+#
+# 2. Create bind/ directory in /var/log
+# cd /var/log/ && mkdir bind
+#
+# 3. Change owner of directory to 'bind' user
+# chown bind bind/
+#
+# 4. RELOAD (NOT restart) BIND
+# systemctl reload bind9.service
+#
+# 5. Run 'rndc stats' as root to trigger a dump (BIND will create named.stats in the new directory)
+#
+#
+# To ALLOW NETDATA TO RUN 'rndc stats' change '/etc/bind/rndc.key' group to netdata
+# chown :netdata rndc.key
+#
+# Last BUT NOT least, create bind-rndc.conf in logrotate.d/
+# A working example:
+# /var/log/bind/named.stats {
+#
+# daily
+# rotate 4
+# compress
+# delaycompress
+# create 0644 bind bind
+# missingok
+# postrotate
+# rndc reload > /dev/null
+# endscript
+# }
+#
+# To test your logrotate conf file run as root:
+#
+# logrotate /etc/logrotate.d/bind-rndc -d (debug dry-run mode)
+#
+# ----------------------------------------------------------------------
diff --git a/collectors/python.d.plugin/boinc/Makefile.inc b/collectors/python.d.plugin/boinc/Makefile.inc
new file mode 100644
index 000000000..319e19cfe
--- /dev/null
+++ b/collectors/python.d.plugin/boinc/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += boinc/boinc.chart.py
+dist_pythonconfig_DATA += boinc/boinc.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += boinc/README.md boinc/Makefile.inc
+
diff --git a/collectors/python.d.plugin/boinc/README.md b/collectors/python.d.plugin/boinc/README.md
new file mode 100644
index 000000000..595bcd3c0
--- /dev/null
+++ b/collectors/python.d.plugin/boinc/README.md
@@ -0,0 +1,28 @@
+# boinc
+
+This module monitors task counts for the Berkeley Open Infrastructure
+for Network Computing (BOINC) distributed computing client using the same
+RPC interface that the BOINC monitoring GUI does.
+
+It provides charts tracking the total number of tasks and active tasks,
+as well as ones tracking each of the possible states for tasks.
+
+### configuration
+
+BOINC requires a password to access its RPC interface. You can
+find this password in the `gui_rpc_auth.cfg` file in your BOINC directory.
+
+By default, the module will try to auto-detect the password by looking
+in `/var/lib/boinc` for this file (this is the location most Linux
+distributions use for a system-wide BOINC installation), so things may
+just work without needing configuration for the local system.
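+
+A minimal sketch of what that auto-detection amounts to (illustrative only, not the module's code):
+
+```python
+# Illustrative sketch; the path is the assumed system-wide BOINC directory.
+import os
+
+
+def read_gui_rpc_password(boinc_dir='/var/lib/boinc'):
+    """Return the GUI RPC password, or '' if the file cannot be read."""
+    path = os.path.join(boinc_dir, 'gui_rpc_auth.cfg')
+    try:
+        with open(path) as f:
+            return f.read().strip()
+    except (IOError, OSError):
+        return ''
+```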
+
+You can monitor remote systems as well:
+
+```yaml
+remote:
+ hostname: some-host
+ password: some-password
+```
+
+---
diff --git a/collectors/python.d.plugin/boinc/boinc.chart.py b/collectors/python.d.plugin/boinc/boinc.chart.py
new file mode 100644
index 000000000..d14754c4b
--- /dev/null
+++ b/collectors/python.d.plugin/boinc/boinc.chart.py
@@ -0,0 +1,162 @@
+# -*- coding: utf-8 -*-
+# Description: BOINC netdata python.d module
+# Author: Austin S. Hemmelgarn (Ferroin)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+import socket
+
+from bases.FrameworkServices.SimpleService import SimpleService
+
+from third_party import boinc_client
+
+
+ORDER = ['tasks', 'states', 'sched_states', 'process_states']
+
+CHARTS = {
+ 'tasks': {
+ 'options': [None, 'Overall Tasks', 'tasks', 'boinc', 'boinc.tasks', 'line'],
+ 'lines': [
+ ['total', 'Total', 'absolute', 1, 1],
+ ['active', 'Active', 'absolute', 1, 1]
+ ]
+ },
+ 'states': {
+ 'options': [None, 'Tasks per State', 'tasks', 'boinc', 'boinc.states', 'line'],
+ 'lines': [
+ ['new', 'New', 'absolute', 1, 1],
+ ['downloading', 'Downloading', 'absolute', 1, 1],
+ ['downloaded', 'Ready to Run', 'absolute', 1, 1],
+ ['comperror', 'Compute Errors', 'absolute', 1, 1],
+ ['uploading', 'Uploading', 'absolute', 1, 1],
+ ['uploaded', 'Uploaded', 'absolute', 1, 1],
+ ['aborted', 'Aborted', 'absolute', 1, 1],
+ ['upload_failed', 'Failed Uploads', 'absolute', 1, 1]
+ ]
+ },
+ 'sched_states': {
+ 'options': [None, 'Tasks per Scheduler State', 'tasks', 'boinc', 'boinc.sched', 'line'],
+ 'lines': [
+ ['uninit_sched', 'Uninitialized', 'absolute', 1, 1],
+ ['preempted', 'Preempted', 'absolute', 1, 1],
+ ['scheduled', 'Scheduled', 'absolute', 1, 1]
+ ]
+ },
+ 'process_states': {
+ 'options': [None, 'Tasks per Process State', 'tasks', 'boinc', 'boinc.process', 'line'],
+ 'lines': [
+ ['uninit_proc', 'Uninitialized', 'absolute', 1, 1],
+ ['executing', 'Executing', 'absolute', 1, 1],
+ ['suspended', 'Suspended', 'absolute', 1, 1],
+ ['aborting', 'Aborted', 'absolute', 1, 1],
+ ['quit', 'Quit', 'absolute', 1, 1],
+ ['copy_pending', 'Copy Pending', 'absolute', 1, 1]
+ ]
+ }
+}
+
+# A simple template used for pre-loading the return dictionary to make
+# the _get_data() method simpler.
+_DATA_TEMPLATE = {
+ 'total': 0,
+ 'active': 0,
+ 'new': 0,
+ 'downloading': 0,
+ 'downloaded': 0,
+ 'comperror': 0,
+ 'uploading': 0,
+ 'uploaded': 0,
+ 'aborted': 0,
+ 'upload_failed': 0,
+ 'uninit_sched': 0,
+ 'preempted': 0,
+ 'scheduled': 0,
+ 'uninit_proc': 0,
+ 'executing': 0,
+ 'suspended': 0,
+ 'aborting': 0,
+ 'quit': 0,
+ 'copy_pending': 0
+}
+
+# Map task states to dimensions
+_TASK_MAP = {
+ boinc_client.ResultState.NEW: 'new',
+ boinc_client.ResultState.FILES_DOWNLOADING: 'downloading',
+ boinc_client.ResultState.FILES_DOWNLOADED: 'downloaded',
+ boinc_client.ResultState.COMPUTE_ERROR: 'comperror',
+ boinc_client.ResultState.FILES_UPLOADING: 'uploading',
+ boinc_client.ResultState.FILES_UPLOADED: 'uploaded',
+ boinc_client.ResultState.ABORTED: 'aborted',
+ boinc_client.ResultState.UPLOAD_FAILED: 'upload_failed'
+}
+
+# Map scheduler states to dimensions
+_SCHED_MAP = {
+ boinc_client.CpuSched.UNINITIALIZED: 'uninit_sched',
+ boinc_client.CpuSched.PREEMPTED: 'preempted',
+ boinc_client.CpuSched.SCHEDULED: 'scheduled',
+}
+
+# Maps process states to dimensions
+_PROC_MAP = {
+ boinc_client.Process.UNINITIALIZED: 'uninit_proc',
+ boinc_client.Process.EXECUTING: 'executing',
+ boinc_client.Process.SUSPENDED: 'suspended',
+ boinc_client.Process.ABORT_PENDING: 'aborted',
+ boinc_client.Process.QUIT_PENDING: 'quit',
+ boinc_client.Process.COPY_PENDING: 'copy_pending'
+}
+
+
+class Service(SimpleService):
+ def __init__(self, configuration=None, name=None):
+ SimpleService.__init__(self, configuration=configuration, name=name)
+ self.order = ORDER
+ self.definitions = CHARTS
+ self.host = self.configuration.get('host', 'localhost')
+ self.port = self.configuration.get('port', 0)
+ self.password = self.configuration.get('password', '')
+ self.client = boinc_client.BoincClient(host=self.host, port=self.port, passwd=self.password)
+ self.alive = False
+
+ def check(self):
+ return self.connect()
+
+ def connect(self):
+ self.client.connect()
+ self.alive = self.client.connected and self.client.authorized
+ return self.alive
+
+ def reconnect(self):
+ # The client class itself actually disconnects existing
+ # connections when it is told to connect, so we don't need to
+ # explicitly disconnect when we're just trying to reconnect.
+ return self.connect()
+
+ def is_alive(self):
+ if not self.alive:
+ return self.reconnect()
+ return True
+
+ def _get_data(self):
+ if not self.is_alive():
+ return None
+ data = dict(_DATA_TEMPLATE)
+ results = []
+ try:
+ results = self.client.get_tasks()
+ except socket.error:
+ self.error('Connection is dead')
+ self.alive = False
+ return None
+ for task in results:
+ data['total'] += 1
+ data[_TASK_MAP[task.state]] += 1
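+            # tasks that are not currently running lack the active_task /
+            # scheduler_state / active_task_state attributes, hence the AttributeError guard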
+ try:
+ if task.active_task:
+ data['active'] += 1
+ data[_SCHED_MAP[task.scheduler_state]] += 1
+ data[_PROC_MAP[task.active_task_state]] += 1
+ except AttributeError:
+ pass
+ return data
diff --git a/collectors/python.d.plugin/boinc/boinc.conf b/collectors/python.d.plugin/boinc/boinc.conf
new file mode 100644
index 000000000..e59d2509d
--- /dev/null
+++ b/collectors/python.d.plugin/boinc/boinc.conf
@@ -0,0 +1,68 @@
+# netdata python.d.plugin configuration for boinc
+#
+# This file is in YaML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 1
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# retries sets the number of retries to be made in case of failures.
+# If unset, the default for python.d.plugin is used.
+# Attempts to restore the service are made once every update_every
+# and only if the module has collected values in the past.
+# retries: 60
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# retries: 60 # the JOB's number of restoration attempts
+# autodetection_retry: 0 # the JOB's re-check interval in seconds
+#
+# Additionally to the above, boinc also supports the following:
+#
+# hostname: localhost # The host running the BOINC client
+# port: 31416 # The remote GUI RPC port for BOINC
+# password: '' # The remote GUI RPC password
diff --git a/collectors/python.d.plugin/ceph/Makefile.inc b/collectors/python.d.plugin/ceph/Makefile.inc
new file mode 100644
index 000000000..15b039ef6
--- /dev/null
+++ b/collectors/python.d.plugin/ceph/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += ceph/ceph.chart.py
+dist_pythonconfig_DATA += ceph/ceph.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += ceph/README.md ceph/Makefile.inc
+
diff --git a/collectors/python.d.plugin/ceph/README.md b/collectors/python.d.plugin/ceph/README.md
new file mode 100644
index 000000000..29dfe5d1d
--- /dev/null
+++ b/collectors/python.d.plugin/ceph/README.md
@@ -0,0 +1,32 @@
+# ceph
+
+This module monitors Ceph cluster usage and consumption data.
+
+It produces:
+
+* Cluster statistics (usage, available, latency, objects, read/write rate)
+* OSD usage
+* OSD latency
+* Pool usage
+* Pool read/write operations
+* Pool read/write rate
+* Number of objects per pool
+
+**Requirements:**
+
+- `rados` python module
+- Granting read permission on the keyring file to the ceph group
+```shell
+# chmod 640 /etc/ceph/ceph.client.admin.keyring
+```
+
+### Configuration
+
+Sample:
+```yaml
+local:
+ config_file: '/etc/ceph/ceph.conf'
+ keyring_file: '/etc/ceph/ceph.client.admin.keyring'
+```
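+
+For a quick, illustrative check that the `rados` binding and the keyring permissions are in order (the module issues the same kind of `mon_command` calls internally; paths below match the sample configuration above):
+
+```python
+# Illustrative snippet only -- not the module's code.
+import json
+import rados
+
+cluster = rados.Rados(conffile='/etc/ceph/ceph.conf',
+                      conf=dict(keyring='/etc/ceph/ceph.client.admin.keyring'))
+cluster.connect()
+# mon_command returns (return_code, output_buffer, error_string); [1] is the JSON body
+df = json.loads(cluster.mon_command(json.dumps({'prefix': 'df', 'format': 'json'}), '')[1])
+print([pool['name'] for pool in df['pools']])
+cluster.shutdown()
+```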
+
+---
diff --git a/collectors/python.d.plugin/ceph/ceph.chart.py b/collectors/python.d.plugin/ceph/ceph.chart.py
new file mode 100644
index 000000000..31c764d0f
--- /dev/null
+++ b/collectors/python.d.plugin/ceph/ceph.chart.py
@@ -0,0 +1,345 @@
+# -*- coding: utf-8 -*-
+# Description: ceph netdata python.d module
+# Author: Luis Eduardo (lets00)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+try:
+ import rados
+ CEPH = True
+except ImportError:
+ CEPH = False
+
+import os
+import json
+from bases.FrameworkServices.SimpleService import SimpleService
+
+# default module values (can be overridden per job in `config`)
+update_every = 10
+priority = 60000
+retries = 60
+
+ORDER = [
+ 'general_usage',
+ 'general_objects',
+ 'general_bytes',
+ 'general_operations',
+ 'general_latency',
+ 'pool_usage',
+ 'pool_objects',
+ 'pool_read_bytes',
+ 'pool_write_bytes',
+ 'pool_read_operations',
+ 'pool_write_operations',
+ 'osd_usage',
+ 'osd_apply_latency',
+ 'osd_commit_latency'
+]
+
+CHARTS = {
+ 'general_usage': {
+ 'options': [None, 'Ceph General Space', 'KB', 'general', 'ceph.general_usage', 'stacked'],
+ 'lines': [
+ ['general_available', 'avail', 'absolute'],
+ ['general_usage', 'used', 'absolute']
+ ]
+ },
+ 'general_objects': {
+ 'options': [None, 'Ceph General Objects', 'objects', 'general', 'ceph.general_objects', 'area'],
+ 'lines': [
+ ['general_objects', 'cluster', 'absolute']
+ ]
+ },
+ 'general_bytes': {
+ 'options': [None, 'Ceph General Read/Write Data/s', 'KB', 'general', 'ceph.general_bytes',
+ 'area'],
+ 'lines': [
+ ['general_read_bytes', 'read', 'absolute', 1, 1024],
+ ['general_write_bytes', 'write', 'absolute', -1, 1024]
+ ]
+ },
+ 'general_operations': {
+ 'options': [None, 'Ceph General Read/Write Operations/s', 'operations', 'general', 'ceph.general_operations',
+ 'area'],
+ 'lines': [
+ ['general_read_operations', 'read', 'absolute', 1],
+ ['general_write_operations', 'write', 'absolute', -1]
+ ]
+ },
+ 'general_latency': {
+ 'options': [None, 'Ceph General Apply/Commit latency', 'milliseconds', 'general', 'ceph.general_latency',
+ 'area'],
+ 'lines': [
+ ['general_apply_latency', 'apply', 'absolute'],
+ ['general_commit_latency', 'commit', 'absolute']
+ ]
+ },
+ 'pool_usage': {
+ 'options': [None, 'Ceph Pools', 'KB', 'pool', 'ceph.pool_usage', 'line'],
+ 'lines': []
+ },
+ 'pool_objects': {
+ 'options': [None, 'Ceph Pools', 'objects', 'pool', 'ceph.pool_objects', 'line'],
+ 'lines': []
+ },
+ 'pool_read_bytes': {
+ 'options': [None, 'Ceph Read Pool Data/s', 'KB', 'pool', 'ceph.pool_read_bytes', 'area'],
+ 'lines': []
+ },
+ 'pool_write_bytes': {
+ 'options': [None, 'Ceph Write Pool Data/s', 'KB', 'pool', 'ceph.pool_write_bytes', 'area'],
+ 'lines': []
+ },
+ 'pool_read_operations': {
+ 'options': [None, 'Ceph Read Pool Operations/s', 'operations', 'pool', 'ceph.pool_read_operations', 'area'],
+ 'lines': []
+ },
+ 'pool_write_operations': {
+ 'options': [None, 'Ceph Write Pool Operations/s', 'operations', 'pool', 'ceph.pool_write_operations', 'area'],
+ 'lines': []
+ },
+ 'osd_usage': {
+ 'options': [None, 'Ceph OSDs', 'KB', 'osd', 'ceph.osd_usage', 'line'],
+ 'lines': []
+ },
+ 'osd_apply_latency': {
+ 'options': [None, 'Ceph OSDs apply latency', 'milliseconds', 'osd', 'ceph.apply_latency', 'line'],
+ 'lines': []
+ },
+ 'osd_commit_latency': {
+ 'options': [None, 'Ceph OSDs commit latency', 'milliseconds', 'osd', 'ceph.commit_latency', 'line'],
+ 'lines': []
+ }
+
+}
+
+
+class Service(SimpleService):
+ def __init__(self, configuration=None, name=None):
+ SimpleService.__init__(self, configuration=configuration, name=name)
+ self.order = ORDER
+ self.definitions = CHARTS
+ self.config_file = self.configuration.get('config_file')
+ self.keyring_file = self.configuration.get('keyring_file')
+
+ def check(self):
+ """
+ Checks module
+ :return:
+ """
+ if not CEPH:
+ self.error('rados module is needed to use ceph.chart.py')
+ return False
+ if not (self.config_file and self.keyring_file):
+ self.error('config_file and/or keyring_file is not defined')
+ return False
+
+ # Verify files and permissions
+ if not (os.access(self.config_file, os.F_OK)):
+ self.error('{0} does not exist'.format(self.config_file))
+ return False
+ if not (os.access(self.keyring_file, os.F_OK)):
+ self.error('{0} does not exist'.format(self.keyring_file))
+ return False
+ if not (os.access(self.config_file, os.R_OK)):
+ self.error('Ceph plugin does not read {0}, define read permission.'.format(self.config_file))
+ return False
+ if not (os.access(self.keyring_file, os.R_OK)):
+ self.error('Ceph plugin does not read {0}, define read permission.'.format(self.keyring_file))
+ return False
+ try:
+ self.cluster = rados.Rados(conffile=self.config_file,
+ conf=dict(keyring=self.keyring_file))
+ self.cluster.connect()
+ except rados.Error as error:
+ self.error(error)
+ return False
+ self.create_definitions()
+ return True
+
+ def create_definitions(self):
+ """
+ Create dynamically charts options
+ :return: None
+ """
+ # Pool lines
+ for pool in sorted(self._get_df()['pools']):
+ self.definitions['pool_usage']['lines'].append([pool['name'],
+ pool['name'],
+ 'absolute'])
+ self.definitions['pool_objects']['lines'].append(["obj_{0}".format(pool['name']),
+ pool['name'],
+ 'absolute'])
+ self.definitions['pool_read_bytes']['lines'].append(['read_{0}'.format(pool['name']),
+ pool['name'],
+ 'absolute', 1, 1024])
+ self.definitions['pool_write_bytes']['lines'].append(['write_{0}'.format(pool['name']),
+ pool['name'],
+ 'absolute', 1, 1024])
+ self.definitions['pool_read_operations']['lines'].append(['read_operations_{0}'.format(pool['name']),
+ pool['name'],
+ 'absolute'])
+ self.definitions['pool_write_operations']['lines'].append(['write_operations_{0}'.format(pool['name']),
+ pool['name'],
+ 'absolute'])
+
+ # OSD lines
+ for osd in sorted(self._get_osd_df()['nodes']):
+ self.definitions['osd_usage']['lines'].append([osd['name'],
+ osd['name'],
+ 'absolute'])
+ self.definitions['osd_apply_latency']['lines'].append(['apply_latency_{0}'.format(osd['name']),
+ osd['name'],
+ 'absolute'])
+ self.definitions['osd_commit_latency']['lines'].append(['commit_latency_{0}'.format(osd['name']),
+ osd['name'],
+ 'absolute'])
+
+ def get_data(self):
+ """
+ Catch all ceph data
+ :return: dict
+ """
+ try:
+ data = {}
+ df = self._get_df()
+ osd_df = self._get_osd_df()
+ osd_perf = self._get_osd_perf()
+ pool_stats = self._get_osd_pool_stats()
+ data.update(self._get_general(osd_perf, pool_stats))
+ for pool in df['pools']:
+ data.update(self._get_pool_usage(pool))
+ data.update(self._get_pool_objects(pool))
+ for pool_io in pool_stats:
+ data.update(self._get_pool_rw(pool_io))
+ for osd in osd_df['nodes']:
+ data.update(self._get_osd_usage(osd))
+ for osd_apply_commit in osd_perf['osd_perf_infos']:
+ data.update(self._get_osd_latency(osd_apply_commit))
+ return data
+ except (ValueError, AttributeError) as error:
+ self.error(error)
+ return None
+
+ def _get_general(self, osd_perf, pool_stats):
+ """
+ Get ceph's general usage
+ :return: dict
+ """
+ status = self.cluster.get_cluster_stats()
+ read_bytes_sec = 0
+ write_bytes_sec = 0
+ read_op_per_sec = 0
+ write_op_per_sec = 0
+ apply_latency = 0
+ commit_latency = 0
+
+ for pool_rw_io_b in pool_stats:
+ read_bytes_sec += pool_rw_io_b['client_io_rate'].get('read_bytes_sec', 0)
+ write_bytes_sec += pool_rw_io_b['client_io_rate'].get('write_bytes_sec', 0)
+ read_op_per_sec += pool_rw_io_b['client_io_rate'].get('read_op_per_sec', 0)
+ write_op_per_sec += pool_rw_io_b['client_io_rate'].get('write_op_per_sec', 0)
+ for perf in osd_perf['osd_perf_infos']:
+ apply_latency += perf['perf_stats']['apply_latency_ms']
+ commit_latency += perf['perf_stats']['commit_latency_ms']
+
+ return {
+ 'general_usage': int(status['kb_used']),
+ 'general_available': int(status['kb_avail']),
+ 'general_objects': int(status['num_objects']),
+ 'general_read_bytes': read_bytes_sec,
+ 'general_write_bytes': write_bytes_sec,
+ 'general_read_operations': read_op_per_sec,
+ 'general_write_operations': write_op_per_sec,
+ 'general_apply_latency': apply_latency,
+ 'general_commit_latency': commit_latency
+ }
+
+ @staticmethod
+ def _get_pool_usage(pool):
+ """
+ Process raw data into pool usage dict information
+ :return: A pool dict with pool name's key and usage bytes' value
+ """
+ return {pool['name']: pool['stats']['kb_used']}
+
+ @staticmethod
+ def _get_pool_objects(pool):
+ """
+ Process raw data into pool usage dict information
+ :return: A pool dict with pool name's key and object numbers
+ """
+ return {'obj_{0}'.format(pool['name']): pool['stats']['objects']}
+
+ @staticmethod
+ def _get_pool_rw(pool):
+ """
+ Get read/write kb and operations in a pool
+ :return: A pool dict with both read/write bytes and operations.
+ """
+ return {
+ 'read_{0}'.format(pool['pool_name']): int(pool['client_io_rate'].get('read_bytes_sec', 0)),
+ 'write_{0}'.format(pool['pool_name']): int(pool['client_io_rate'].get('write_bytes_sec', 0)),
+ 'read_operations_{0}'.format(pool['pool_name']): int(pool['client_io_rate'].get('read_op_per_sec', 0)),
+ 'write_operations_{0}'.format(pool['pool_name']): int(pool['client_io_rate'].get('write_op_per_sec', 0))
+ }
+
+ @staticmethod
+ def _get_osd_usage(osd):
+ """
+ Process raw data into osd dict information to get osd usage
+ :return: A osd dict with osd name's key and usage bytes' value
+ """
+ return {osd['name']: float(osd['kb_used'])}
+
+ @staticmethod
+ def _get_osd_latency(osd):
+ """
+ Get ceph osd apply and commit latency
+ :return: A osd dict with osd name's key with both apply and commit latency values
+ """
+ return {
+ 'apply_latency_osd.{0}'.format(osd['id']): osd['perf_stats']['apply_latency_ms'],
+ 'commit_latency_osd.{0}'.format(osd['id']): osd['perf_stats']['commit_latency_ms']
+ }
+
+ def _get_df(self):
+ """
+ Get ceph df output
+ :return: ceph df --format json
+ """
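+        # mon_command returns (return_code, output_buffer, error_string); element [1] holds the JSON body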
+ return json.loads(self.cluster.mon_command(json.dumps({
+ 'prefix': 'df',
+ 'format': 'json'
+ }), '')[1])
+
+ def _get_osd_df(self):
+ """
+ Get ceph osd df output
+ :return: ceph osd df --format json
+ """
+ return json.loads(self.cluster.mon_command(json.dumps({
+ 'prefix': 'osd df',
+ 'format': 'json'
+ }), '')[1])
+
+ def _get_osd_perf(self):
+ """
+ Get ceph osd performance
+ :return: ceph osd perf --format json
+ """
+ return json.loads(self.cluster.mon_command(json.dumps({
+ 'prefix': 'osd perf',
+ 'format': 'json'
+ }), '')[1])
+
+ def _get_osd_pool_stats(self):
+ """
+ Get ceph osd pool status.
+ This command is used to get information about both
+ read/write operation and bytes per second on each pool
+ :return: ceph osd pool stats --format json
+ """
+ return json.loads(self.cluster.mon_command(json.dumps({
+ 'prefix': 'osd pool stats',
+ 'format': 'json'
+ }), '')[1])
diff --git a/collectors/python.d.plugin/ceph/ceph.conf b/collectors/python.d.plugin/ceph/ceph.conf
new file mode 100644
index 000000000..78ac1e251
--- /dev/null
+++ b/collectors/python.d.plugin/ceph/ceph.conf
@@ -0,0 +1,75 @@
+# netdata python.d.plugin configuration for ceph stats
+#
+# This file is in YaML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 10
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# retries sets the number of retries to be made in case of failures.
+# If unset, the default for python.d.plugin is used.
+# Attempts to restore the service are made once every update_every
+# and only if the module has collected values in the past.
+# retries: 60
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 10 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# retries: 60 # the JOB's number of restoration attempts
+# autodetection_retry: 0 # the JOB's re-check interval in seconds
+#
+# In addition to the above, the ceph plugin also supports the following:
+#
+# config_file: 'config_file' # Ceph config file.
+# keyring_file: 'keyring_file' # Ceph keyring file. The netdata user must be added to the ceph group,
+# # and the keyring file must be readable by that group.
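+# # (for example: "usermod -a -G ceph netdata", then make the keyring
+# # group-readable with "chmod g+r <keyring_file>")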
+# ----------------------------------------------------------------------
+# AUTO-DETECTION JOBS
+# only one of them will run (they have the same name)
+#
+config_file: '/etc/ceph/ceph.conf'
+keyring_file: '/etc/ceph/ceph.client.admin.keyring'
+
diff --git a/collectors/python.d.plugin/chrony/Makefile.inc b/collectors/python.d.plugin/chrony/Makefile.inc
new file mode 100644
index 000000000..18a805b12
--- /dev/null
+++ b/collectors/python.d.plugin/chrony/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += chrony/chrony.chart.py
+dist_pythonconfig_DATA += chrony/chrony.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += chrony/README.md chrony/Makefile.inc
+
diff --git a/collectors/python.d.plugin/chrony/README.md b/collectors/python.d.plugin/chrony/README.md
new file mode 100644
index 000000000..30636fe77
--- /dev/null
+++ b/collectors/python.d.plugin/chrony/README.md
@@ -0,0 +1,31 @@
+# chrony
+
+This module monitors the precision and statistics of a local chronyd server.
+
+It produces:
+
+* frequency
+* last offset
+* RMS offset
+* residual freq
+* root delay
+* root dispersion
+* skew
+* system time
+
+**Requirements:**
+Verify that the netdata user can execute `chronyc tracking`. If necessary, update the `cmdallow` directive in `/etc/chrony.conf`.
+
+### Configuration
+
+Sample:
+```yaml
+# data collection frequency:
+update_every: 1
+
+# chrony query command:
+local:
+ command: 'chronyc -n tracking'
+```
+
+---
diff --git a/collectors/python.d.plugin/chrony/chrony.chart.py b/collectors/python.d.plugin/chrony/chrony.chart.py
new file mode 100644
index 000000000..fd01d4e85
--- /dev/null
+++ b/collectors/python.d.plugin/chrony/chrony.chart.py
@@ -0,0 +1,110 @@
+# -*- coding: utf-8 -*-
+# Description: chrony netdata python.d module
+# Author: Dominik Schloesser (domschl)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from bases.FrameworkServices.ExecutableService import ExecutableService
+
+# default module values (can be overridden per job in `config`)
+update_every = 5
+priority = 60000
+retries = 10
+
+# charts order (can be overridden if you want fewer charts, or a different order)
+ORDER = ['system', 'offsets', 'stratum', 'root', 'frequency', 'residualfreq', 'skew']
+
+CHARTS = {
+ 'system': {
+ 'options': [None, 'Chrony System Time Deltas', 'microseconds', 'system', 'chrony.system', 'area'],
+ 'lines': [
+ ['timediff', 'system time', 'absolute', 1, 1000]
+ ]
+ },
+ 'offsets': {
+ 'options': [None, 'Chrony System Time Offsets', 'microseconds', 'system', 'chrony.offsets', 'area'],
+ 'lines': [
+ ['lastoffset', 'last offset', 'absolute', 1, 1000],
+ ['rmsoffset', 'RMS offset', 'absolute', 1, 1000]
+ ]
+ },
+ 'stratum': {
+ 'options': [None, 'Chrony Stratum', 'stratum', 'root', 'chrony.stratum', 'line'],
+ 'lines': [
+ ['stratum', None, 'absolute', 1, 1]
+ ]
+ },
+ 'root': {
+ 'options': [None, 'Chrony Root Delays', 'milliseconds', 'root', 'chrony.root', 'line'],
+ 'lines': [
+ ['rootdelay', 'delay', 'absolute', 1, 1000000],
+ ['rootdispersion', 'dispersion', 'absolute', 1, 1000000]
+ ]
+ },
+ 'frequency': {
+ 'options': [None, 'Chrony Frequency', 'ppm', 'frequencies', 'chrony.frequency', 'area'],
+ 'lines': [
+ ['frequency', None, 'absolute', 1, 1000]
+ ]
+ },
+ 'residualfreq': {
+ 'options': [None, 'Chrony Residual frequency', 'ppm', 'frequencies', 'chrony.residualfreq', 'area'],
+ 'lines': [
+ ['residualfreq', 'residual frequency', 'absolute', 1, 1000]
+ ]
+ },
+ 'skew': {
+ 'options': [None, 'Chrony Skew, error bound on frequency', 'ppm', 'frequencies', 'chrony.skew', 'area'],
+ 'lines': [
+ ['skew', None, 'absolute', 1, 1000]
+ ]
+ }
+}
+
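+# Mapping of `chronyc -n tracking` output fields to chart dimension ids and
+# scale factors: each output line has the form "<Field name> : <value> [units]";
+# _get_data() keeps the first token of the value and multiplies it by the
+# factor below to produce an integer for the chart.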
+CHRONY = [
+ ('Frequency', 'frequency', 1e3),
+ ('Last offset', 'lastoffset', 1e9),
+ ('RMS offset', 'rmsoffset', 1e9),
+ ('Residual freq', 'residualfreq', 1e3),
+ ('Root delay', 'rootdelay', 1e9),
+ ('Root dispersion', 'rootdispersion', 1e9),
+ ('Skew', 'skew', 1e3),
+ ('Stratum', 'stratum', 1),
+ ('System time', 'timediff', 1e9)
+]
+
+
+class Service(ExecutableService):
+ def __init__(self, configuration=None, name=None):
+ ExecutableService.__init__(
+ self, configuration=configuration, name=name)
+ self.command = 'chronyc -n tracking'
+ self.order = ORDER
+ self.definitions = CHARTS
+
+ def _get_data(self):
+ """
+ Format data received from shell command
+ :return: dict
+ """
+ raw_data = self._get_raw_data()
+ if not raw_data:
+ return None
+
+ raw_data = (line.split(':', 1) for line in raw_data)
+ parsed, data = dict(), dict()
+
+ for line in raw_data:
+ try:
+ key, value = (l.strip() for l in line)
+ except ValueError:
+ continue
+ if value:
+ parsed[key] = value.split()[0]
+
+ for key, dim_id, multiplier in CHRONY:
+ try:
+ data[dim_id] = int(float(parsed[key]) * multiplier)
+ except (KeyError, ValueError):
+ continue
+
+ return data or None
diff --git a/collectors/python.d.plugin/chrony/chrony.conf b/collectors/python.d.plugin/chrony/chrony.conf
new file mode 100644
index 000000000..9ac906b5f
--- /dev/null
+++ b/collectors/python.d.plugin/chrony/chrony.conf
@@ -0,0 +1,79 @@
+# netdata python.d.plugin configuration for chrony
+#
+# This file is in YAML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+update_every: 5
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# retries sets the number of retries to be made in case of failures.
+# If unset, the default for python.d.plugin is used.
+# Attempts to restore the service are made once every update_every
+# and only if the module has collected values in the past.
+# retries: 60
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# retries: 60 # the JOB's number of restoration attempts
+# autodetection_retry: 0 # the JOB's re-check interval in seconds
+#
+# In addition to the above, chrony also supports the following:
+#
+# command: 'chronyc tracking' # the command to run
+#
+
+# ----------------------------------------------------------------------
+# REQUIRED chrony CONFIGURATION
+#
+# netdata queries chrony as the netdata user.
+# Verify that the netdata user is allowed to run 'chronyc tracking';
+# check the cmdallow directive in chrony.conf.
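+# (for example, a "cmdallow 127.0.0.1" line in chrony.conf typically allows
+# local monitoring commands; adjust to your setup)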
+# ----------------------------------------------------------------------
+# AUTO-DETECTION JOBS
+
+local:
+ command: 'chronyc -n tracking'
diff --git a/collectors/python.d.plugin/couchdb/Makefile.inc b/collectors/python.d.plugin/couchdb/Makefile.inc
new file mode 100644
index 000000000..89dfb51c7
--- /dev/null
+++ b/collectors/python.d.plugin/couchdb/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += couchdb/couchdb.chart.py
+dist_pythonconfig_DATA += couchdb/couchdb.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += couchdb/README.md couchdb/Makefile.inc
+
diff --git a/collectors/python.d.plugin/couchdb/README.md b/collectors/python.d.plugin/couchdb/README.md
new file mode 100644
index 000000000..eff8c0810
--- /dev/null
+++ b/collectors/python.d.plugin/couchdb/README.md
@@ -0,0 +1,35 @@
+# couchdb
+
+This module monitors vital statistics of a local Apache CouchDB 2.x server, including:
+
+* Overall server reads/writes
+* HTTP traffic breakdown
+ * Request methods (`GET`, `PUT`, `POST`, etc.)
+ * Response status codes (`200`, `201`, `4xx`, etc.)
+* Active server tasks
+* Replication status (CouchDB 2.1 and up only)
+* Erlang VM stats
+* Optional per-database statistics: sizes, # of docs, # of deleted docs
+
+### Configuration
+
+Sample for a local server running on port 5984:
+```yaml
+local:
+ user: 'admin'
+ pass: 'password'
+ node: 'couchdb@127.0.0.1'
+```
+
+Be sure to specify a correct admin-level username and password.
+
+You may also need to change the `node` name; this should match the value of `-name NODENAME` in your CouchDB's `etc/vm.args` file. Typically this is of the form `couchdb@fully.qualified.domain.name` in a cluster, or `couchdb@127.0.0.1` / `couchdb@localhost` for a single-node server.
+
+If you want per-database statistics, these need to be added to the configuration, separated by spaces:
+```yaml
+local:
+ ...
+ databases: 'db1 db2 db3 ...'
+```
+
+---
diff --git a/collectors/python.d.plugin/couchdb/couchdb.chart.py b/collectors/python.d.plugin/couchdb/couchdb.chart.py
new file mode 100644
index 000000000..5d6b9916f
--- /dev/null
+++ b/collectors/python.d.plugin/couchdb/couchdb.chart.py
@@ -0,0 +1,411 @@
+# -*- coding: utf-8 -*-
+# Description: couchdb netdata python.d module
+# Author: wohali <wohali@apache.org>
+# Thanks to l2isbad for good examples :)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from collections import namedtuple, defaultdict
+from json import loads
+from threading import Thread
+from socket import gethostbyname, gaierror
+try:
+ from queue import Queue
+except ImportError:
+ from Queue import Queue
+
+from bases.FrameworkServices.UrlService import UrlService
+
+# default module values (can be overridden per job in `config`)
+update_every = 1
+priority = 60000
+retries = 60
+
+METHODS = namedtuple('METHODS', ['get_data', 'url', 'stats'])
+
+OVERVIEW_STATS = [
+ 'couchdb.database_reads.value',
+ 'couchdb.database_writes.value',
+ 'couchdb.httpd.view_reads.value',
+ 'couchdb.httpd_request_methods.COPY.value',
+ 'couchdb.httpd_request_methods.DELETE.value',
+ 'couchdb.httpd_request_methods.GET.value',
+ 'couchdb.httpd_request_methods.HEAD.value',
+ 'couchdb.httpd_request_methods.OPTIONS.value',
+ 'couchdb.httpd_request_methods.POST.value',
+ 'couchdb.httpd_request_methods.PUT.value',
+ 'couchdb.httpd_status_codes.200.value',
+ 'couchdb.httpd_status_codes.201.value',
+ 'couchdb.httpd_status_codes.202.value',
+ 'couchdb.httpd_status_codes.204.value',
+ 'couchdb.httpd_status_codes.206.value',
+ 'couchdb.httpd_status_codes.301.value',
+ 'couchdb.httpd_status_codes.302.value',
+ 'couchdb.httpd_status_codes.304.value',
+ 'couchdb.httpd_status_codes.400.value',
+ 'couchdb.httpd_status_codes.401.value',
+ 'couchdb.httpd_status_codes.403.value',
+ 'couchdb.httpd_status_codes.404.value',
+ 'couchdb.httpd_status_codes.405.value',
+ 'couchdb.httpd_status_codes.406.value',
+ 'couchdb.httpd_status_codes.409.value',
+ 'couchdb.httpd_status_codes.412.value',
+ 'couchdb.httpd_status_codes.413.value',
+ 'couchdb.httpd_status_codes.414.value',
+ 'couchdb.httpd_status_codes.415.value',
+ 'couchdb.httpd_status_codes.416.value',
+ 'couchdb.httpd_status_codes.417.value',
+ 'couchdb.httpd_status_codes.500.value',
+ 'couchdb.httpd_status_codes.501.value',
+ 'couchdb.open_os_files.value',
+ 'couch_replicator.jobs.running.value',
+ 'couch_replicator.jobs.pending.value',
+ 'couch_replicator.jobs.crashed.value',
+]
+
+SYSTEM_STATS = [
+ 'context_switches',
+ 'run_queue',
+ 'ets_table_count',
+ 'reductions',
+ 'memory.atom',
+ 'memory.atom_used',
+ 'memory.binary',
+ 'memory.code',
+ 'memory.ets',
+ 'memory.other',
+ 'memory.processes',
+ 'io_input',
+ 'io_output',
+ 'os_proc_count',
+ 'process_count',
+ 'internal_replication_jobs'
+]
+
+DB_STATS = [
+ 'doc_count',
+ 'doc_del_count',
+ 'sizes.file',
+ 'sizes.external',
+ 'sizes.active'
+]
+
+ORDER = [
+ 'activity',
+ 'request_methods',
+ 'response_codes',
+ 'active_tasks',
+ 'replicator_jobs',
+ 'open_files',
+ 'db_sizes_file',
+ 'db_sizes_external',
+ 'db_sizes_active',
+ 'db_doc_counts',
+ 'db_doc_del_counts',
+ 'erlang_memory',
+ 'erlang_proc_counts',
+ 'erlang_peak_msg_queue',
+ 'erlang_reductions'
+]
+
+CHARTS = {
+ 'activity': {
+ 'options': [None, 'Overall Activity', 'req/s',
+ 'dbactivity', 'couchdb.activity', 'stacked'],
+ 'lines': [
+ ['couchdb_database_reads', 'DB reads', 'incremental'],
+ ['couchdb_database_writes', 'DB writes', 'incremental'],
+ ['couchdb_httpd_view_reads', 'View reads', 'incremental']
+ ]
+ },
+ 'request_methods': {
+ 'options': [None, 'HTTP request methods', 'req/s',
+ 'httptraffic', 'couchdb.request_methods',
+ 'stacked'],
+ 'lines': [
+ ['couchdb_httpd_request_methods_COPY', 'COPY', 'incremental'],
+ ['couchdb_httpd_request_methods_DELETE', 'DELETE', 'incremental'],
+ ['couchdb_httpd_request_methods_GET', 'GET', 'incremental'],
+ ['couchdb_httpd_request_methods_HEAD', 'HEAD', 'incremental'],
+ ['couchdb_httpd_request_methods_OPTIONS', 'OPTIONS',
+ 'incremental'],
+ ['couchdb_httpd_request_methods_POST', 'POST', 'incremental'],
+ ['couchdb_httpd_request_methods_PUT', 'PUT', 'incremental']
+ ]
+ },
+ 'response_codes': {
+ 'options': [None, 'HTTP response status codes', 'resp/s',
+ 'httptraffic', 'couchdb.response_codes',
+ 'stacked'],
+ 'lines': [
+ ['couchdb_httpd_status_codes_200', '200 OK', 'incremental'],
+ ['couchdb_httpd_status_codes_201', '201 Created', 'incremental'],
+ ['couchdb_httpd_status_codes_202', '202 Accepted', 'incremental'],
+ ['couchdb_httpd_status_codes_2xx', 'Other 2xx Success',
+ 'incremental'],
+ ['couchdb_httpd_status_codes_3xx', '3xx Redirection',
+ 'incremental'],
+ ['couchdb_httpd_status_codes_4xx', '4xx Client error',
+ 'incremental'],
+ ['couchdb_httpd_status_codes_5xx', '5xx Server error',
+ 'incremental']
+ ]
+ },
+ 'open_files': {
+ 'options': [None, 'Open files', 'files',
+ 'ops', 'couchdb.open_files', 'line'],
+ 'lines': [
+ ['couchdb_open_os_files', '# files', 'absolute']
+ ]
+ },
+ 'active_tasks': {
+ 'options': [None, 'Active task breakdown', 'tasks',
+ 'ops', 'couchdb.active_tasks', 'stacked'],
+ 'lines': [
+ ['activetasks_indexer', 'Indexer', 'absolute'],
+ ['activetasks_database_compaction', 'DB Compaction', 'absolute'],
+ ['activetasks_replication', 'Replication', 'absolute'],
+ ['activetasks_view_compaction', 'View Compaction', 'absolute']
+ ]
+ },
+ 'replicator_jobs': {
+ 'options': [None, 'Replicator job breakdown', 'jobs',
+ 'ops', 'couchdb.replicator_jobs', 'stacked'],
+ 'lines': [
+ ['couch_replicator_jobs_running', 'Running', 'absolute'],
+ ['couch_replicator_jobs_pending', 'Pending', 'absolute'],
+ ['couch_replicator_jobs_crashed', 'Crashed', 'absolute'],
+ ['internal_replication_jobs', 'Internal replication jobs',
+ 'absolute']
+ ]
+ },
+ 'erlang_memory': {
+ 'options': [None, 'Erlang VM memory usage', 'bytes',
+ 'erlang', 'couchdb.erlang_vm_memory', 'stacked'],
+ 'lines': [
+ ['memory_atom', 'atom', 'absolute'],
+ ['memory_binary', 'binaries', 'absolute'],
+ ['memory_code', 'code', 'absolute'],
+ ['memory_ets', 'ets', 'absolute'],
+ ['memory_processes', 'procs', 'absolute'],
+ ['memory_other', 'other', 'absolute']
+ ]
+ },
+ 'erlang_reductions': {
+ 'options': [None, 'Erlang reductions', 'count',
+ 'erlang', 'couchdb.reductions', 'line'],
+ 'lines': [
+ ['reductions', 'reductions', 'incremental']
+ ]
+ },
+ 'erlang_proc_counts': {
+ 'options': [None, 'Process counts', 'count',
+ 'erlang', 'couchdb.proccounts', 'line'],
+ 'lines': [
+ ['os_proc_count', 'OS procs', 'absolute'],
+ ['process_count', 'erl procs', 'absolute']
+ ]
+ },
+ 'erlang_peak_msg_queue': {
+ 'options': [None, 'Peak message queue size', 'count',
+ 'erlang', 'couchdb.peakmsgqueue',
+ 'line'],
+ 'lines': [
+ ['peak_msg_queue', 'peak size', 'absolute']
+ ]
+ },
+ # Lines for the following are added as part of check()
+ 'db_sizes_file': {
+ 'options': [None, 'Database sizes (file)', 'KB',
+ 'perdbstats', 'couchdb.db_sizes_file', 'line'],
+ 'lines': []
+ },
+ 'db_sizes_external': {
+ 'options': [None, 'Database sizes (external)', 'KB',
+ 'perdbstats', 'couchdb.db_sizes_external', 'line'],
+ 'lines': []
+ },
+ 'db_sizes_active': {
+ 'options': [None, 'Database sizes (active)', 'KB',
+ 'perdbstats', 'couchdb.db_sizes_active', 'line'],
+ 'lines': []
+ },
+ 'db_doc_counts': {
+ 'options': [None, 'Database # of docs', 'docs',
+ 'perdbstats', 'couchdb_db_doc_count', 'line'],
+ 'lines': []
+ },
+ 'db_doc_del_counts': {
+ 'options': [None, 'Database # of deleted docs', 'docs',
+ 'perdbstats', 'couchdb_db_doc_del_count', 'line'],
+ 'lines': []
+ }
+}
+
+
+class Service(UrlService):
+ def __init__(self, configuration=None, name=None):
+ UrlService.__init__(self, configuration=configuration, name=name)
+ self.order = ORDER
+ self.definitions = CHARTS
+ self.host = self.configuration.get('host', '127.0.0.1')
+ self.port = self.configuration.get('port', 5984)
+ self.node = self.configuration.get('node', 'couchdb@127.0.0.1')
+ self.scheme = self.configuration.get('scheme', 'http')
+ self.user = self.configuration.get('user')
+ self.password = self.configuration.get('pass')
+ try:
+ self.dbs = self.configuration.get('databases').split(' ')
+ except (KeyError, AttributeError):
+ self.dbs = []
+
+ def check(self):
+ if not (self.host and self.port):
+ self.error('Host is not defined in the module configuration file')
+ return False
+ try:
+ self.host = gethostbyname(self.host)
+ except gaierror as error:
+ self.error(str(error))
+ return False
+ self.url = '{scheme}://{host}:{port}'.format(scheme=self.scheme,
+ host=self.host,
+ port=self.port)
+ stats = self.url + '/_node/{node}/_stats'.format(node=self.node)
+ active_tasks = self.url + '/_active_tasks'
+ system = self.url + '/_node/{node}/_system'.format(node=self.node)
+ self.methods = [METHODS(get_data=self._get_overview_stats,
+ url=stats,
+ stats=OVERVIEW_STATS),
+ METHODS(get_data=self._get_active_tasks_stats,
+ url=active_tasks,
+ stats=None),
+ METHODS(get_data=self._get_overview_stats,
+ url=system,
+ stats=SYSTEM_STATS),
+ METHODS(get_data=self._get_dbs_stats,
+ url=self.url,
+ stats=DB_STATS)]
+ # must initialise manager before using _get_raw_data
+ self._manager = self._build_manager()
+ self.dbs = [db for db in self.dbs
+ if self._get_raw_data(self.url + '/' + db)]
+ for db in self.dbs:
+ self.definitions['db_sizes_file']['lines'].append(
+ ['db_'+db+'_sizes_file', db, 'absolute', 1, 1000]
+ )
+ self.definitions['db_sizes_external']['lines'].append(
+ ['db_'+db+'_sizes_external', db, 'absolute', 1, 1000]
+ )
+ self.definitions['db_sizes_active']['lines'].append(
+ ['db_'+db+'_sizes_active', db, 'absolute', 1, 1000]
+ )
+ self.definitions['db_doc_counts']['lines'].append(
+ ['db_'+db+'_doc_count', db, 'absolute']
+ )
+ self.definitions['db_doc_del_counts']['lines'].append(
+ ['db_'+db+'_doc_del_count', db, 'absolute']
+ )
+ return UrlService.check(self)
+
+ def _get_data(self):
+ threads = list()
+ queue = Queue()
+ result = dict()
+
+ for method in self.methods:
+ th = Thread(target=method.get_data,
+ args=(queue, method.url, method.stats))
+ th.start()
+ threads.append(th)
+
+ for thread in threads:
+ thread.join()
+ result.update(queue.get())
+
+ # self.info('couchdb result = ' + str(result))
+ return result or None
+
+ def _get_overview_stats(self, queue, url, stats):
+ raw_data = self._get_raw_data(url)
+ if not raw_data:
+ return queue.put(dict())
+ data = loads(raw_data)
+ to_netdata = self._fetch_data(raw_data=data, metrics=stats)
+ if 'message_queues' in data:
+ to_netdata['peak_msg_queue'] = get_peak_msg_queue(data)
+ return queue.put(to_netdata)
+
+ def _get_active_tasks_stats(self, queue, url, _):
+ taskdict = defaultdict(int)
+ taskdict["activetasks_indexer"] = 0
+ taskdict["activetasks_database_compaction"] = 0
+ taskdict["activetasks_replication"] = 0
+ taskdict["activetasks_view_compaction"] = 0
+ raw_data = self._get_raw_data(url)
+ if not raw_data:
+ return queue.put(dict())
+ data = loads(raw_data)
+ for task in data:
+ taskdict["activetasks_" + task["type"]] += 1
+ return queue.put(dict(taskdict))
+
+ def _get_dbs_stats(self, queue, url, stats):
+ to_netdata = {}
+ for db in self.dbs:
+ raw_data = self._get_raw_data(url + '/' + db)
+ if not raw_data:
+ continue
+ data = loads(raw_data)
+ for metric in stats:
+ value = data
+ metrics_list = metric.split('.')
+ try:
+ for m in metrics_list:
+ value = value[m]
+ except KeyError as e:
+ self.debug('cannot process ' + metric + ' for ' + db
+ + ": " + str(e))
+ continue
+ metric_name = 'db_{0}_{1}'.format(db, '_'.join(metrics_list))
+ to_netdata[metric_name] = value
+ return queue.put(to_netdata)
+
+ def _fetch_data(self, raw_data, metrics):
+ data = dict()
+ for metric in metrics:
+ value = raw_data
+ metrics_list = metric.split('.')
+ try:
+ for m in metrics_list:
+ value = value[m]
+ except KeyError as e:
+ self.debug('cannot process ' + metric + ': ' + str(e))
+ continue
+ # strip off .value from end of stat
+ if metrics_list[-1] == 'value':
+ metrics_list = metrics_list[:-1]
+ # sum up 3xx/4xx/5xx
+ if metrics_list[0:2] == ['couchdb', 'httpd_status_codes'] and \
+ int(metrics_list[2]) > 202:
+ metrics_list[2] = '{0}xx'.format(int(metrics_list[2]) // 100)
+ if '_'.join(metrics_list) in data:
+ data['_'.join(metrics_list)] += value
+ else:
+ data['_'.join(metrics_list)] = value
+ else:
+ data['_'.join(metrics_list)] = value
+ return data
+
+
+def get_peak_msg_queue(data):
+ maxsize = 0
+ queues = data['message_queues']
+ for queue in iter(queues.values()):
+ if isinstance(queue, dict) and 'count' in queue:
+ value = queue['count']
+ elif isinstance(queue, int):
+ value = queue
+ else:
+ continue
+ maxsize = max(maxsize, value)
+ return maxsize
diff --git a/collectors/python.d.plugin/couchdb/couchdb.conf b/collectors/python.d.plugin/couchdb/couchdb.conf
new file mode 100644
index 000000000..5f6e75cff
--- /dev/null
+++ b/collectors/python.d.plugin/couchdb/couchdb.conf
@@ -0,0 +1,91 @@
+# netdata python.d.plugin configuration for couchdb
+#
+# This file is in YAML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# By default, CouchDB only updates its stats every 10 seconds.
+update_every: 10
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# retries sets the number of retries to be made in case of failures.
+# If unset, the default for python.d.plugin is used.
+# Attempts to restore the service are made once every update_every
+# and only if the module has collected values in the past.
+# retries: 60
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# retries: 60 # the JOB's number of restoration attempts
+# autodetection_retry: 0 # the JOB's re-check interval in seconds
+#
+# In addition to the above, the couchdb plugin also supports the following:
+#
+# host: 'ipaddress' # Server IP address or hostname. Default: 127.0.0.1
+# port: 'port' # CouchDB port. Default: 5984
+# scheme: 'scheme' # http or https. Default: http
+# node: 'couchdb@127.0.0.1' # CouchDB node name. Same as the -name argument in vm.args.
+#
+# if the URL is password protected, the following are supported:
+#
+# user: 'username'
+# pass: 'password'
+#
+# if db-specific stats are desired, place their names in databases:
+# databases: 'npm-registry animaldb'
+#
+# ----------------------------------------------------------------------
+# AUTO-DETECTION JOBS
+# only one of them will run (they have the same name)
+#
+localhost:
+ name: 'local'
+ host: '127.0.0.1'
+ port: '5984'
+ node: 'couchdb@127.0.0.1'
+ scheme: 'http'
+# user: 'admin'
+# pass: 'password'
diff --git a/collectors/python.d.plugin/cpufreq/Makefile.inc b/collectors/python.d.plugin/cpufreq/Makefile.inc
new file mode 100644
index 000000000..d6138801d
--- /dev/null
+++ b/collectors/python.d.plugin/cpufreq/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += cpufreq/cpufreq.chart.py
+dist_pythonconfig_DATA += cpufreq/cpufreq.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += cpufreq/README.md cpufreq/Makefile.inc
+
diff --git a/collectors/python.d.plugin/cpufreq/README.md b/collectors/python.d.plugin/cpufreq/README.md
new file mode 100644
index 000000000..33891d59d
--- /dev/null
+++ b/collectors/python.d.plugin/cpufreq/README.md
@@ -0,0 +1,30 @@
+# cpufreq
+
+This module shows the current CPU frequency as set by the cpufreq kernel
+module.
+
+**Requirement:**
+You need to have `CONFIG_CPU_FREQ` and (optionally) `CONFIG_CPU_FREQ_STAT`
+enabled in your kernel.
+
+This module tries to read from one of two possible locations. On
+initialization, it tries to read the `time_in_state` files provided by
+cpufreq\_stats. If these files do not exist, or don't contain valid data, it
+falls back to the less accurate `scaling_cur_freq` file (which only
+represents the **current** CPU frequency, and doesn't account for any state
+changes that happen between updates).
+
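+The `time_in_state` reading is effectively a time-weighted average: the module
+remembers the previous tick counters per frequency and weights each frequency
+by the ticks accumulated since the last read. A minimal standalone sketch of
+that idea (simplified, not the collector's actual code):
+
+```python
+def avg_freq_khz(path, last):
+    """Weighted-average CPU frequency from a cpufreq `time_in_state` file.
+
+    `path` is e.g. /sys/devices/system/cpu/cpu0/cpufreq/stats/time_in_state,
+    `last` maps frequency (kHz) -> tick counter from the previous read.
+    Returns (average_khz, current_counters).
+    """
+    current, weighted, total = {}, 0, 0
+    with open(path) as state_file:
+        for line in state_file:
+            freq, ticks = map(int, line.split())
+            current[freq] = ticks
+            delta = ticks - last.get(freq, 0)
+            weighted += freq * delta
+            total += delta
+    return (weighted / total if total else 0), current
+```
+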
+It produces one chart with multiple lines (one line per core).
+
+### configuration
+
+Sample:
+
+```yaml
+sys_dir: "/sys/devices"
+```
+
+If no configuration is given, the module searches for cpufreq files in the `/sys/devices` directory.
+The directory is prefixed with `NETDATA_HOST_PREFIX` if that variable is set.
+
+---
diff --git a/collectors/python.d.plugin/cpufreq/cpufreq.chart.py b/collectors/python.d.plugin/cpufreq/cpufreq.chart.py
new file mode 100644
index 000000000..cbbab6d7f
--- /dev/null
+++ b/collectors/python.d.plugin/cpufreq/cpufreq.chart.py
@@ -0,0 +1,115 @@
+# -*- coding: utf-8 -*-
+# Description: cpufreq netdata python.d module
+# Author: Pawel Krupa (paulfantom)
+# Author: Steven Noonan (tycho)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+import glob
+import os
+
+from bases.FrameworkServices.SimpleService import SimpleService
+
+# default module values (can be overridden per job in `config`)
+# update_every = 2
+
+ORDER = ['cpufreq']
+
+CHARTS = {
+ 'cpufreq': {
+ 'options': [None, 'CPU Clock', 'MHz', 'cpufreq', 'cpufreq.cpufreq', 'line'],
+ 'lines': [
+ # lines are created dynamically in `check()` method
+ ]
+ }
+}
+
+
+class Service(SimpleService):
+ def __init__(self, configuration=None, name=None):
+ prefix = os.getenv('NETDATA_HOST_PREFIX', "")
+ if prefix.endswith('/'):
+ prefix = prefix[:-1]
+ self.sys_dir = prefix + "/sys/devices"
+ SimpleService.__init__(self, configuration=configuration, name=name)
+ self.order = ORDER
+ self.definitions = CHARTS
+ self.fake_name = 'cpu'
+ self.assignment = {}
+ self.accurate_exists = True
+ self.accurate_last = {}
+
+ def _get_data(self):
+ data = {}
+
+ if self.accurate_exists:
+ accurate_ok = True
+
+ for name, paths in self.assignment.items():
+ last = self.accurate_last[name]
+
+ current = {}
+ deltas = {}
+ ticks_since_last = 0
+
+ for line in open(paths['accurate'], 'r'):
+ line = list(map(int, line.split()))
+ current[line[0]] = line[1]
+ ticks = line[1] - last.get(line[0], 0)
+ ticks_since_last += ticks
+ deltas[line[0]] = line[1] - last.get(line[0], 0)
+
+ avg_freq = 0
+ if ticks_since_last != 0:
+ for frequency, ticks in deltas.items():
+ avg_freq += frequency * ticks
+ avg_freq /= ticks_since_last
+
+ data[name] = avg_freq
+ self.accurate_last[name] = current
+ if avg_freq == 0 or ticks_since_last == 0:
+ # Delta is either too large or nonexistent, fall back to
+ # less accurate reading. This can happen if we switch
+ # to/from the 'schedutil' governor, which doesn't report
+ # stats.
+ accurate_ok = False
+
+ if accurate_ok:
+ return data
+
+ for name, paths in self.assignment.items():
+ data[name] = open(paths['inaccurate'], 'r').read()
+
+ return data
+
+ def check(self):
+ try:
+ self.sys_dir = str(self.configuration['sys_dir'])
+ except (KeyError, TypeError):
+ self.error("No path specified. Using: '" + self.sys_dir + "'")
+
+ for path in glob.glob(self.sys_dir + '/system/cpu/cpu*/cpufreq/stats/time_in_state'):
+ path_elem = path.split('/')
+ cpu = path_elem[-4]
+ if cpu not in self.assignment:
+ self.assignment[cpu] = {}
+ self.assignment[cpu]['accurate'] = path
+ self.accurate_last[cpu] = {}
+
+ if not self.assignment:
+ self.accurate_exists = False
+
+ for path in glob.glob(self.sys_dir + '/system/cpu/cpu*/cpufreq/scaling_cur_freq'):
+ path_elem = path.split('/')
+ cpu = path_elem[-3]
+ if cpu not in self.assignment:
+ self.assignment[cpu] = {}
+ self.assignment[cpu]['inaccurate'] = path
+
+ if not self.assignment:
+ self.error("couldn't find a method to read cpufreq statistics")
+ return False
+
+ for name in sorted(self.assignment, key=lambda v: int(v[3:])):
+ self.definitions[ORDER[0]]['lines'].append([name, name, 'absolute', 1, 1000])
+
+ return True
diff --git a/collectors/python.d.plugin/cpufreq/cpufreq.conf b/collectors/python.d.plugin/cpufreq/cpufreq.conf
new file mode 100644
index 000000000..0890245d9
--- /dev/null
+++ b/collectors/python.d.plugin/cpufreq/cpufreq.conf
@@ -0,0 +1,43 @@
+# netdata python.d.plugin configuration for cpufreq
+#
+# This file is in YAML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 1
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# retries sets the number of retries to be made in case of failures.
+# If unset, the default for python.d.plugin is used.
+# Attempts to restore the service are made once every update_every
+# and only if the module has collected values in the past.
+# retries: 60
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
+
+# The directory to search for cpufreq statistics (time_in_state / scaling_cur_freq files)
+sys_dir: "/sys/devices"
diff --git a/collectors/python.d.plugin/cpuidle/Makefile.inc b/collectors/python.d.plugin/cpuidle/Makefile.inc
new file mode 100644
index 000000000..66c47d3cf
--- /dev/null
+++ b/collectors/python.d.plugin/cpuidle/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += cpuidle/cpuidle.chart.py
+dist_pythonconfig_DATA += cpuidle/cpuidle.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += cpuidle/README.md cpuidle/Makefile.inc
+
diff --git a/collectors/python.d.plugin/cpuidle/README.md b/collectors/python.d.plugin/cpuidle/README.md
new file mode 100644
index 000000000..495169638
--- /dev/null
+++ b/collectors/python.d.plugin/cpuidle/README.md
@@ -0,0 +1,11 @@
+# cpuidle
+
+This module monitors the usage of CPU idle states.
+
+**Requirement:**
+Your kernel needs to have `CONFIG_CPU_IDLE` enabled.
+
+It produces one stacked chart per CPU, showing the percentage of time spent in
+each state.
+
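+The state names and residency counters come from the cpuidle sysfs interface;
+a minimal sketch of where the data is read from (same paths the collector
+below uses, simplified):
+
+```python
+import glob
+
+# Enumerate C-states per CPU from the cpuidle sysfs interface.
+for name_path in sorted(glob.glob('/sys/devices/system/cpu/cpu*/cpuidle/state*/name')):
+    state_dir = name_path.rsplit('/', 1)[0]
+    cpu = state_dir.split('/')[-3]            # e.g. 'cpu0'
+    with open(name_path) as f:
+        state_name = f.read().strip()         # e.g. 'C1E' (hardware dependent)
+    with open(state_dir + '/time') as f:
+        residency_us = int(f.read())          # time spent in this state, in microseconds
+    print(cpu, state_name, residency_us)
+```
+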
+---
diff --git a/collectors/python.d.plugin/cpuidle/cpuidle.chart.py b/collectors/python.d.plugin/cpuidle/cpuidle.chart.py
new file mode 100644
index 000000000..feac025bf
--- /dev/null
+++ b/collectors/python.d.plugin/cpuidle/cpuidle.chart.py
@@ -0,0 +1,148 @@
+# -*- coding: utf-8 -*-
+# Description: cpuidle netdata python.d module
+# Author: Steven Noonan (tycho)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+import ctypes
+import glob
+import os
+import platform
+
+from bases.FrameworkServices.SimpleService import SimpleService
+
+syscall = ctypes.CDLL('libc.so.6').syscall
+
+# default module values (can be overridden per job in `config`)
+# update_every = 2
+
+
+class Service(SimpleService):
+ def __init__(self, configuration=None, name=None):
+ prefix = os.getenv('NETDATA_HOST_PREFIX', "")
+ if prefix.endswith('/'):
+ prefix = prefix[:-1]
+ self.sys_dir = prefix + "/sys/devices/system/cpu"
+ self.schedstat_path = prefix + "/proc/schedstat"
+ SimpleService.__init__(self, configuration=configuration, name=name)
+ self.order = []
+ self.definitions = {}
+ self.fake_name = 'cpu'
+ self.assignment = {}
+ self.last_schedstat = None
+
+ @staticmethod
+ def __gettid():
+ # This is horrendous. We need the *thread id* (not the *process id*),
+ # but there's no Python standard library way of doing that. If you need
+ # to enable this module on a non-x86 machine type, you'll have to find
+ # the Linux syscall number for gettid() and add it to the dictionary
+ # below.
+ syscalls = {
+ 'i386': 224,
+ 'x86_64': 186,
+ }
+ if platform.machine() not in syscalls:
+ return None
+ tid = syscall(syscalls[platform.machine()])
+ return tid
+
+ def __wake_cpus(self, cpus):
+ # Requires Python 3.3+. This will "tickle" each CPU to force it to
+ # update its idle counters.
+ if hasattr(os, 'sched_setaffinity'):
+ pid = self.__gettid()
+ save_affinity = os.sched_getaffinity(pid)
+ for idx in cpus:
+ os.sched_setaffinity(pid, [idx])
+ os.sched_getaffinity(pid)
+ os.sched_setaffinity(pid, save_affinity)
+
+ def __read_schedstat(self):
+ cpus = {}
+ for line in open(self.schedstat_path, 'r'):
+ if not line.startswith('cpu'):
+ continue
+ line = line.rstrip().split()
+ cpu = line[0]
+ active_time = line[7]
+ cpus[cpu] = int(active_time) // 1000
+ return cpus
+
+ def _get_data(self):
+ results = {}
+
+ # Use the kernel scheduler stats to determine how much time was spent
+ # in C0 (active).
+ schedstat = self.__read_schedstat()
+
+ # Determine if any of the CPUs are idle. If they are, then we need to
+ # tickle them in order to update their C-state residency statistics.
+ if self.last_schedstat is None:
+ needs_tickle = list(self.assignment.keys())
+ else:
+ needs_tickle = []
+ for cpu, active_time in self.last_schedstat.items():
+ delta = schedstat[cpu] - active_time
+ if delta < 1:
+ needs_tickle.append(cpu)
+
+ if needs_tickle:
+ # This line is critical for the stats to update. If we don't "tickle"
+ # idle CPUs, then the counters for those CPUs stop counting.
+ self.__wake_cpus([int(cpu[3:]) for cpu in needs_tickle])
+
+ # Re-read schedstat now that we've tickled any idlers.
+ schedstat = self.__read_schedstat()
+
+ self.last_schedstat = schedstat
+
+ for cpu, metrics in self.assignment.items():
+ update_time = schedstat[cpu]
+ results[cpu + '_active_time'] = update_time
+
+ for metric, path in metrics.items():
+ residency = int(open(path, 'r').read())
+ results[metric] = residency
+
+ return results
+
+ def check(self):
+ if self.__gettid() is None:
+ self.error('Cannot get thread ID. Stats would be completely broken.')
+ return False
+
+ for path in sorted(glob.glob(self.sys_dir + '/cpu*/cpuidle/state*/name')):
+ # ['', 'sys', 'devices', 'system', 'cpu', 'cpu0', 'cpuidle', 'state3', 'name']
+ path_elem = path.split('/')
+ cpu = path_elem[-4]
+ state = path_elem[-2]
+ statename = open(path, 'rt').read().rstrip()
+
+ orderid = '%s_cpuidle' % (cpu,)
+ if orderid not in self.definitions:
+ self.order.append(orderid)
+ active_name = '%s_active_time' % (cpu,)
+ self.definitions[orderid] = {
+ 'options': [None, 'C-state residency', 'time%', 'cpuidle', 'cpuidle.cpuidle', 'stacked'],
+ 'lines': [
+ [active_name, 'C0 (active)', 'percentage-of-incremental-row', 1, 1],
+ ],
+ }
+ self.assignment[cpu] = {}
+
+ defid = '%s_%s_time' % (orderid, state)
+
+ self.definitions[orderid]['lines'].append(
+ [defid, statename, 'percentage-of-incremental-row', 1, 1]
+ )
+
+ self.assignment[cpu][defid] = '/'.join(path_elem[:-1] + ['time'])
+
+ # Sort order by kernel-specified CPU index
+ self.order.sort(key=lambda x: int(x.split('_')[0][3:]))
+
+ if not self.definitions:
+ self.error("couldn't find cstate stats")
+ return False
+
+ return True
diff --git a/collectors/python.d.plugin/cpuidle/cpuidle.conf b/collectors/python.d.plugin/cpuidle/cpuidle.conf
new file mode 100644
index 000000000..bc276fcd2
--- /dev/null
+++ b/collectors/python.d.plugin/cpuidle/cpuidle.conf
@@ -0,0 +1,40 @@
+# netdata python.d.plugin configuration for cpuidle
+#
+# This file is in YAML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 1
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# retries sets the number of retries to be made in case of failures.
+# If unset, the default for python.d.plugin is used.
+# Attempts to restore the service are made once every update_every
+# and only if the module has collected values in the past.
+# retries: 60
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
diff --git a/collectors/python.d.plugin/dns_query_time/Makefile.inc b/collectors/python.d.plugin/dns_query_time/Makefile.inc
new file mode 100644
index 000000000..7eca3e0b6
--- /dev/null
+++ b/collectors/python.d.plugin/dns_query_time/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += dns_query_time/dns_query_time.chart.py
+dist_pythonconfig_DATA += dns_query_time/dns_query_time.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += dns_query_time/README.md dns_query_time/Makefile.inc
+
diff --git a/collectors/python.d.plugin/dns_query_time/README.md b/collectors/python.d.plugin/dns_query_time/README.md
new file mode 100644
index 000000000..3703e8aaf
--- /dev/null
+++ b/collectors/python.d.plugin/dns_query_time/README.md
@@ -0,0 +1,10 @@
+# dns_query_time
+
+This module provides DNS query time statistics.
+
+**Requirement:**
+* `python-dnspython` package
+
+It produces one aggregate chart or one chart per DNS server, showing the query time.
+
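+### configuration
+
+A sample job (parameter names as documented in `dns_query_time.conf`; the
+servers and domains below are just placeholders):
+
+```yaml
+my_job:
+  dns_servers: '8.8.8.8 8.8.4.4'
+  domains: 'example.com example.org'
+  aggregate: yes
+  response_timeout: 4
+```
+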
+---
diff --git a/collectors/python.d.plugin/dns_query_time/dns_query_time.chart.py b/collectors/python.d.plugin/dns_query_time/dns_query_time.chart.py
new file mode 100644
index 000000000..d3c3db788
--- /dev/null
+++ b/collectors/python.d.plugin/dns_query_time/dns_query_time.chart.py
@@ -0,0 +1,145 @@
+# -*- coding: utf-8 -*-
+# Description: dns_query_time netdata python.d module
+# Author: l2isbad
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from random import choice
+from socket import getaddrinfo, gaierror
+from threading import Thread
+
+try:
+ from time import monotonic as time
+except ImportError:
+ from time import time
+
+try:
+ import dns.message
+ import dns.query
+ import dns.name
+ DNS_PYTHON = True
+except ImportError:
+ DNS_PYTHON = False
+
+try:
+ from queue import Queue
+except ImportError:
+ from Queue import Queue
+
+from bases.FrameworkServices.SimpleService import SimpleService
+
+
+# default module values (can be overridden per job in `config`)
+update_every = 5
+priority = 60000
+retries = 60
+
+
+class Service(SimpleService):
+ def __init__(self, configuration=None, name=None):
+ SimpleService.__init__(self, configuration=configuration, name=name)
+ self.order = list()
+ self.definitions = dict()
+ self.timeout = self.configuration.get('response_timeout', 4)
+ self.aggregate = self.configuration.get('aggregate', True)
+ self.domains = self.configuration.get('domains')
+ self.server_list = self.configuration.get('dns_servers')
+
+ def check(self):
+ if not DNS_PYTHON:
+ self.error('\'python-dnspython\' package is needed to use dns_query_time.chart.py')
+ return False
+
+ self.timeout = self.timeout if isinstance(self.timeout, int) else 4
+
+ if not all([self.domains, self.server_list,
+ isinstance(self.server_list, str), isinstance(self.domains, str)]):
+ self.error('server_list and domain_list can\'t be empty')
+ return False
+ else:
+ self.domains, self.server_list = self.domains.split(), self.server_list.split()
+
+ # iterate over a copy: removing entries from the list while iterating it would skip servers
+ for ns in self.server_list[:]:
+ if not check_ns(ns):
+ self.info('Bad NS: %s' % ns)
+ self.server_list.remove(ns)
+ if not self.server_list:
+ return False
+
+ data = self._get_data(timeout=1)
+
+ down_servers = [s for s in data if data[s] == -100]
+ for down in down_servers:
+ down = down[3:].replace('_', '.')
+ self.info('Removed due to no response: %s' % down)
+ self.server_list.remove(down)
+ if not self.server_list:
+ return False
+
+ self.order, self.definitions = create_charts(aggregate=self.aggregate, server_list=self.server_list)
+ return True
+
+ def _get_data(self, timeout=None):
+ return dns_request(self.server_list, timeout or self.timeout, self.domains)
+
+
+def dns_request(server_list, timeout, domains):
+ threads = list()
+ que = Queue()
+ result = dict()
+
+ def dns_req(ns, t, q):
+ domain = dns.name.from_text(choice(domains))
+ request = dns.message.make_query(domain, dns.rdatatype.A)
+
+ try:
+ dns_start = time()
+ dns.query.udp(request, ns, timeout=t)
+ dns_end = time()
+ query_time = round((dns_end - dns_start) * 1000)
+ q.put({'_'.join(['ns', ns.replace('.', '_')]): query_time})
+ except dns.exception.Timeout:
+ q.put({'_'.join(['ns', ns.replace('.', '_')]): -100})
+
+ for server in server_list:
+ th = Thread(target=dns_req, args=(server, timeout, que))
+ th.start()
+ threads.append(th)
+
+ for th in threads:
+ th.join()
+ result.update(que.get())
+
+ return result
+
+
+def check_ns(ns):
+ try:
+ return getaddrinfo(ns, 'domain')[0][4][0]
+ except gaierror:
+ return False
+
+
+def create_charts(aggregate, server_list):
+ if aggregate:
+ order = ['dns_group']
+ definitions = {
+ 'dns_group': {
+ 'options': [None, 'DNS Response Time', 'ms', 'name servers', 'dns_query_time.response_time', 'line'],
+ 'lines': []
+ }
+ }
+ for ns in server_list:
+ definitions['dns_group']['lines'].append(['_'.join(['ns', ns.replace('.', '_')]), ns, 'absolute'])
+
+ return order, definitions
+ else:
+ order = [''.join(['dns_', ns.replace('.', '_')]) for ns in server_list]
+ definitions = dict()
+ for ns in server_list:
+ definitions[''.join(['dns_', ns.replace('.', '_')])] = {
+ 'options': [None, 'DNS Response Time', 'ms', ns, 'dns_query_time.response_time', 'area'],
+ 'lines': [
+ ['_'.join(['ns', ns.replace('.', '_')]), ns, 'absolute']
+ ]
+ }
+ return order, definitions
diff --git a/collectors/python.d.plugin/dns_query_time/dns_query_time.conf b/collectors/python.d.plugin/dns_query_time/dns_query_time.conf
new file mode 100644
index 000000000..d32c6db83
--- /dev/null
+++ b/collectors/python.d.plugin/dns_query_time/dns_query_time.conf
@@ -0,0 +1,71 @@
+# netdata python.d.plugin configuration for dns_query_time
+#
+# This file is in YAML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 1
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# retries sets the number of retries to be made in case of failures.
+# If unset, the default for python.d.plugin is used.
+# Attempts to restore the service are made once every update_every
+# and only if the module has collected values in the past.
+# retries: 60
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# retries: 60 # the JOB's number of restoration attempts
+# autodetection_retry: 0 # the JOB's re-check interval in seconds
+#
+# In addition to the above, dns_query_time also supports the following:
+#
+# dns_servers: 'dns servers' # List of DNS servers to query
+# domains: 'domains' # List of domains
+# aggregate: yes/no # Aggregate all servers in one chart or not
+# response_timeout: 4 # DNS query response timeout (query = -100 if response time > response_timeout)
+#
+# ----------------------------------------------------------------------
\ No newline at end of file
diff --git a/collectors/python.d.plugin/dnsdist/Makefile.inc b/collectors/python.d.plugin/dnsdist/Makefile.inc
new file mode 100644
index 000000000..a53f518f0
--- /dev/null
+++ b/collectors/python.d.plugin/dnsdist/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += dnsdist/dnsdist.chart.py
+dist_pythonconfig_DATA += dnsdist/dnsdist.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += dnsdist/README.md dnsdist/Makefile.inc
+
diff --git a/collectors/python.d.plugin/dnsdist/README.md b/collectors/python.d.plugin/dnsdist/README.md
new file mode 100644
index 000000000..b646ae27c
--- /dev/null
+++ b/collectors/python.d.plugin/dnsdist/README.md
@@ -0,0 +1,54 @@
+# dnsdist
+
+This module monitors dnsdist performance and health metrics.
+
+The following charts are drawn:
+
+1. **Response latency**
+ * latency-slow
+ * latency100-1000
+ * latency50-100
+ * latency10-50
+ * latency1-10
+ * latency0-1
+
+2. **Cache performance**
+ * cache-hits
+ * cache-misses
+
+3. **ACL events**
+ * acl-drops
+ * rule-drop
+ * rule-nxdomain
+ * rule-refused
+
+4. **Noncompliant data**
+ * empty-queries
+ * no-policy
+ * noncompliant-queries
+ * noncompliant-responses
+
+5. **Queries**
+ * queries
+ * rdqueries
+ * empty-queries
+
+6. **Health**
+ * downstream-send-errors
+ * downstream-timeouts
+ * servfail-responses
+ * trunc-failures
+
+### configuration
+
+```yaml
+localhost:
+ name : 'local'
+ url : 'http://127.0.0.1:5053/jsonstat?command=stats'
+ user : 'username'
+ pass : 'password'
+ header:
+ X-API-Key: 'dnsdist-api-key'
+```
+
+---
diff --git a/collectors/python.d.plugin/dnsdist/dnsdist.chart.py b/collectors/python.d.plugin/dnsdist/dnsdist.chart.py
new file mode 100644
index 000000000..1aff3f803
--- /dev/null
+++ b/collectors/python.d.plugin/dnsdist/dnsdist.chart.py
@@ -0,0 +1,133 @@
+# -*- coding: utf-8 -*-
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from json import loads
+
+from bases.FrameworkServices.UrlService import UrlService
+
+
+ORDER = [
+ 'queries',
+ 'queries_dropped',
+ 'packets_dropped',
+ 'answers',
+ 'backend_responses',
+ 'backend_commerrors',
+ 'backend_errors',
+ 'cache',
+ 'servercpu',
+ 'servermem',
+ 'query_latency',
+ 'query_latency_avg'
+]
+
+
+CHARTS = {
+ 'queries': {
+ 'options': [None, 'Client queries received', 'queries/s', 'queries', 'dnsdist.queries', 'line'],
+ 'lines': [
+ ['queries', 'all', 'incremental'],
+ ['rdqueries', 'recursive', 'incremental'],
+ ['empty-queries', 'empty', 'incremental']
+ ]
+ },
+ 'queries_dropped': {
+ 'options': [None, 'Client queries dropped', 'queries/s', 'queries', 'dnsdist.queries_dropped', 'line'],
+ 'lines': [
+ ['rule-drop', 'rule drop', 'incremental'],
+ ['dyn-blocked', 'dynamic block', 'incremental'],
+ ['no-policy', 'no policy', 'incremental'],
+ ['noncompliant-queries', 'non compliant', 'incremental']
+ ]
+ },
+ 'packets_dropped': {
+ 'options': [None, 'Packets dropped', 'packets/s', 'packets', 'dnsdist.packets_dropped', 'line'],
+ 'lines': [
+ ['acl-drops', 'acl', 'incremental']
+ ]
+ },
+ 'answers': {
+ 'options': [None, 'Answers statistics', 'answers/s', 'answers', 'dnsdist.answers', 'line'],
+ 'lines': [
+ ['self-answered', 'self answered', 'incremental'],
+ ['rule-nxdomain', 'nxdomain', 'incremental', -1],
+ ['rule-refused', 'refused', 'incremental', -1],
+ ['trunc-failures', 'trunc failures', 'incremental', -1]
+ ]
+ },
+ 'backend_responses': {
+ 'options': [None, 'Backend responses', 'responses/s', 'backends', 'dnsdist.backend_responses', 'line'],
+ 'lines': [
+ ['responses', 'responses', 'incremental']
+ ]
+ },
+ 'backend_commerrors': {
+ 'options': [None, 'Backend Communication Errors', 'errors/s', 'backends', 'dnsdist.backend_commerrors', 'line'],
+ 'lines': [
+ ['downstream-send-errors', 'send errors', 'incremental']
+ ]
+ },
+ 'backend_errors': {
+ 'options': [None, 'Backend error responses', 'responses/s', 'backends', 'dnsdist.backend_errors', 'line'],
+ 'lines': [
+ ['downstream-timeouts', 'timeout', 'incremental'],
+ ['servfail-responses', 'servfail', 'incremental'],
+ ['noncompliant-responses', 'non compliant', 'incremental']
+ ]
+ },
+ 'cache': {
+ 'options': [None, 'Cache performance', 'answers/s', 'cache', 'dnsdist.cache', 'area'],
+ 'lines': [
+ ['cache-hits', 'hits', 'incremental'],
+ ['cache-misses', 'misses', 'incremental', -1]
+ ]
+ },
+ 'servercpu': {
+ 'options': [None, 'DNSDIST server CPU utilization', 'ms/s', 'server', 'dnsdist.servercpu', 'stacked'],
+ 'lines': [
+ ['cpu-sys-msec', 'system state', 'incremental'],
+ ['cpu-user-msec', 'user state', 'incremental']
+ ]
+ },
+ 'servermem': {
+ 'options': [None, 'DNSDIST server memory utilization', 'MB', 'server', 'dnsdist.servermem', 'area'],
+ 'lines': [
+ ['real-memory-usage', 'memory usage', 'absolute', 1, 1048576]
+ ]
+ },
+ 'query_latency': {
+ 'options': [None, 'Query latency', 'queries/s', 'latency', 'dnsdist.query_latency', 'stacked'],
+ 'lines': [
+ ['latency0-1', '1ms', 'incremental'],
+ ['latency1-10', '10ms', 'incremental'],
+ ['latency10-50', '50ms', 'incremental'],
+ ['latency50-100', '100ms', 'incremental'],
+ ['latency100-1000', '1sec', 'incremental'],
+ ['latency-slow', 'slow', 'incremental']
+ ]
+ },
+ 'query_latency_avg': {
+ 'options': [None, 'Average latency for the last N queries', 'ms/query', 'latency',
+ 'dnsdist.query_latency_avg', 'line'],
+ 'lines': [
+ ['latency-avg100', '100', 'absolute'],
+ ['latency-avg1000', '1k', 'absolute'],
+ ['latency-avg10000', '10k', 'absolute'],
+ ['latency-avg1000000', '1000k', 'absolute']
+ ]
+ }
+}
+
+
+class Service(UrlService):
+ def __init__(self, configuration=None, name=None):
+ UrlService.__init__(self, configuration=configuration, name=name)
+ self.order = ORDER
+ self.definitions = CHARTS
+
+ def _get_data(self):
+ data = self._get_raw_data()
+ if not data:
+ return None
+
+ return loads(data)
diff --git a/collectors/python.d.plugin/dnsdist/dnsdist.conf b/collectors/python.d.plugin/dnsdist/dnsdist.conf
new file mode 100644
index 000000000..aec58b8e1
--- /dev/null
+++ b/collectors/python.d.plugin/dnsdist/dnsdist.conf
@@ -0,0 +1,85 @@
+# netdata python.d.plugin configuration for dnsdist
+#
+# This file is in YaML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+#update_every: 1
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# retries sets the number of retries to be made in case of failures.
+# If unset, the default for python.d.plugin is used.
+# Attempts to restore the service are made once every update_every
+# and only if the module has collected values in the past.
+#retries: 600000
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+#autodetection_retry: 1
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# retries: 60 # the JOB's number of restoration attempts
+# autodetection_retry: 0 # the JOB's re-check interval in seconds
+#
+#
+# Additionally to the above, dnsdist also supports the following:
+#
+# url: 'URL' # the URL to fetch dnsdist performance statistics
+# user: 'username' # username for basic auth
+# pass: 'password' # password for basic auth
+# header:
+# X-API-Key: 'Key' # API key
+#
+# ----------------------------------------------------------------------
+# AUTO-DETECTION JOBS
+# only one of them will run (they have the same name)
+
+# localhost:
+# name : 'local'
+# url : 'http://127.0.0.1:5053/jsonstat?command=stats'
+# user : 'username'
+# pass : 'password'
+# header:
+# X-API-Key: 'dnsdist-api-key'
+
+
diff --git a/collectors/python.d.plugin/dockerd/Makefile.inc b/collectors/python.d.plugin/dockerd/Makefile.inc
new file mode 100644
index 000000000..b100bc6a1
--- /dev/null
+++ b/collectors/python.d.plugin/dockerd/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += dockerd/dockerd.chart.py
+dist_pythonconfig_DATA += dockerd/dockerd.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += dockerd/README.md dockerd/Makefile.inc
+
diff --git a/collectors/python.d.plugin/dockerd/README.md b/collectors/python.d.plugin/dockerd/README.md
new file mode 100644
index 000000000..d3f603808
--- /dev/null
+++ b/collectors/python.d.plugin/dockerd/README.md
@@ -0,0 +1,26 @@
+# dockerd
+
+This module monitors Docker health metrics.
+
+**Requirement:**
+* `docker` package
+
+The following charts are drawn:
+
+1. **running containers**
+ * count
+
+2. **healthy containers**
+ * count
+
+3. **unhealthy containers**
+ * count
+
+### configuration
+
+```yaml
+ update_every : 1
+ priority : 60000
+```
+
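+The counts come from the Docker Engine API through the `docker` Python
+package. As a rough sketch of what the module collects (not the module
+itself), assuming the default UNIX socket:
+
+```python
+# Illustrative sketch: count containers by health status via the docker SDK.
+import docker
+
+client = docker.DockerClient(base_url='unix://var/run/docker.sock')
+running = len(client.containers.list())
+healthy = len(client.containers.list(filters={'health': 'healthy'}))
+unhealthy = len(client.containers.list(filters={'health': 'unhealthy'}))
+print(running, healthy, unhealthy)
+```
+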
+---
diff --git a/collectors/python.d.plugin/dockerd/dockerd.chart.py b/collectors/python.d.plugin/dockerd/dockerd.chart.py
new file mode 100644
index 000000000..a0d3d7e65
--- /dev/null
+++ b/collectors/python.d.plugin/dockerd/dockerd.chart.py
@@ -0,0 +1,77 @@
+# -*- coding: utf-8 -*-
+# Description: docker netdata python.d module
+# Author: Kévin Darcel (@tuxity)
+
+try:
+ import docker
+ HAS_DOCKER = True
+except ImportError:
+ HAS_DOCKER = False
+
+from bases.FrameworkServices.SimpleService import SimpleService
+
+# default module values (can be overridden per job in `config`)
+# update_every = 1
+priority = 60000
+retries = 60
+
+# charts order (can be overridden if you want less charts, or different order)
+ORDER = [
+ 'running_containers',
+ 'healthy_containers',
+ 'unhealthy_containers'
+]
+
+CHARTS = {
+ 'running_containers': {
+ 'options': [None, 'Number of running containers', 'running containers', 'running containers',
+ 'docker.running_containers', 'line'],
+ 'lines': [
+ ['running_containers', 'running']
+ ]
+ },
+ 'healthy_containers': {
+ 'options': [None, 'Number of healthy containers', 'healthy containers', 'healthy containers',
+ 'docker.healthy_containers', 'line'],
+ 'lines': [
+ ['healthy_containers', 'healthy']
+ ]
+ },
+ 'unhealthy_containers': {
+ 'options': [None, 'Number of unhealthy containers', 'unhealthy containers', 'unhealthy containers',
+ 'docker.unhealthy_containers', 'line'],
+ 'lines': [
+ ['unhealthy_containers', 'unhealthy']
+ ]
+ }
+}
+
+
+class Service(SimpleService):
+ def __init__(self, configuration=None, name=None):
+ SimpleService.__init__(self, configuration=configuration, name=name)
+ self.order = ORDER
+ self.definitions = CHARTS
+
+ def check(self):
+ if not HAS_DOCKER:
+ self.error('\'docker\' package is needed to use dockerd.chart.py')
+ return False
+
+ self.client = docker.DockerClient(base_url=self.configuration.get('url', 'unix://var/run/docker.sock'))
+
+ try:
+ self.client.ping()
+ except docker.errors.APIError as error:
+ self.error(error)
+ return False
+
+ return True
+
+ def get_data(self):
+ data = dict()
+ data['running_containers'] = len(self.client.containers.list(sparse=True))
+ data['healthy_containers'] = len(self.client.containers.list(filters={'health': 'healthy'}, sparse=True))
+ data['unhealthy_containers'] = len(self.client.containers.list(filters={'health': 'unhealthy'}, sparse=True))
+
+ return data or None
diff --git a/collectors/python.d.plugin/dockerd/dockerd.conf b/collectors/python.d.plugin/dockerd/dockerd.conf
new file mode 100644
index 000000000..5ef17a1f5
--- /dev/null
+++ b/collectors/python.d.plugin/dockerd/dockerd.conf
@@ -0,0 +1,79 @@
+# netdata python.d.plugin configuration for dockerd health data API
+#
+# This file is in YaML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 1
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# retries sets the number of retries to be made in case of failures.
+# If unset, the default for python.d.plugin is used.
+# Attempts to restore the service are made once every update_every
+# and only if the module has collected values in the past.
+# retries: 60
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# retries: 10 # the JOB's number of restoration attempts
+# autodetection_retry: 0 # the JOB's re-check interval in seconds
+#
+# Additionally to the above, dockerd plugin also supports the following:
+#
+# url: 'unix://var/run/docker.sock'   # the URL of the docker daemon socket
+#                                     # (a tcp://<host>:<port> URL is also accepted)
+#
+# if the URL is password protected, the following are supported:
+#
+# user: 'username'
+# pass: 'password'
+#
+# ----------------------------------------------------------------------
+# AUTO-DETECTION JOBS
+# only one of them will run (they have the same name)
+#
+local:
+ url: 'unix://var/run/docker.sock'
diff --git a/collectors/python.d.plugin/dovecot/Makefile.inc b/collectors/python.d.plugin/dovecot/Makefile.inc
new file mode 100644
index 000000000..fd7d13bbb
--- /dev/null
+++ b/collectors/python.d.plugin/dovecot/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += dovecot/dovecot.chart.py
+dist_pythonconfig_DATA += dovecot/dovecot.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += dovecot/README.md dovecot/Makefile.inc
+
diff --git a/collectors/python.d.plugin/dovecot/README.md b/collectors/python.d.plugin/dovecot/README.md
new file mode 100644
index 000000000..50950ecc1
--- /dev/null
+++ b/collectors/python.d.plugin/dovecot/README.md
@@ -0,0 +1,73 @@
+# dovecot
+
+This module provides statistics from the Dovecot server.
+Statistics are taken from the Dovecot socket by executing the `EXPORT global` command.
+More information about Dovecot stats can be found on the [project wiki page](http://wiki2.dovecot.org/Statistics).
+
+**Requirement:**
+A Dovecot UNIX socket with R/W permissions for the netdata user, or Dovecot configured with a TCP/IP socket.
+
+The module provides information through the following charts:
+
+1. **sessions**
+ * active sessions
+
+2. **logins**
+ * logins
+
+3. **commands** - number of IMAP commands
+ * commands
+
+4. **Faults**
+ * minor
+ * major
+
+5. **Context Switches**
+ * voluntary
+ * involuntary
+
+6. **disk** in bytes/s
+ * read
+ * write
+
+7. **bytes** in bytes/s
+ * read
+ * write
+
+8. **number of syscalls** in syscalls/s
+ * read
+ * write
+
+9. **lookups** - number of lookups per second
+ * path
+ * attr
+
+10. **hits** - number of cache hits
+ * hits
+
+11. **attempts** - authorization attempts
+ * success
+ * failure
+
+12. **cache** - cached authorization hits
+ * hit
+ * miss
+
+### configuration
+
+Sample:
+
+```yaml
+localtcpip:
+ name : 'local'
+ host : '127.0.0.1'
+ port : 24242
+
+localsocket:
+ name : 'local'
+ socket : '/var/run/dovecot/stats'
+```
+
+If no configuration is given, the module will attempt to connect to Dovecot using the UNIX socket located at `/var/run/dovecot/stats`.
+
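+Under the hood the module sends an `EXPORT\tglobal` request to the stats socket and
+receives two tab-separated lines back: one with field names and one with values.
+A minimal sketch of that exchange (assuming the default socket path):
+
+```python
+# Illustrative sketch of the stats exchange performed by the module.
+import socket
+
+sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
+sock.connect('/var/run/dovecot/stats')
+sock.sendall(b'EXPORT\tglobal\r\n')
+raw = sock.recv(65536).decode('utf-8')
+sock.close()
+
+names, values = raw.split('\n')[:2]
+stats = dict(zip(names.split('\t'), values.split('\t')))
+print(stats.get('num_logins'), stats.get('num_cmds'))
+```
+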
+---
diff --git a/collectors/python.d.plugin/dovecot/dovecot.chart.py b/collectors/python.d.plugin/dovecot/dovecot.chart.py
new file mode 100644
index 000000000..7fee3bfac
--- /dev/null
+++ b/collectors/python.d.plugin/dovecot/dovecot.chart.py
@@ -0,0 +1,147 @@
+# -*- coding: utf-8 -*-
+# Description: dovecot netdata python.d module
+# Author: Pawel Krupa (paulfantom)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from bases.FrameworkServices.SocketService import SocketService
+
+# default module values (can be overridden per job in `config`)
+# update_every = 2
+priority = 60000
+retries = 60
+
+# charts order (can be overridden if you want less charts, or different order)
+ORDER = [
+ 'sessions',
+ 'logins',
+ 'commands',
+ 'faults',
+ 'context_switches',
+ 'io',
+ 'net',
+ 'syscalls',
+ 'lookup',
+ 'cache',
+ 'auth',
+ 'auth_cache'
+]
+
+CHARTS = {
+ 'sessions': {
+ 'options': [None, 'Dovecot Active Sessions', 'number', 'sessions', 'dovecot.sessions', 'line'],
+ 'lines': [
+ ['num_connected_sessions', 'active sessions', 'absolute']
+ ]
+ },
+ 'logins': {
+ 'options': [None, 'Dovecot Logins', 'number', 'logins', 'dovecot.logins', 'line'],
+ 'lines': [
+ ['num_logins', 'logins', 'absolute']
+ ]
+ },
+ 'commands': {
+ 'options': [None, 'Dovecot Commands', 'commands', 'commands', 'dovecot.commands', 'line'],
+ 'lines': [
+ ['num_cmds', 'commands', 'absolute']
+ ]
+ },
+ 'faults': {
+ 'options': [None, 'Dovecot Page Faults', 'faults', 'page faults', 'dovecot.faults', 'line'],
+ 'lines': [
+ ['min_faults', 'minor', 'absolute'],
+ ['maj_faults', 'major', 'absolute']
+ ]
+ },
+ 'context_switches': {
+ 'options': [None, 'Dovecot Context Switches', '', 'context switches', 'dovecot.context_switches', 'line'],
+ 'lines': [
+ ['vol_cs', 'voluntary', 'absolute'],
+ ['invol_cs', 'involuntary', 'absolute']
+ ]
+ },
+ 'io': {
+ 'options': [None, 'Dovecot Disk I/O', 'kilobytes/s', 'disk', 'dovecot.io', 'area'],
+ 'lines': [
+ ['disk_input', 'read', 'incremental', 1, 1024],
+ ['disk_output', 'write', 'incremental', -1, 1024]
+ ]
+ },
+ 'net': {
+ 'options': [None, 'Dovecot Network Bandwidth', 'kilobits/s', 'network', 'dovecot.net', 'area'],
+ 'lines': [
+ ['read_bytes', 'read', 'incremental', 8, 1024],
+ ['write_bytes', 'write', 'incremental', -8, 1024]
+ ]
+ },
+ 'syscalls': {
+ 'options': [None, 'Dovecot Number of SysCalls', 'syscalls/s', 'system', 'dovecot.syscalls', 'line'],
+ 'lines': [
+ ['read_count', 'read', 'incremental'],
+ ['write_count', 'write', 'incremental']
+ ]
+ },
+ 'lookup': {
+ 'options': [None, 'Dovecot Lookups', 'number/s', 'lookups', 'dovecot.lookup', 'stacked'],
+ 'lines': [
+ ['mail_lookup_path', 'path', 'incremental'],
+ ['mail_lookup_attr', 'attr', 'incremental']
+ ]
+ },
+ 'cache': {
+ 'options': [None, 'Dovecot Cache Hits', 'hits/s', 'cache', 'dovecot.cache', 'line'],
+ 'lines': [
+ ['mail_cache_hits', 'hits', 'incremental']
+ ]
+ },
+ 'auth': {
+ 'options': [None, 'Dovecot Authentications', 'attempts', 'logins', 'dovecot.auth', 'stacked'],
+ 'lines': [
+ ['auth_successes', 'ok', 'absolute'],
+ ['auth_failures', 'failed', 'absolute']
+ ]
+ },
+ 'auth_cache': {
+ 'options': [None, 'Dovecot Authentication Cache', 'number', 'cache', 'dovecot.auth_cache', 'stacked'],
+ 'lines': [
+ ['auth_cache_hits', 'hit', 'absolute'],
+ ['auth_cache_misses', 'miss', 'absolute']
+ ]
+ }
+}
+
+
+class Service(SocketService):
+ def __init__(self, configuration=None, name=None):
+ SocketService.__init__(self, configuration=configuration, name=name)
+ self.request = 'EXPORT\tglobal\r\n'
+ self.host = None # localhost
+ self.port = None # 24242
+ # self._keep_alive = True
+ self.unix_socket = '/var/run/dovecot/stats'
+ self.order = ORDER
+ self.definitions = CHARTS
+
+ def _get_data(self):
+ """
+ Format data received from socket
+ :return: dict
+ """
+ try:
+ raw = self._get_raw_data()
+ except (ValueError, AttributeError):
+ return None
+
+ if raw is None:
+ self.debug('dovecot returned no data')
+ return None
+
+ data = raw.split('\n')[:2]
+ desc = data[0].split('\t')
+ vals = data[1].split('\t')
+ ret = dict()
+ for i, _ in enumerate(desc):
+ try:
+ ret[str(desc[i])] = int(vals[i])
+ except ValueError:
+ continue
+ return ret or None
diff --git a/collectors/python.d.plugin/dovecot/dovecot.conf b/collectors/python.d.plugin/dovecot/dovecot.conf
new file mode 100644
index 000000000..56c394991
--- /dev/null
+++ b/collectors/python.d.plugin/dovecot/dovecot.conf
@@ -0,0 +1,96 @@
+# netdata python.d.plugin configuration for dovecot
+#
+# This file is in YaML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 1
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# retries sets the number of retries to be made in case of failures.
+# If unset, the default for python.d.plugin is used.
+# Attempts to restore the service are made once every update_every
+# and only if the module has collected values in the past.
+# retries: 60
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# retries: 60 # the JOB's number of restoration attempts
+# autodetection_retry: 0 # the JOB's re-check interval in seconds
+#
+# Additionally to the above, dovecot also supports the following:
+#
+# socket: 'path/to/dovecot/stats'
+#
+# or
+# host: 'IP or HOSTNAME' # the host to connect to
+# port: PORT # the port to connect to
+#
+#
+
+# ----------------------------------------------------------------------
+# AUTO-DETECTION JOBS
+# only one of them will run (they have the same name)
+
+localhost:
+ name : 'local'
+ host : 'localhost'
+ port : 24242
+
+localipv4:
+ name : 'local'
+ host : '127.0.0.1'
+ port : 24242
+
+localipv6:
+ name : 'local'
+ host : '::1'
+ port : 24242
+
+localsocket:
+ name : 'local'
+ socket : '/var/run/dovecot/stats'
+
diff --git a/collectors/python.d.plugin/elasticsearch/Makefile.inc b/collectors/python.d.plugin/elasticsearch/Makefile.inc
new file mode 100644
index 000000000..15c63c2fa
--- /dev/null
+++ b/collectors/python.d.plugin/elasticsearch/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += elasticsearch/elasticsearch.chart.py
+dist_pythonconfig_DATA += elasticsearch/elasticsearch.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += elasticsearch/README.md elasticsearch/Makefile.inc
+
diff --git a/collectors/python.d.plugin/elasticsearch/README.md b/collectors/python.d.plugin/elasticsearch/README.md
new file mode 100644
index 000000000..75e17015b
--- /dev/null
+++ b/collectors/python.d.plugin/elasticsearch/README.md
@@ -0,0 +1,60 @@
+# elasticsearch
+
+This module monitors Elasticsearch performance and health metrics.
+
+It produces:
+
+1. **Search performance** charts:
+ * Number of queries, fetches
+ * Time spent on queries, fetches
+ * Query and fetch latency
+
+2. **Indexing performance** charts:
+ * Number of documents indexed, index refreshes, flushes
+ * Time spent on indexing, refreshing, flushing
+ * Indexing and flushing latency
+
+3. **Memory usage and garbage collection** charts:
+ * JVM heap currently in use, committed
+ * Count of garbage collections
+ * Time spent on garbage collections
+
+4. **Host metrics** charts:
+ * Available file descriptors in percent
+ * Opened HTTP connections
+ * Cluster communication transport metrics
+
+5. **Queues and rejections** charts:
+ * Number of queued/rejected threads in thread pool
+
+6. **Fielddata cache** charts:
+ * Fielddata cache size
+ * Fielddata evictions and circuit breaker tripped count
+
+7. **Cluster health API** charts:
+ * Cluster status
+ * Nodes and tasks statistics
+ * Shards statistics
+
+8. **Cluster stats API** charts:
+ * Nodes statistics
+ * Query cache statistics
+ * Docs statistics
+ * Store statistics
+ * Indices and shards statistics
+
+### configuration
+
+Sample:
+
+```yaml
+local:
+ host : 'ipaddress' # Server ip address or hostname
+ port : 9200 # Port on which elasticsearch listens
+ cluster_health : True/False # Calls to cluster health elasticsearch API. Enabled by default.
+ cluster_stats : True/False # Calls to cluster stats elasticsearch API. Enabled by default.
+```
+
+If no configuration is given, the module will fail to run.
+
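+The latency charts are not read directly from the API; they are derived from
+the cumulative counters between two consecutive collection runs. A simplified
+sketch of the calculation (the module keeps the previous snapshot internally):
+
+```python
+# Simplified sketch of the latency derivation behind the latency charts.
+def average_latency(prev, curr):
+    """prev/curr are (total_operations, total_time_in_millis) snapshots."""
+    d_ops = curr[0] - prev[0]
+    d_time = curr[1] - prev[1]
+    if d_ops <= 0:
+        return 0
+    return float(d_time) / d_ops * 1000  # charted with a 1/1000 divisor -> ms/operation
+
+# 20 additional queries that took 50 ms in total since the last run
+print(average_latency((100, 400), (120, 450)))  # 2500.0 -> shown as 2.5 ms/query
+```
+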
+---
diff --git a/collectors/python.d.plugin/elasticsearch/elasticsearch.chart.py b/collectors/python.d.plugin/elasticsearch/elasticsearch.chart.py
new file mode 100644
index 000000000..3f431f6e0
--- /dev/null
+++ b/collectors/python.d.plugin/elasticsearch/elasticsearch.chart.py
@@ -0,0 +1,644 @@
+# -*- coding: utf-8 -*-
+# Description: elastic search node stats netdata python.d module
+# Author: l2isbad
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+import json
+import threading
+
+from collections import namedtuple
+from socket import gethostbyname, gaierror
+
+try:
+ from queue import Queue
+except ImportError:
+ from Queue import Queue
+
+from bases.FrameworkServices.UrlService import UrlService
+
+# default module values (can be overridden per job in `config`)
+update_every = 5
+
+METHODS = namedtuple('METHODS', ['get_data', 'url', 'run'])
+
+NODE_STATS = [
+ 'indices.search.fetch_current',
+ 'indices.search.fetch_total',
+ 'indices.search.query_current',
+ 'indices.search.query_total',
+ 'indices.search.query_time_in_millis',
+ 'indices.search.fetch_time_in_millis',
+ 'indices.indexing.index_total',
+ 'indices.indexing.index_current',
+ 'indices.indexing.index_time_in_millis',
+ 'indices.refresh.total',
+ 'indices.refresh.total_time_in_millis',
+ 'indices.flush.total',
+ 'indices.flush.total_time_in_millis',
+ 'indices.translog.operations',
+ 'indices.translog.size_in_bytes',
+ 'indices.translog.uncommitted_operations',
+ 'indices.translog.uncommitted_size_in_bytes',
+ 'indices.segments.count',
+ 'indices.segments.terms_memory_in_bytes',
+ 'indices.segments.stored_fields_memory_in_bytes',
+ 'indices.segments.term_vectors_memory_in_bytes',
+ 'indices.segments.norms_memory_in_bytes',
+ 'indices.segments.points_memory_in_bytes',
+ 'indices.segments.doc_values_memory_in_bytes',
+ 'indices.segments.index_writer_memory_in_bytes',
+ 'indices.segments.version_map_memory_in_bytes',
+ 'indices.segments.fixed_bit_set_memory_in_bytes',
+ 'jvm.gc.collectors.young.collection_count',
+ 'jvm.gc.collectors.old.collection_count',
+ 'jvm.gc.collectors.young.collection_time_in_millis',
+ 'jvm.gc.collectors.old.collection_time_in_millis',
+ 'jvm.mem.heap_used_percent',
+ 'jvm.mem.heap_used_in_bytes',
+ 'jvm.mem.heap_committed_in_bytes',
+ 'jvm.buffer_pools.direct.count',
+ 'jvm.buffer_pools.direct.used_in_bytes',
+ 'jvm.buffer_pools.direct.total_capacity_in_bytes',
+ 'jvm.buffer_pools.mapped.count',
+ 'jvm.buffer_pools.mapped.used_in_bytes',
+ 'jvm.buffer_pools.mapped.total_capacity_in_bytes',
+ 'thread_pool.bulk.queue',
+ 'thread_pool.bulk.rejected',
+ 'thread_pool.write.queue',
+ 'thread_pool.write.rejected',
+ 'thread_pool.index.queue',
+ 'thread_pool.index.rejected',
+ 'thread_pool.search.queue',
+ 'thread_pool.search.rejected',
+ 'thread_pool.merge.queue',
+ 'thread_pool.merge.rejected',
+ 'indices.fielddata.memory_size_in_bytes',
+ 'indices.fielddata.evictions',
+ 'breakers.fielddata.tripped',
+ 'http.current_open',
+ 'transport.rx_size_in_bytes',
+ 'transport.tx_size_in_bytes',
+ 'process.max_file_descriptors',
+ 'process.open_file_descriptors'
+]
+
+CLUSTER_STATS = [
+ 'nodes.count.data_only',
+ 'nodes.count.master_data',
+ 'nodes.count.total',
+ 'nodes.count.master_only',
+ 'nodes.count.client',
+ 'indices.docs.count',
+ 'indices.query_cache.hit_count',
+ 'indices.query_cache.miss_count',
+ 'indices.store.size_in_bytes',
+ 'indices.count',
+ 'indices.shards.total'
+]
+
+HEALTH_STATS = [
+ 'number_of_nodes',
+ 'number_of_data_nodes',
+ 'number_of_pending_tasks',
+ 'number_of_in_flight_fetch',
+ 'active_shards',
+ 'relocating_shards',
+ 'unassigned_shards',
+ 'delayed_unassigned_shards',
+ 'initializing_shards',
+ 'active_shards_percent_as_number'
+]
+
+LATENCY = {
+ 'query_latency': {
+ 'total': 'indices_search_query_total',
+ 'spent_time': 'indices_search_query_time_in_millis'
+ },
+ 'fetch_latency': {
+ 'total': 'indices_search_fetch_total',
+ 'spent_time': 'indices_search_fetch_time_in_millis'
+ },
+ 'indexing_latency': {
+ 'total': 'indices_indexing_index_total',
+ 'spent_time': 'indices_indexing_index_time_in_millis'
+ },
+ 'flushing_latency': {
+ 'total': 'indices_flush_total',
+ 'spent_time': 'indices_flush_total_time_in_millis'
+ }
+}
+
+# charts order (can be overridden if you want less charts, or different order)
+ORDER = [
+ 'search_performance_total',
+ 'search_performance_current',
+ 'search_performance_time',
+ 'search_latency',
+ 'index_performance_total',
+ 'index_performance_current',
+ 'index_performance_time',
+ 'index_latency',
+ 'index_translog_operations',
+ 'index_translog_size',
+ 'index_segments_count',
+ 'index_segments_memory_writer',
+ 'index_segments_memory',
+ 'jvm_mem_heap',
+ 'jvm_mem_heap_bytes',
+ 'jvm_buffer_pool_count',
+ 'jvm_direct_buffers_memory',
+ 'jvm_mapped_buffers_memory',
+ 'jvm_gc_count',
+ 'jvm_gc_time',
+ 'host_metrics_file_descriptors',
+ 'host_metrics_http',
+ 'host_metrics_transport',
+ 'thread_pool_queued',
+ 'thread_pool_rejected',
+ 'fielddata_cache',
+ 'fielddata_evictions_tripped',
+ 'cluster_health_status',
+ 'cluster_health_nodes',
+ 'cluster_health_shards',
+ 'cluster_stats_nodes',
+ 'cluster_stats_query_cache',
+ 'cluster_stats_docs',
+ 'cluster_stats_store',
+ 'cluster_stats_indices_shards',
+]
+
+CHARTS = {
+ 'search_performance_total': {
+ 'options': [None, 'Queries And Fetches', 'number of', 'search performance',
+ 'elastic.search_performance_total', 'stacked'],
+ 'lines': [
+ ['indices_search_query_total', 'queries', 'incremental'],
+ ['indices_search_fetch_total', 'fetches', 'incremental']
+ ]
+ },
+ 'search_performance_current': {
+ 'options': [None, 'Queries and Fetches In Progress', 'number of', 'search performance',
+ 'elastic.search_performance_current', 'stacked'],
+ 'lines': [
+ ['indices_search_query_current', 'queries', 'absolute'],
+ ['indices_search_fetch_current', 'fetches', 'absolute']
+ ]
+ },
+ 'search_performance_time': {
+ 'options': [None, 'Time Spent On Queries And Fetches', 'seconds', 'search performance',
+ 'elastic.search_performance_time', 'stacked'],
+ 'lines': [
+ ['indices_search_query_time_in_millis', 'query', 'incremental', 1, 1000],
+ ['indices_search_fetch_time_in_millis', 'fetch', 'incremental', 1, 1000]
+ ]
+ },
+ 'search_latency': {
+ 'options': [None, 'Query And Fetch Latency', 'ms', 'search performance', 'elastic.search_latency', 'stacked'],
+ 'lines': [
+ ['query_latency', 'query', 'absolute', 1, 1000],
+ ['fetch_latency', 'fetch', 'absolute', 1, 1000]
+ ]
+ },
+ 'index_performance_total': {
+ 'options': [None, 'Indexed Documents, Index Refreshes, Index Flushes To Disk', 'number of',
+ 'indexing performance', 'elastic.index_performance_total', 'stacked'],
+ 'lines': [
+ ['indices_indexing_index_total', 'indexed', 'incremental'],
+ ['indices_refresh_total', 'refreshes', 'incremental'],
+ ['indices_flush_total', 'flushes', 'incremental']
+ ]
+ },
+ 'index_performance_current': {
+ 'options': [None, 'Number Of Documents Currently Being Indexed', 'currently indexed',
+ 'indexing performance', 'elastic.index_performance_current', 'stacked'],
+ 'lines': [
+ ['indices_indexing_index_current', 'documents', 'absolute']
+ ]
+ },
+ 'index_performance_time': {
+ 'options': [None, 'Time Spent On Indexing, Refreshing, Flushing', 'seconds', 'indexing performance',
+ 'elastic.index_performance_time', 'stacked'],
+ 'lines': [
+ ['indices_indexing_index_time_in_millis', 'indexing', 'incremental', 1, 1000],
+ ['indices_refresh_total_time_in_millis', 'refreshing', 'incremental', 1, 1000],
+ ['indices_flush_total_time_in_millis', 'flushing', 'incremental', 1, 1000]
+ ]
+ },
+ 'index_latency': {
+ 'options': [None, 'Indexing And Flushing Latency', 'ms', 'indexing performance',
+ 'elastic.index_latency', 'stacked'],
+ 'lines': [
+ ['indexing_latency', 'indexing', 'absolute', 1, 1000],
+ ['flushing_latency', 'flushing', 'absolute', 1, 1000]
+ ]
+ },
+ 'index_translog_operations': {
+ 'options': [None, 'Translog Operations', 'count', 'translog',
+ 'elastic.index_translog_operations', 'area'],
+ 'lines': [
+ ['indices_translog_operations', 'total', 'absolute'],
+ ['indices_translog_uncommitted_operations', 'uncommitted', 'absolute']
+ ]
+ },
+ 'index_translog_size': {
+ 'options': [None, 'Translog Size', 'MB', 'translog',
+ 'elastic.index_translog_size', 'area'],
+ 'lines': [
+ ['indices_translog_size_in_bytes', 'total', 'absolute', 1, 1048576],
+ ['indices_translog_uncommitted_size_in_bytes', 'uncommitted', 'absolute', 1, 1048576]
+ ]
+ },
+ 'index_segments_count': {
+ 'options': [None, 'Total Number Of Indices Segments', 'count', 'indices segments',
+ 'elastic.index_segments_count', 'line'],
+ 'lines': [
+ ['indices_segments_count', 'segments', 'absolute']
+ ]
+ },
+ 'index_segments_memory_writer': {
+ 'options': [None, 'Index Writer Memory Usage', 'MB', 'indices segments',
+ 'elastic.index_segments_memory_writer', 'area'],
+ 'lines': [
+ ['indices_segments_index_writer_memory_in_bytes', 'total', 'absolute', 1, 1048576]
+ ]
+ },
+ 'index_segments_memory': {
+ 'options': [None, 'Indices Segments Memory Usage', 'MB', 'indices segments',
+ 'elastic.index_segments_memory', 'stacked'],
+ 'lines': [
+ ['indices_segments_terms_memory_in_bytes', 'terms', 'absolute', 1, 1048576],
+ ['indices_segments_stored_fields_memory_in_bytes', 'stored fields', 'absolute', 1, 1048576],
+ ['indices_segments_term_vectors_memory_in_bytes', 'term vectors', 'absolute', 1, 1048576],
+ ['indices_segments_norms_memory_in_bytes', 'norms', 'absolute', 1, 1048576],
+ ['indices_segments_points_memory_in_bytes', 'points', 'absolute', 1, 1048576],
+ ['indices_segments_doc_values_memory_in_bytes', 'doc values', 'absolute', 1, 1048576],
+ ['indices_segments_version_map_memory_in_bytes', 'version map', 'absolute', 1, 1048576],
+ ['indices_segments_fixed_bit_set_memory_in_bytes', 'fixed bit set', 'absolute', 1, 1048576]
+ ]
+ },
+ 'jvm_mem_heap': {
+ 'options': [None, 'JVM Heap Percentage Currently in Use', 'percent', 'memory usage and gc',
+ 'elastic.jvm_heap', 'area'],
+ 'lines': [
+ ['jvm_mem_heap_used_percent', 'inuse', 'absolute']
+ ]
+ },
+ 'jvm_mem_heap_bytes': {
+ 'options': [None, 'JVM Heap Commit And Usage', 'MB', 'memory usage and gc',
+ 'elastic.jvm_heap_bytes', 'area'],
+ 'lines': [
+ ['jvm_mem_heap_committed_in_bytes', 'committed', 'absolute', 1, 1048576],
+ ['jvm_mem_heap_used_in_bytes', 'used', 'absolute', 1, 1048576]
+ ]
+ },
+ 'jvm_buffer_pool_count': {
+ 'options': [None, 'JVM Buffers', 'count', 'memory usage and gc',
+ 'elastic.jvm_buffer_pool_count', 'line'],
+ 'lines': [
+ ['jvm_buffer_pools_direct_count', 'direct', 'absolute'],
+ ['jvm_buffer_pools_mapped_count', 'mapped', 'absolute']
+ ]
+ },
+ 'jvm_direct_buffers_memory': {
+ 'options': [None, 'JVM Direct Buffers Memory', 'MB', 'memory usage and gc',
+ 'elastic.jvm_direct_buffers_memory', 'area'],
+ 'lines': [
+ ['jvm_buffer_pools_direct_used_in_bytes', 'used', 'absolute', 1, 1048576],
+ ['jvm_buffer_pools_direct_total_capacity_in_bytes', 'total capacity', 'absolute', 1, 1048576]
+ ]
+ },
+ 'jvm_mapped_buffers_memory': {
+ 'options': [None, 'JVM Mapped Buffers Memory', 'MB', 'memory usage and gc',
+ 'elastic.jvm_mapped_buffers_memory', 'area'],
+ 'lines': [
+ ['jvm_buffer_pools_mapped_used_in_bytes', 'used', 'absolute', 1, 1048576],
+ ['jvm_buffer_pools_mapped_total_capacity_in_bytes', 'total capacity', 'absolute', 1, 1048576]
+ ]
+ },
+ 'jvm_gc_count': {
+ 'options': [None, 'Garbage Collections', 'counts', 'memory usage and gc', 'elastic.gc_count', 'stacked'],
+ 'lines': [
+ ['jvm_gc_collectors_young_collection_count', 'young', 'incremental'],
+ ['jvm_gc_collectors_old_collection_count', 'old', 'incremental']
+ ]
+ },
+ 'jvm_gc_time': {
+ 'options': [None, 'Time Spent On Garbage Collections', 'ms', 'memory usage and gc',
+ 'elastic.gc_time', 'stacked'],
+ 'lines': [
+ ['jvm_gc_collectors_young_collection_time_in_millis', 'young', 'incremental'],
+ ['jvm_gc_collectors_old_collection_time_in_millis', 'old', 'incremental']
+ ]
+ },
+ 'thread_pool_queued': {
+ 'options': [None, 'Number Of Queued Threads In Thread Pool', 'queued threads', 'queues and rejections',
+ 'elastic.thread_pool_queued', 'stacked'],
+ 'lines': [
+ ['thread_pool_bulk_queue', 'bulk', 'absolute'],
+ ['thread_pool_write_queue', 'write', 'absolute'],
+ ['thread_pool_index_queue', 'index', 'absolute'],
+ ['thread_pool_search_queue', 'search', 'absolute'],
+ ['thread_pool_merge_queue', 'merge', 'absolute']
+ ]
+ },
+ 'thread_pool_rejected': {
+ 'options': [None, 'Rejected Threads In Thread Pool', 'rejected threads', 'queues and rejections',
+ 'elastic.thread_pool_rejected', 'stacked'],
+ 'lines': [
+ ['thread_pool_bulk_rejected', 'bulk', 'absolute'],
+ ['thread_pool_write_rejected', 'write', 'absolute'],
+ ['thread_pool_index_rejected', 'index', 'absolute'],
+ ['thread_pool_search_rejected', 'search', 'absolute'],
+ ['thread_pool_merge_rejected', 'merge', 'absolute']
+ ]
+ },
+ 'fielddata_cache': {
+ 'options': [None, 'Fielddata Cache', 'MB', 'fielddata cache', 'elastic.fielddata_cache', 'line'],
+ 'lines': [
+ ['indices_fielddata_memory_size_in_bytes', 'cache', 'absolute', 1, 1048576]
+ ]
+ },
+ 'fielddata_evictions_tripped': {
+ 'options': [None, 'Fielddata Evictions And Circuit Breaker Tripped Count', 'number of events',
+ 'fielddata cache', 'elastic.fielddata_evictions_tripped', 'line'],
+ 'lines': [
+ ['indices_fielddata_evictions', 'evictions', 'incremental'],
+ ['indices_fielddata_tripped', 'tripped', 'incremental']
+ ]
+ },
+ 'cluster_health_nodes': {
+ 'options': [None, 'Nodes And Tasks Statistics', 'units', 'cluster health API',
+ 'elastic.cluster_health_nodes', 'stacked'],
+ 'lines': [
+ ['number_of_nodes', 'nodes', 'absolute'],
+ ['number_of_data_nodes', 'data_nodes', 'absolute'],
+ ['number_of_pending_tasks', 'pending_tasks', 'absolute'],
+ ['number_of_in_flight_fetch', 'in_flight_fetch', 'absolute']
+ ]
+ },
+ 'cluster_health_status': {
+ 'options': [None, 'Cluster Status', 'status', 'cluster health API',
+ 'elastic.cluster_health_status', 'area'],
+ 'lines': [
+ ['status_green', 'green', 'absolute'],
+ ['status_red', 'red', 'absolute'],
+ ['status_foo1', None, 'absolute'],
+ ['status_foo2', None, 'absolute'],
+ ['status_foo3', None, 'absolute'],
+ ['status_yellow', 'yellow', 'absolute']
+ ]
+ },
+ 'cluster_health_shards': {
+ 'options': [None, 'Shards Statistics', 'shards', 'cluster health API',
+ 'elastic.cluster_health_shards', 'stacked'],
+ 'lines': [
+ ['active_shards', 'active_shards', 'absolute'],
+ ['relocating_shards', 'relocating_shards', 'absolute'],
+ ['unassigned_shards', 'unassigned', 'absolute'],
+ ['delayed_unassigned_shards', 'delayed_unassigned', 'absolute'],
+ ['initializing_shards', 'initializing', 'absolute'],
+ ['active_shards_percent_as_number', 'active_percent', 'absolute']
+ ]
+ },
+ 'cluster_stats_nodes': {
+ 'options': [None, 'Nodes Statistics', 'nodes', 'cluster stats API',
+ 'elastic.cluster_nodes', 'stacked'],
+ 'lines': [
+ ['nodes_count_data_only', 'data_only', 'absolute'],
+ ['nodes_count_master_data', 'master_data', 'absolute'],
+ ['nodes_count_total', 'total', 'absolute'],
+ ['nodes_count_master_only', 'master_only', 'absolute'],
+ ['nodes_count_client', 'client', 'absolute']
+ ]
+ },
+ 'cluster_stats_query_cache': {
+ 'options': [None, 'Query Cache Statistics', 'queries', 'cluster stats API',
+ 'elastic.cluster_query_cache', 'stacked'],
+ 'lines': [
+ ['indices_query_cache_hit_count', 'hit', 'incremental'],
+ ['indices_query_cache_miss_count', 'miss', 'incremental']
+ ]
+ },
+ 'cluster_stats_docs': {
+ 'options': [None, 'Docs Statistics', 'count', 'cluster stats API',
+ 'elastic.cluster_docs', 'line'],
+ 'lines': [
+ ['indices_docs_count', 'docs', 'absolute']
+ ]
+ },
+ 'cluster_stats_store': {
+ 'options': [None, 'Store Statistics', 'MB', 'cluster stats API',
+ 'elastic.cluster_store', 'line'],
+ 'lines': [
+ ['indices_store_size_in_bytes', 'size', 'absolute', 1, 1048576]
+ ]
+ },
+ 'cluster_stats_indices_shards': {
+ 'options': [None, 'Indices And Shards Statistics', 'count', 'cluster stats API',
+ 'elastic.cluster_indices_shards', 'stacked'],
+ 'lines': [
+ ['indices_count', 'indices', 'absolute'],
+ ['indices_shards_total', 'shards', 'absolute']
+ ]
+ },
+ 'host_metrics_transport': {
+ 'options': [None, 'Cluster Communication Transport Metrics', 'kilobit/s', 'host metrics',
+ 'elastic.host_transport', 'area'],
+ 'lines': [
+ ['transport_rx_size_in_bytes', 'in', 'incremental', 8, 1000],
+ ['transport_tx_size_in_bytes', 'out', 'incremental', -8, 1000]
+ ]
+ },
+ 'host_metrics_file_descriptors': {
+ 'options': [None, 'Available File Descriptors In Percent', 'percent', 'host metrics',
+ 'elastic.host_descriptors', 'area'],
+ 'lines': [
+ ['file_descriptors_used', 'used', 'absolute', 1, 10]
+ ]
+ },
+ 'host_metrics_http': {
+ 'options': [None, 'Opened HTTP Connections', 'connections', 'host metrics',
+ 'elastic.host_http_connections', 'line'],
+ 'lines': [
+ ['http_current_open', 'opened', 'absolute', 1, 1]
+ ]
+ }
+}
+
+
+class Service(UrlService):
+ def __init__(self, configuration=None, name=None):
+ UrlService.__init__(self, configuration=configuration, name=name)
+ self.order = ORDER
+ self.definitions = CHARTS
+ self.host = self.configuration.get('host')
+ self.port = self.configuration.get('port', 9200)
+ self.url = '{scheme}://{host}:{port}'.format(scheme=self.configuration.get('scheme', 'http'),
+ host=self.host,
+ port=self.port)
+ self.latency = dict()
+ self.methods = list()
+
+ def check(self):
+ if not all([self.host,
+ self.port,
+ isinstance(self.host, str),
+ isinstance(self.port, (str, int))]):
+ self.error('Host is not defined in the module configuration file')
+ return False
+
+ # Hostname -> ip address
+ try:
+ self.host = gethostbyname(self.host)
+ except gaierror as error:
+ self.error(str(error))
+ return False
+
+ # Create URL for every Elasticsearch API
+ self.methods = [METHODS(get_data=self._get_node_stats,
+ url=self.url + '/_nodes/_local/stats',
+ run=self.configuration.get('node_stats', True)),
+ METHODS(get_data=self._get_cluster_health,
+ url=self.url + '/_cluster/health',
+ run=self.configuration.get('cluster_health', True)),
+ METHODS(get_data=self._get_cluster_stats,
+ url=self.url + '/_cluster/stats',
+ run=self.configuration.get('cluster_stats', True))]
+
+ # Remove disabled API calls from 'avail methods'
+ return UrlService.check(self)
+
+ def _get_data(self):
+ threads = list()
+ queue = Queue()
+ result = dict()
+
+ for method in self.methods:
+ if not method.run:
+ continue
+ th = threading.Thread(target=method.get_data,
+ args=(queue, method.url))
+ th.start()
+ threads.append(th)
+
+ for thread in threads:
+ thread.join()
+ result.update(queue.get())
+
+ return result or None
+
+ def _get_cluster_health(self, queue, url):
+ """
+ Format data received from http request
+ :return: dict
+ """
+
+ raw_data = self._get_raw_data(url)
+
+ if not raw_data:
+ return queue.put(dict())
+
+ data = self.json_reply(raw_data)
+
+ if not data:
+ return queue.put(dict())
+
+ to_netdata = fetch_data_(raw_data=data,
+ metrics=HEALTH_STATS)
+
+ to_netdata.update({'status_green': 0, 'status_red': 0, 'status_yellow': 0,
+ 'status_foo1': 0, 'status_foo2': 0, 'status_foo3': 0})
+ current_status = 'status_' + data['status']
+ to_netdata[current_status] = 1
+
+ return queue.put(to_netdata)
+
+ def _get_cluster_stats(self, queue, url):
+ """
+ Format data received from http request
+ :return: dict
+ """
+
+ raw_data = self._get_raw_data(url)
+
+ if not raw_data:
+ return queue.put(dict())
+
+ data = self.json_reply(raw_data)
+
+ if not data:
+ return queue.put(dict())
+
+ to_netdata = fetch_data_(raw_data=data,
+ metrics=CLUSTER_STATS)
+
+ return queue.put(to_netdata)
+
+ def _get_node_stats(self, queue, url):
+ """
+ Format data received from http request
+ :return: dict
+ """
+
+ raw_data = self._get_raw_data(url)
+
+ if not raw_data:
+ return queue.put(dict())
+
+ data = self.json_reply(raw_data)
+
+ if not data:
+ return queue.put(dict())
+
+ node = list(data['nodes'].keys())[0]
+ to_netdata = fetch_data_(raw_data=data['nodes'][node],
+ metrics=NODE_STATS)
+
+ # Search, index, flush, fetch performance latency
+ for key in LATENCY:
+ try:
+ to_netdata[key] = self.find_avg(total=to_netdata[LATENCY[key]['total']],
+ spent_time=to_netdata[LATENCY[key]['spent_time']],
+ key=key)
+ except KeyError:
+ continue
+ if 'process_open_file_descriptors' in to_netdata and 'process_max_file_descriptors' in to_netdata:
+ to_netdata['file_descriptors_used'] = round(float(to_netdata['process_open_file_descriptors'])
+ / to_netdata['process_max_file_descriptors'] * 1000)
+
+ return queue.put(to_netdata)
+
+ def json_reply(self, reply):
+ try:
+ return json.loads(reply)
+ except ValueError as err:
+ self.error(err)
+ return None
+
+ def find_avg(self, total, spent_time, key):
+ if key not in self.latency:
+ self.latency[key] = dict(total=total,
+ spent_time=spent_time)
+ return 0
+ if self.latency[key]['total'] != total:
+ latency = float(spent_time - self.latency[key]['spent_time'])\
+ / float(total - self.latency[key]['total']) * 1000
+ self.latency[key]['total'] = total
+ self.latency[key]['spent_time'] = spent_time
+ return latency
+ self.latency[key]['spent_time'] = spent_time
+ return 0
+
+
+def fetch_data_(raw_data, metrics):
+ data = dict()
+ for metric in metrics:
+ value = raw_data
+ metrics_list = metric.split('.')
+ try:
+ for m in metrics_list:
+ value = value[m]
+ except KeyError:
+ continue
+ data['_'.join(metrics_list)] = value
+ return data
diff --git a/collectors/python.d.plugin/elasticsearch/elasticsearch.conf b/collectors/python.d.plugin/elasticsearch/elasticsearch.conf
new file mode 100644
index 000000000..213843bf9
--- /dev/null
+++ b/collectors/python.d.plugin/elasticsearch/elasticsearch.conf
@@ -0,0 +1,83 @@
+# netdata python.d.plugin configuration for elasticsearch stats
+#
+# This file is in YaML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 1
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# retries sets the number of retries to be made in case of failures.
+# If unset, the default for python.d.plugin is used.
+# Attempts to restore the service are made once every update_every
+# and only if the module has collected values in the past.
+# retries: 60
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# retries: 60 # the JOB's number of restoration attempts
+# autodetection_retry: 0 # the JOB's re-check interval in seconds
+#
+# Additionally to the above, elasticsearch plugin also supports the following:
+#
+# host: 'ipaddress' # Server ip address or hostname.
+# port: 'port' # Port on which elasticsearch listens.
+# cluster_health: False/True # Calls to cluster health elasticsearch API. Enabled by default.
+# cluster_stats: False/True # Calls to cluster stats elasticsearch API. Enabled by default.
+#
+#
+# if the URL is password protected, the following are supported:
+#
+# user: 'username'
+# pass: 'password'
+#
+# ----------------------------------------------------------------------
+# AUTO-DETECTION JOBS
+# only one of them will run (they have the same name)
+#
+local:
+ host: '127.0.0.1'
+ port: '9200'
diff --git a/collectors/python.d.plugin/example/Makefile.inc b/collectors/python.d.plugin/example/Makefile.inc
new file mode 100644
index 000000000..1b027d5a7
--- /dev/null
+++ b/collectors/python.d.plugin/example/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += example/example.chart.py
+dist_pythonconfig_DATA += example/example.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += example/README.md example/Makefile.inc
+
diff --git a/collectors/python.d.plugin/example/README.md b/collectors/python.d.plugin/example/README.md
new file mode 100644
index 000000000..f9f314ac4
--- /dev/null
+++ b/collectors/python.d.plugin/example/README.md
@@ -0,0 +1 @@
+An example python data collection module. \ No newline at end of file
diff --git a/collectors/python.d.plugin/example/example.chart.py b/collectors/python.d.plugin/example/example.chart.py
new file mode 100644
index 000000000..85defa4d1
--- /dev/null
+++ b/collectors/python.d.plugin/example/example.chart.py
@@ -0,0 +1,48 @@
+# -*- coding: utf-8 -*-
+# Description: example netdata python.d module
+# Author: Put your name here (your github login)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from random import SystemRandom
+
+from bases.FrameworkServices.SimpleService import SimpleService
+
+# default module values
+# update_every = 4
+priority = 90000
+retries = 60
+
+ORDER = ['random']
+CHARTS = {
+ 'random': {
+ 'options': [None, 'A random number', 'random number', 'random', 'random', 'line'],
+ 'lines': [
+ ['random1']
+ ]
+ }
+}
+
+
+class Service(SimpleService):
+ def __init__(self, configuration=None, name=None):
+ SimpleService.__init__(self, configuration=configuration, name=name)
+ self.order = ORDER
+ self.definitions = CHARTS
+ self.random = SystemRandom()
+
+ @staticmethod
+ def check():
+ return True
+
+ def get_data(self):
+ data = dict()
+
+ for i in range(1, 4):
+ dimension_id = ''.join(['random', str(i)])
+
+ if dimension_id not in self.charts['random']:
+ self.charts['random'].add_dimension([dimension_id])
+
+ data[dimension_id] = self.random.randint(0, 100)
+
+ return data
diff --git a/collectors/python.d.plugin/example/example.conf b/collectors/python.d.plugin/example/example.conf
new file mode 100644
index 000000000..e7fed9b50
--- /dev/null
+++ b/collectors/python.d.plugin/example/example.conf
@@ -0,0 +1,70 @@
+# netdata python.d.plugin configuration for example
+#
+# This file is in YaML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 1
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# retries sets the number of retries to be made in case of failures.
+# If unset, the default for python.d.plugin is used.
+# Attempts to restore the service are made once every update_every
+# and only if the module has collected values in the past.
+# retries: 60
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# retries: 60 # the JOB's number of restoration attempts
+# autodetection_retry: 0 # the JOB's re-check interval in seconds
+#
+# Additionally to the above, example also supports the following:
+#
+# - none
+#
+# ----------------------------------------------------------------------
+# AUTO-DETECTION JOBS
+# only one of them will run (they have the same name)
diff --git a/collectors/python.d.plugin/exim/Makefile.inc b/collectors/python.d.plugin/exim/Makefile.inc
new file mode 100644
index 000000000..36ffa56d2
--- /dev/null
+++ b/collectors/python.d.plugin/exim/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += exim/exim.chart.py
+dist_pythonconfig_DATA += exim/exim.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += exim/README.md exim/Makefile.inc
+
diff --git a/collectors/python.d.plugin/exim/README.md b/collectors/python.d.plugin/exim/README.md
new file mode 100644
index 000000000..b9a62cad9
--- /dev/null
+++ b/collectors/python.d.plugin/exim/README.md
@@ -0,0 +1,13 @@
+# exim
+
+A simple module that executes `exim -bpc` to read the length of the exim queue.
+This command can take a long time to complete, so it is not recommended to run it every second.
+
+It produces only one chart:
+
+1. **Exim Queue Emails**
+ * emails
+
+Configuration is not needed.
+
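+For reference, the module simply runs the command and reads the single number
+it prints; a rough equivalent (assuming `exim` is in the PATH and the netdata
+user is allowed to query the queue) is:
+
+```python
+# Rough equivalent of the value collected by this module (illustrative only).
+import subprocess
+
+out = subprocess.check_output(['exim', '-bpc']).decode('utf-8').strip()
+print({'emails': int(out)})  # number of emails currently in the exim queue
+```
+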
+---
diff --git a/collectors/python.d.plugin/exim/exim.chart.py b/collectors/python.d.plugin/exim/exim.chart.py
new file mode 100644
index 000000000..5431dd46b
--- /dev/null
+++ b/collectors/python.d.plugin/exim/exim.chart.py
@@ -0,0 +1,41 @@
+# -*- coding: utf-8 -*-
+# Description: exim netdata python.d module
+# Author: Pawel Krupa (paulfantom)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from bases.FrameworkServices.ExecutableService import ExecutableService
+
+# default module values (can be overridden per job in `config`)
+# update_every = 2
+priority = 60000
+retries = 60
+
+# charts order (can be overridden if you want less charts, or different order)
+ORDER = ['qemails']
+
+CHARTS = {
+ 'qemails': {
+ 'options': [None, 'Exim Queue Emails', 'emails', 'queue', 'exim.qemails', 'line'],
+ 'lines': [
+ ['emails', None, 'absolute']
+ ]
+ }
+}
+
+
+class Service(ExecutableService):
+ def __init__(self, configuration=None, name=None):
+ ExecutableService.__init__(self, configuration=configuration, name=name)
+ self.command = 'exim -bpc'
+ self.order = ORDER
+ self.definitions = CHARTS
+
+ def _get_data(self):
+ """
+ Format data received from shell command
+ :return: dict
+ """
+ try:
+ return {'emails': int(self._get_raw_data()[0])}
+ except (ValueError, AttributeError):
+ return None
diff --git a/collectors/python.d.plugin/exim/exim.conf b/collectors/python.d.plugin/exim/exim.conf
new file mode 100644
index 000000000..2add7b2cb
--- /dev/null
+++ b/collectors/python.d.plugin/exim/exim.conf
@@ -0,0 +1,93 @@
+# netdata python.d.plugin configuration for exim
+#
+# This file is in YaML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# exim is slow, so once every 10 seconds
+update_every: 10
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# retries sets the number of retries to be made in case of failures.
+# If unset, the default for python.d.plugin is used.
+# Attempts to restore the service are made once every update_every
+# and only if the module has collected values in the past.
+# retries: 60
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# retries: 60 # the JOB's number of restoration attempts
+# autodetection_retry: 0 # the JOB's re-check interval in seconds
+#
+# Additionally to the above, exim also supports the following:
+#
+# command: 'exim -bpc' # the command to run
+#
+
+# ----------------------------------------------------------------------
+# REQUIRED exim CONFIGURATION
+#
+# netdata will query exim as user netdata.
+# By default exim will refuse to respond.
+#
+# To allow querying exim as a non-admin user, please add the following
+# to your exim configuration:
+#
+# queue_list_requires_admin = false
+#
+# Your exim configuration should be in
+#
+# /etc/exim/exim4.conf
+# or
+# /etc/exim4/conf.d/main/000_local_options
+#
+# Please consult your distribution information to find the exact file.
+
+# ----------------------------------------------------------------------
+# AUTO-DETECTION JOBS
+
+local:
+ command: 'exim -bpc'
diff --git a/collectors/python.d.plugin/fail2ban/Makefile.inc b/collectors/python.d.plugin/fail2ban/Makefile.inc
new file mode 100644
index 000000000..31e117e53
--- /dev/null
+++ b/collectors/python.d.plugin/fail2ban/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += fail2ban/fail2ban.chart.py
+dist_pythonconfig_DATA += fail2ban/fail2ban.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += fail2ban/README.md fail2ban/Makefile.inc
+
diff --git a/collectors/python.d.plugin/fail2ban/README.md b/collectors/python.d.plugin/fail2ban/README.md
new file mode 100644
index 000000000..2ab021965
--- /dev/null
+++ b/collectors/python.d.plugin/fail2ban/README.md
@@ -0,0 +1,23 @@
+# fail2ban
+
+This module monitors the fail2ban log file and shows bans for all active jails.
+
+**Requirements:**
+ * the fail2ban.log file MUST BE readable by netdata (a good approach is to add **create 0640 root netdata** to the fail2ban entry in your logrotate.d configuration; see the sketch below)
+
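+A minimal logrotate stanza along those lines might look like the following (the log path and
+rotation policy are illustrative, adjust them to your setup):
+
+```
+/var/log/fail2ban.log {
+    weekly
+    rotate 4
+    compress
+    create 0640 root netdata
+}
+```
+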
+It produces two charts, ban rate and currently banned IPs, each with one line per jail.
+
+### configuration
+
+Sample:
+
+```yaml
+local:
+ log_path: '/var/log/fail2ban.log'
+ conf_path: '/etc/fail2ban/jail.local'
+ exclude: 'dropbear apache'
+```
+If no configuration is given, the module will attempt to read the log file at `/var/log/fail2ban.log` and the conf file at `/etc/fail2ban/jail.local`.
+If the conf file is not found, the default jail is `ssh`.
+
+---
diff --git a/collectors/python.d.plugin/fail2ban/fail2ban.chart.py b/collectors/python.d.plugin/fail2ban/fail2ban.chart.py
new file mode 100644
index 000000000..954689008
--- /dev/null
+++ b/collectors/python.d.plugin/fail2ban/fail2ban.chart.py
@@ -0,0 +1,196 @@
+# -*- coding: utf-8 -*-
+# Description: fail2ban log netdata python.d module
+# Author: l2isbad
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+import re
+import os
+
+from collections import defaultdict
+from glob import glob
+
+from bases.FrameworkServices.LogService import LogService
+
+
+ORDER = [
+ 'jails_bans',
+ 'jails_in_jail',
+]
+
+
+def charts(jails):
+ """
+ Chart definitions creating
+ """
+
+ ch = {
+ ORDER[0]: {
+ 'options': [None, 'Jails Ban Rate', 'bans/s', 'bans', 'jail.bans', 'line'],
+ 'lines': []
+ },
+ ORDER[1]: {
+ 'options': [None, 'Banned IPs (since the last restart of netdata)', 'IPs', 'in jail',
+ 'jail.in_jail', 'line'],
+ 'lines': []
+ },
+ }
+ for jail in jails:
+ ch[ORDER[0]]['lines'].append([jail, jail, 'incremental'])
+ ch[ORDER[1]]['lines'].append(['{0}_in_jail'.format(jail), jail, 'absolute'])
+
+ return ch
+
+
+RE_JAILS = re.compile(r'\[([a-zA-Z0-9_-]+)\][^\[\]]+?enabled\s+= (true|false)')
+
+# Example:
+# 2018-09-12 11:45:53,715 fail2ban.actions[25029]: WARNING [ssh] Unban 195.201.88.33
+# 2018-09-12 11:45:58,727 fail2ban.actions[25029]: WARNING [ssh] Ban 217.59.246.27
+RE_DATA = re.compile(r'\[(?P<jail>[A-Za-z-_0-9]+)\] (?P<action>Unban|Ban) (?P<ip>[a-f0-9.:]+)')
+
+DEFAULT_JAILS = [
+ 'ssh',
+]
+
+
+class Service(LogService):
+ def __init__(self, configuration=None, name=None):
+ LogService.__init__(self, configuration=configuration, name=name)
+ self.order = ORDER
+ self.definitions = dict()
+
+ self.log_path = self.configuration.get('log_path', '/var/log/fail2ban.log')
+ self.conf_path = self.configuration.get('conf_path', '/etc/fail2ban/jail.local')
+ self.conf_dir = self.configuration.get('conf_dir', '/etc/fail2ban/jail.d/')
+ self.exclude = self.configuration.get('exclude', str())
+
+ self.monitoring_jails = list()
+ self.banned_ips = defaultdict(set)
+ self.data = dict()
+
+ def check(self):
+ """
+ :return: bool
+ """
+ if not self.conf_path.endswith(('.conf', '.local')):
+ self.error('{0} is a wrong conf path name, must be *.conf or *.local'.format(self.conf_path))
+ return False
+
+ if not os.access(self.log_path, os.R_OK):
+ self.error('{0} is not readable'.format(self.log_path))
+ return False
+
+ if os.path.getsize(self.log_path) == 0:
+ self.error('{0} is empty'.format(self.log_path))
+ return False
+
+ self.monitoring_jails = self.jails_auto_detection()
+ for jail in self.monitoring_jails:
+ self.data[jail] = 0
+ self.data['{0}_in_jail'.format(jail)] = 0
+
+ self.definitions = charts(self.monitoring_jails)
+ self.info('monitoring jails: {0}'.format(self.monitoring_jails))
+
+ return True
+
+ def get_data(self):
+ """
+ :return: dict
+ """
+ raw = self._get_raw_data()
+
+ if not raw:
+ return None if raw is None else self.data
+
+ for row in raw:
+ match = RE_DATA.search(row)
+
+ if not match:
+ continue
+
+ match = match.groupdict()
+
+ if match['jail'] not in self.monitoring_jails:
+ continue
+
+ jail, action, ip = match['jail'], match['action'], match['ip']
+
+ if action == 'Ban':
+ self.data[jail] += 1
+ if ip not in self.banned_ips[jail]:
+ self.banned_ips[jail].add(ip)
+ self.data['{0}_in_jail'.format(jail)] += 1
+ else:
+ if ip in self.banned_ips[jail]:
+ self.banned_ips[jail].remove(ip)
+ self.data['{0}_in_jail'.format(jail)] -= 1
+
+ return self.data
+
+ def get_files_from_dir(self, dir_path, suffix):
+ """
+ :return: list
+ """
+ if not os.path.isdir(dir_path):
+ self.error('{0} is not a directory'.format(dir_path))
+ return list()
+
+        return glob('{0}/*.{1}'.format(dir_path, suffix))
+
+ def get_jails_from_file(self, file_path):
+ """
+ :return: list
+ """
+ if not os.access(file_path, os.R_OK):
+            self.error('{0} is not readable or does not exist'.format(file_path))
+ return list()
+
+ with open(file_path, 'rt') as f:
+ lines = f.readlines()
+ raw = ' '.join(line for line in lines if line.startswith(('[', 'enabled')))
+
+ match = RE_JAILS.findall(raw)
+ # Result: [('ssh', 'true'), ('dropbear', 'true'), ('pam-generic', 'true'), ...]
+
+ if not match:
+ self.debug('{0} parse failed'.format(file_path))
+ return list()
+
+ return match
+
+ def jails_auto_detection(self):
+ """
+ :return: list
+
+ Parses jail configuration files. Returns list of enabled jails.
+        According to man jail.conf, the parse order must be
+ * jail.conf
+ * jail.d/*.conf (in alphabetical order)
+ * jail.local
+ * jail.d/*.local (in alphabetical order)
+ """
+ jails_files, all_jails, active_jails = list(), list(), list()
+
+ jails_files.append('{0}.conf'.format(self.conf_path.rsplit('.')[0]))
+ jails_files.extend(self.get_files_from_dir(self.conf_dir, 'conf'))
+ jails_files.append('{0}.local'.format(self.conf_path.rsplit('.')[0]))
+ jails_files.extend(self.get_files_from_dir(self.conf_dir, 'local'))
+
+ self.debug('config files to parse: {0}'.format(jails_files))
+
+ for f in jails_files:
+ all_jails.extend(self.get_jails_from_file(f))
+
+ exclude = self.exclude.split()
+
+ for name, status in all_jails:
+ if name in exclude:
+ continue
+
+ if status == 'true' and name not in active_jails:
+ active_jails.append(name)
+ elif status == 'false' and name in active_jails:
+ active_jails.remove(name)
+
+ return active_jails or DEFAULT_JAILS
diff --git a/collectors/python.d.plugin/fail2ban/fail2ban.conf b/collectors/python.d.plugin/fail2ban/fail2ban.conf
new file mode 100644
index 000000000..60ca87231
--- /dev/null
+++ b/collectors/python.d.plugin/fail2ban/fail2ban.conf
@@ -0,0 +1,70 @@
+# netdata python.d.plugin configuration for fail2ban
+#
+# This file is in YaML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 1
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# retries sets the number of retries to be made in case of failures.
+# If unset, the default for python.d.plugin is used.
+# Attempts to restore the service are made once every update_every
+# and only if the module has collected values in the past.
+# retries: 60
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# retries: 60 # the JOB's number of restoration attempts
+# autodetection_retry: 0 # the JOB's re-check interval in seconds
+#
+# Additionally to the above, fail2ban also supports the following:
+#
+# log_path: 'path to fail2ban.log' # Default: '/var/log/fail2ban.log'
+# conf_path: 'path to jail.local/jail.conf' # Default: '/etc/fail2ban/jail.local'
+# conf_dir: 'path to jail.d/' # Default: '/etc/fail2ban/jail.d/'
+# exclude: 'jails you want to exclude from autodetection' # Default: none
+#------------------------------------------------------------------------------------------------------------------
diff --git a/collectors/python.d.plugin/freeradius/Makefile.inc b/collectors/python.d.plugin/freeradius/Makefile.inc
new file mode 100644
index 000000000..54aa6492f
--- /dev/null
+++ b/collectors/python.d.plugin/freeradius/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += freeradius/freeradius.chart.py
+dist_pythonconfig_DATA += freeradius/freeradius.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += freeradius/README.md freeradius/Makefile.inc
+
diff --git a/collectors/python.d.plugin/freeradius/README.md b/collectors/python.d.plugin/freeradius/README.md
new file mode 100644
index 000000000..e5fe88ec3
--- /dev/null
+++ b/collectors/python.d.plugin/freeradius/README.md
@@ -0,0 +1,70 @@
+# freeradius
+
+Uses the `radclient` command to provide FreeRADIUS statistics. It is not recommended to run it every second.
+
+It produces:
+
+1. **Authentication counters:**
+ * access-accepts
+ * access-rejects
+ * auth-dropped-requests
+ * auth-duplicate-requests
+ * auth-invalid-requests
+ * auth-malformed-requests
+ * auth-unknown-types
+
+2. **Accounting counters:** [optional]
+ * accounting-requests
+ * accounting-responses
+ * acct-dropped-requests
+ * acct-duplicate-requests
+ * acct-invalid-requests
+ * acct-malformed-requests
+ * acct-unknown-types
+
+3. **Proxy authentication counters:** [optional]
+ * proxy-access-accepts
+ * proxy-access-rejects
+ * proxy-auth-dropped-requests
+ * proxy-auth-duplicate-requests
+ * proxy-auth-invalid-requests
+ * proxy-auth-malformed-requests
+ * proxy-auth-unknown-types
+
+4. **Proxy accounting counters:** [optional]
+ * proxy-accounting-requests
+ * proxy-accounting-responses
+ * proxy-acct-dropped-requests
+ * proxy-acct-duplicate-requests
+ * proxy-acct-invalid-requests
+ * proxy-acct-malformed-requests
+ * proxy-acct-unknown-types
+
+
+### configuration
+
+Sample:
+
+```yaml
+local:
+ host : 'localhost'
+ port : '18121'
+ secret : 'adminsecret'
+ acct : False # Freeradius accounting statistics.
+ proxy_auth : False # Freeradius proxy authentication statistics.
+ proxy_acct : False # Freeradius proxy accounting statistics.
+```
+
+**Freeradius server configuration:**
+
+The configuration for the status server is automatically created in the sites-available directory.
+By default, the status server is enabled and can be queried from every client.
+FreeRADIUS will only respond to status-server messages if the status-server virtual server has been enabled.
+
+To do this, create a link from the sites-enabled directory to the status file in the sites-available directory:
+ * cd sites-enabled
+ * ln -s ../sites-available/status status
+
+and restart/reload your FreeRADIUS server.
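+
+For reference, the query the module sends is equivalent to the pipeline below (the host, port
+and secret are placeholders, substitute the values from your configuration):
+
+```sh
+echo "Message-Authenticator = 0x00, FreeRADIUS-Statistics-Type = 15, Response-Packet-Type = Access-Accept" \
+  | radclient -r 1 -t 1 -x 127.0.0.1:18121 status adminsecret
+```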
+
+---
diff --git a/collectors/python.d.plugin/freeradius/freeradius.chart.py b/collectors/python.d.plugin/freeradius/freeradius.chart.py
new file mode 100644
index 000000000..3126831b7
--- /dev/null
+++ b/collectors/python.d.plugin/freeradius/freeradius.chart.py
@@ -0,0 +1,129 @@
+# -*- coding: utf-8 -*-
+# Description: freeradius netdata python.d module
+# Author: l2isbad
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from re import findall
+from subprocess import Popen, PIPE
+
+from bases.collection import find_binary
+from bases.FrameworkServices.SimpleService import SimpleService
+
+# default module values (can be overridden per job in `config`)
+priority = 60000
+retries = 60
+update_every = 15
+
+RADIUS_MSG = 'Message-Authenticator = 0x00, FreeRADIUS-Statistics-Type = 15, Response-Packet-Type = Access-Accept'
+
+# charts order (can be overridden if you want less charts, or different order)
+ORDER = ['authentication', 'accounting', 'proxy-auth', 'proxy-acct']
+
+CHARTS = {
+ 'authentication': {
+ 'options': [None, 'Authentication', 'packets/s', 'Authentication', 'freerad.auth', 'line'],
+ 'lines': [
+ ['access-accepts', None, 'incremental'],
+ ['access-rejects', None, 'incremental'],
+ ['auth-dropped-requests', 'dropped-requests', 'incremental'],
+ ['auth-duplicate-requests', 'duplicate-requests', 'incremental'],
+ ['auth-invalid-requests', 'invalid-requests', 'incremental'],
+ ['auth-malformed-requests', 'malformed-requests', 'incremental'],
+ ['auth-unknown-types', 'unknown-types', 'incremental']
+ ]
+ },
+ 'accounting': {
+ 'options': [None, 'Accounting', 'packets/s', 'Accounting', 'freerad.acct', 'line'],
+ 'lines': [
+ ['accounting-requests', 'requests', 'incremental'],
+ ['accounting-responses', 'responses', 'incremental'],
+ ['acct-dropped-requests', 'dropped-requests', 'incremental'],
+ ['acct-duplicate-requests', 'duplicate-requests', 'incremental'],
+ ['acct-invalid-requests', 'invalid-requests', 'incremental'],
+ ['acct-malformed-requests', 'malformed-requests', 'incremental'],
+ ['acct-unknown-types', 'unknown-types', 'incremental']
+ ]
+ },
+ 'proxy-auth': {
+ 'options': [None, 'Proxy Authentication', 'packets/s', 'Authentication', 'freerad.proxy.auth', 'line'],
+ 'lines': [
+ ['proxy-access-accepts', 'access-accepts', 'incremental'],
+ ['proxy-access-rejects', 'access-rejects', 'incremental'],
+ ['proxy-auth-dropped-requests', 'dropped-requests', 'incremental'],
+ ['proxy-auth-duplicate-requests', 'duplicate-requests', 'incremental'],
+ ['proxy-auth-invalid-requests', 'invalid-requests', 'incremental'],
+ ['proxy-auth-malformed-requests', 'malformed-requests', 'incremental'],
+ ['proxy-auth-unknown-types', 'unknown-types', 'incremental']
+ ]
+ },
+ 'proxy-acct': {
+ 'options': [None, 'Proxy Accounting', 'packets/s', 'Accounting', 'freerad.proxy.acct', 'line'],
+ 'lines': [
+ ['proxy-accounting-requests', 'requests', 'incremental'],
+ ['proxy-accounting-responses', 'responses', 'incremental'],
+ ['proxy-acct-dropped-requests', 'dropped-requests', 'incremental'],
+ ['proxy-acct-duplicate-requests', 'duplicate-requests', 'incremental'],
+ ['proxy-acct-invalid-requests', 'invalid-requests', 'incremental'],
+ ['proxy-acct-malformed-requests', 'malformed-requests', 'incremental'],
+ ['proxy-acct-unknown-types', 'unknown-types', 'incremental']
+ ]
+ }
+}
+
+
+class Service(SimpleService):
+ def __init__(self, configuration=None, name=None):
+ SimpleService.__init__(self, configuration=configuration, name=name)
+ self.definitions = CHARTS
+ self.host = self.configuration.get('host', 'localhost')
+ self.port = self.configuration.get('port', '18121')
+ self.secret = self.configuration.get('secret')
+ self.acct = self.configuration.get('acct', False)
+ self.proxy_auth = self.configuration.get('proxy_auth', False)
+ self.proxy_acct = self.configuration.get('proxy_acct', False)
+ chart_choice = [True, bool(self.acct), bool(self.proxy_auth), bool(self.proxy_acct)]
+ self.order = [chart for chart, choice in zip(ORDER, chart_choice) if choice]
+ self.echo = find_binary('echo')
+ self.radclient = find_binary('radclient')
+ self.sub_echo = [self.echo, RADIUS_MSG]
+ self.sub_radclient = [self.radclient, '-r', '1', '-t', '1', '-x',
+ ':'.join([self.host, self.port]), 'status', self.secret]
+
+ def check(self):
+ if not all([self.echo, self.radclient]):
+            self.error('Can\'t locate "echo" or "radclient" binary or binary is not executable by netdata')
+ return False
+ if not self.secret:
+ self.error('"secret" not set')
+            return False
+
+ if self._get_raw_data():
+ return True
+ self.error('Request returned no data. Is server alive?')
+ return False
+
+ def _get_data(self):
+ """
+ Format data received from shell command
+ :return: dict
+ """
+ result = self._get_raw_data()
+ return dict([(elem[0].lower(), int(elem[1])) for elem in findall(r'((?<=-)[AP][a-zA-Z-]+) = (\d+)', result)])
+
+ def _get_raw_data(self):
+ """
+ The following code is equivalent to
+ 'echo "Message-Authenticator = 0x00, FreeRADIUS-Statistics-Type = 15, Response-Packet-Type = Access-Accept"
+ | radclient -t 1 -r 1 host:port status secret'
+ :return: str
+ """
+ try:
+ process_echo = Popen(self.sub_echo, stdout=PIPE, stderr=PIPE, shell=False)
+ process_rad = Popen(self.sub_radclient, stdin=process_echo.stdout, stdout=PIPE, stderr=PIPE, shell=False)
+ process_echo.stdout.close()
+ raw_result = process_rad.communicate()[0]
+ except OSError:
+ return None
+        if process_rad.returncode == 0:
+ return raw_result.decode()
+ return None
diff --git a/collectors/python.d.plugin/freeradius/freeradius.conf b/collectors/python.d.plugin/freeradius/freeradius.conf
new file mode 100644
index 000000000..3336d4c49
--- /dev/null
+++ b/collectors/python.d.plugin/freeradius/freeradius.conf
@@ -0,0 +1,82 @@
+# netdata python.d.plugin configuration for freeradius
+#
+# This file is in YaML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 1
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# retries sets the number of retries to be made in case of failures.
+# If unset, the default for python.d.plugin is used.
+# Attempts to restore the service are made once every update_every
+# and only if the module has collected values in the past.
+# retries: 60
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# retries: 60 # the JOB's number of restoration attempts
+# autodetection_retry: 0 # the JOB's re-check interval in seconds
+#
+# Additionally to the above, freeradius also supports the following:
+#
+# host: 'host' # Default: 'localhost'. Server ip address or hostname.
+#     port: 'port'           # Default: '18121'. Port on which the freeradius server listens (type = status).
+# secret: 'secret' # Default: 'adminsecret'.
+# acct: yes/no # Default: no. Freeradius accounting statistics.
+# proxy_auth: yes/no # Default: no. Freeradius proxy authentication statistics.
+# proxy_acct: yes/no # Default: no. Freeradius proxy accounting statistics.
+#
+# ------------------------------------------------------------------------------------------------------------------
+# Freeradius server configuration:
+# The configuration for the status server is automatically created in the sites-available directory.
+# By default, the status server is enabled and can be queried from every client.
+# FreeRADIUS will only respond to status-server messages if the status-server virtual server has been enabled.
+# To do this, create a link from the sites-enabled directory to the status file in the sites-available directory:
+# cd sites-enabled
+# ln -s ../sites-available/status status
+# and restart/reload your FREERADIUS server.
+# ------------------------------------------------------------------------------------------------------------------
diff --git a/collectors/python.d.plugin/go_expvar/Makefile.inc b/collectors/python.d.plugin/go_expvar/Makefile.inc
new file mode 100644
index 000000000..74f50d765
--- /dev/null
+++ b/collectors/python.d.plugin/go_expvar/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += go_expvar/go_expvar.chart.py
+dist_pythonconfig_DATA += go_expvar/go_expvar.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += go_expvar/README.md go_expvar/Makefile.inc
+
diff --git a/collectors/python.d.plugin/go_expvar/README.md b/collectors/python.d.plugin/go_expvar/README.md
new file mode 100644
index 000000000..6309c195f
--- /dev/null
+++ b/collectors/python.d.plugin/go_expvar/README.md
@@ -0,0 +1,276 @@
+# go_expvar
+
+The `go_expvar` module can monitor any Go application that exposes its metrics with the use of
+`expvar` package from the Go standard library.
+
+`go_expvar` produces charts for Go runtime memory statistics and optionally any number of custom charts.
+
+For the memory statistics, it produces the following charts:
+
+1. **Heap allocations** in kB
+ * alloc: size of objects allocated on the heap
+ * inuse: size of allocated heap spans
+
+2. **Stack allocations** in kB
+ * inuse: size of allocated stack spans
+
+3. **MSpan allocations** in kB
+ * inuse: size of allocated mspan structures
+
+4. **MCache allocations** in kB
+ * inuse: size of allocated mcache structures
+
+5. **Virtual memory** in kB
+ * sys: size of reserved virtual address space
+
+6. **Live objects**
+ * live: number of live objects in memory
+
+7. **GC pauses average** in ns
+ * avg: average duration of all GC stop-the-world pauses
+
+
+## Monitoring Go Applications
+
+Netdata can be used to monitor running Go applications that expose their metrics with
+the use of the [expvar package](https://golang.org/pkg/expvar/) included in Go standard library.
+
+The `expvar` package exposes these metrics over HTTP and is very easy to use.
+Consider this minimal sample below:
+
+```go
+package main
+
+import (
+ _ "expvar"
+ "net/http"
+)
+
+func main() {
+ http.ListenAndServe("127.0.0.1:8080", nil)
+}
+```
+
+When imported this way, the `expvar` package registers an HTTP handler at `/debug/vars` that
+exposes Go runtime's memory statistics in JSON format. You can inspect the output by opening
+the URL in your browser (or by using `wget` or `curl`).
+
+Sample output:
+
+```json
+{
+"cmdline": ["./expvar-demo-binary"],
+"memstats": {"Alloc":630856,"TotalAlloc":630856,"Sys":3346432,"Lookups":27, <ommited for brevity>}
+}
+```
+
+You can of course expose and monitor your own variables as well.
+Here is a sample Go application that exposes a few custom variables:
+
+```go
+package main
+
+import (
+ "expvar"
+ "net/http"
+ "runtime"
+ "time"
+)
+
+func main() {
+
+ tick := time.NewTicker(1 * time.Second)
+ num_go := expvar.NewInt("runtime.goroutines")
+ counters := expvar.NewMap("counters")
+ counters.Set("cnt1", new(expvar.Int))
+ counters.Set("cnt2", new(expvar.Float))
+
+ go http.ListenAndServe(":8080", nil)
+
+ for {
+ select {
+ case <- tick.C:
+ num_go.Set(int64(runtime.NumGoroutine()))
+ counters.Add("cnt1", 1)
+ counters.AddFloat("cnt2", 1.452)
+ }
+ }
+}
+```
+
+Apart from the runtime memory stats, this application publishes two counters and the
+number of currently running goroutines, and updates these stats every second.
+
+In the next section, we will cover how to monitor and chart these exposed stats with
+netdata's `go_expvar` module.
+
+### Using netdata go_expvar module
+
+The `go_expvar` module is disabled by default. To enable it, edit [`python.d.conf`](../python.d.conf)
+(to edit it on your system run `/etc/netdata/edit-config python.d.conf`), and change the `go_expvar`
+variable to `yes`:
+
+```
+# Enable / Disable python.d.plugin modules
+#default_run: yes
+#
+# If "default_run" = "yes" the default for all modules is enabled (yes).
+# Setting any of these to "no" will disable it.
+#
+# If "default_run" = "no" the default for all modules is disabled (no).
+# Setting any of these to "yes" will enable it.
+...
+go_expvar: yes
+...
+```
+
+Next, we need to edit the module configuration file, found at [`/etc/netdata/python.d/go_expvar.conf`](go_expvar.conf) by default
+(to edit it on your system run `/etc/netdata/edit-config python.d/go_expvar.conf`).
+The module configuration consists of jobs, where each job can be used to monitor a separate Go application.
+Let's see a sample job configuration:
+
+```
+# /etc/netdata/python.d/go_expvar.conf
+
+app1:
+ name : 'app1'
+ url : 'http://127.0.0.1:8080/debug/vars'
+ collect_memstats: true
+ extra_charts: {}
+```
+
+Let's go over each of the defined options:
+
+ name: 'app1'
+
+This is the job name that will appear at the netdata dashboard.
+If not defined, the job_name (top level key) will be used.
+
+ url: 'http://127.0.0.1:8080/debug/vars'
+
+This is the URL of the expvar endpoint. As the expvar handler can be installed
+in a custom path, the whole URL has to be specified. This value is mandatory.
+
+ collect_memstats: true
+
+Whether to enable collecting stats about Go runtime's memory. You can find more
+information about the exposed values at the [runtime package docs](https://golang.org/pkg/runtime/#MemStats).
+
+ extra_charts: {}
+
+Enables the user to specify custom expvars to monitor and chart.
+Will be explained in more detail below.
+
+**Note: if `collect_memstats` is disabled and no `extra_charts` are defined, the plugin will
+disable itself, as there will be no data to collect!**
+
+Apart from these options, each job supports options inherited from netdata's `python.d.plugin`
+and its base `UrlService` class. These are:
+
+ update_every: 1 # the job's data collection frequency
+ priority: 60000 # the job's order on the dashboard
+ retries: 60 # the job's number of restoration attempts
+ user: admin # use when the expvar endpoint is protected by HTTP Basic Auth
+ password: sekret # use when the expvar endpoint is protected by HTTP Basic Auth
+
+### Monitoring custom vars with go_expvar
+
+Now, memory stats might be useful, but what if you want netdata to monitor some custom values
+that your Go application exposes? The `go_expvar` module can do that as well with the use of
+the `extra_charts` configuration variable.
+
+The `extra_charts` variable is a YaML list of netdata chart definitions.
+Each chart definition has the following keys:
+
+ id: netdata chart ID
+ options: a key-value mapping of chart options
+ lines: a list of line definitions
+
+**Note: please do not use dots in the chart or line ID field.
+See [this issue](https://github.com/netdata/netdata/pull/1902#issuecomment-284494195) for explanation.**
+
+Please see these two links to the official netdata documentation for more information about the values:
+
+- [External plugins - charts](../../plugins.d/#chart)
+- [Chart variables](https://github.com/netdata/netdata/wiki/How-to-write-new-module#global-variables-order-and-chart)
+
+**Line definitions**
+
+Each chart can define multiple lines (dimensions).
+A line definition is a key-value mapping of line options.
+Each line can have the following options:
+
+ # mandatory
+ expvar_key: the name of the expvar as present in the JSON output of /debug/vars endpoint
+ expvar_type: value type; supported are "float" or "int"
+ id: the id of this line/dimension in netdata
+
+ # optional - netdata defaults are used if these options are not defined
+ name: ''
+ algorithm: absolute
+ multiplier: 1
+ divisor: 100 if expvar_type == float, 1 if expvar_type == int
+ hidden: False
+
+Please see the following link for more information about the options and their default values:
+[External plugins - dimensions](../../plugins.d/#dimension)
+
+Apart from top-level expvars, this plugin can also parse expvars stored in a multi-level map.
+All dicts in the resulting JSON document are then flattened to one level,
+and expvar names are joined together with '.' when flattening.
+
+Example:
+```
+{
+ "counters": {"cnt1": 1042, "cnt2": 1512.9839999999983},
+ "runtime.goroutines": 5
+}
+```
+
+In the above case, the exported variables will be available under `runtime.goroutines`,
+`counters.cnt1` and `counters.cnt2` expvar_keys. If the flattening results in a key collision,
+the first defined key wins and all subsequent keys with the same name are ignored.
+
+**Configuration example**
+
+The configuration below matches the second Go application described above.
+Netdata will monitor and chart memory stats for the application, as well as a custom chart of
+running goroutines and two dummy counters.
+
+```
+app1:
+ name : 'app1'
+ url : 'http://127.0.0.1:8080/debug/vars'
+ collect_memstats: true
+ extra_charts:
+ - id: "runtime_goroutines"
+ options:
+ name: num_goroutines
+ title: "runtime: number of goroutines"
+ units: goroutines
+ family: runtime
+ context: expvar.runtime.goroutines
+ chart_type: line
+ lines:
+ - {expvar_key: 'runtime.goroutines', expvar_type: int, id: runtime_goroutines}
+ - id: "foo_counters"
+ options:
+ name: counters
+ title: "some random counters"
+ units: awesomeness
+ family: counters
+ context: expvar.foo.counters
+ chart_type: line
+ lines:
+ - {expvar_key: 'counters.cnt1', expvar_type: int, id: counters_cnt1}
+ - {expvar_key: 'counters.cnt2', expvar_type: float, id: counters_cnt2}
+```
+
+**Netdata charts example**
+
+The images below show how the final charts look in netdata.
+
+![Memory stats charts](https://cloud.githubusercontent.com/assets/15180106/26762052/62b4af58-493b-11e7-9e69-146705acfc2c.png)
+
+![Custom charts](https://cloud.githubusercontent.com/assets/15180106/26762051/62ae915e-493b-11e7-8518-bd25a3886650.png)
+
diff --git a/collectors/python.d.plugin/go_expvar/go_expvar.chart.py b/collectors/python.d.plugin/go_expvar/go_expvar.chart.py
new file mode 100644
index 000000000..76e8b72ec
--- /dev/null
+++ b/collectors/python.d.plugin/go_expvar/go_expvar.chart.py
@@ -0,0 +1,245 @@
+# -*- coding: utf-8 -*-
+# Description: go_expvar netdata python.d module
+# Author: Jan Kral (kralewitz)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import division
+import json
+
+from bases.FrameworkServices.UrlService import UrlService
+
+# default module values (can be overridden per job in `config`)
+# update_every = 2
+priority = 60000
+retries = 60
+
+
+MEMSTATS_CHARTS = {
+ 'memstats_heap': {
+ 'options': ['heap', 'memory: size of heap memory structures', 'kB', 'memstats',
+ 'expvar.memstats.heap', 'line'],
+ 'lines': [
+ ['memstats_heap_alloc', 'alloc', 'absolute', 1, 1024],
+ ['memstats_heap_inuse', 'inuse', 'absolute', 1, 1024]
+ ]
+ },
+ 'memstats_stack': {
+ 'options': ['stack', 'memory: size of stack memory structures', 'kB', 'memstats',
+ 'expvar.memstats.stack', 'line'],
+ 'lines': [
+ ['memstats_stack_inuse', 'inuse', 'absolute', 1, 1024]
+ ]
+ },
+ 'memstats_mspan': {
+ 'options': ['mspan', 'memory: size of mspan memory structures', 'kB', 'memstats',
+ 'expvar.memstats.mspan', 'line'],
+ 'lines': [
+ ['memstats_mspan_inuse', 'inuse', 'absolute', 1, 1024]
+ ]
+ },
+ 'memstats_mcache': {
+ 'options': ['mcache', 'memory: size of mcache memory structures', 'kB', 'memstats',
+ 'expvar.memstats.mcache', 'line'],
+ 'lines': [
+ ['memstats_mcache_inuse', 'inuse', 'absolute', 1, 1024]
+ ]
+ },
+ 'memstats_live_objects': {
+ 'options': ['live_objects', 'memory: number of live objects', 'objects', 'memstats',
+ 'expvar.memstats.live_objects', 'line'],
+ 'lines': [
+ ['memstats_live_objects', 'live']
+ ]
+ },
+ 'memstats_sys': {
+ 'options': ['sys', 'memory: size of reserved virtual address space', 'kB', 'memstats',
+ 'expvar.memstats.sys', 'line'],
+ 'lines': [
+ ['memstats_sys', 'sys', 'absolute', 1, 1024]
+ ]
+ },
+ 'memstats_gc_pauses': {
+ 'options': ['gc_pauses', 'memory: average duration of GC pauses', 'ns', 'memstats',
+ 'expvar.memstats.gc_pauses', 'line'],
+ 'lines': [
+ ['memstats_gc_pauses', 'avg']
+ ]
+ }
+}
+
+MEMSTATS_ORDER = ['memstats_heap', 'memstats_stack', 'memstats_mspan', 'memstats_mcache',
+ 'memstats_sys', 'memstats_live_objects', 'memstats_gc_pauses']
+
+
+def flatten(d, top='', sep='.'):
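+    # Recursively flatten a nested dict, joining nested keys with `sep`,
+    # e.g. {'counters': {'cnt1': 1}} becomes {'counters.cnt1': 1}.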
+ items = []
+ for key, val in d.items():
+ nkey = top + sep + key if top else key
+ if isinstance(val, dict):
+ items.extend(flatten(val, nkey, sep=sep).items())
+ else:
+ items.append((nkey, val))
+ return dict(items)
+
+
+class Service(UrlService):
+ def __init__(self, configuration=None, name=None):
+ UrlService.__init__(self, configuration=configuration, name=name)
+
+ # if memstats collection is enabled, add the charts and their order
+ if self.configuration.get('collect_memstats'):
+ self.definitions = dict(MEMSTATS_CHARTS)
+ self.order = list(MEMSTATS_ORDER)
+ else:
+ self.definitions = dict()
+ self.order = list()
+
+ # if extra charts are defined, parse their config
+ extra_charts = self.configuration.get('extra_charts')
+ if extra_charts:
+ self._parse_extra_charts_config(extra_charts)
+
+ def check(self):
+ """
+ Check if the module can collect data:
+ 1) At least one JOB configuration has to be specified
+ 2) The JOB configuration needs to define the URL and either collect_memstats must be enabled or at least one
+ extra_chart must be defined.
+
+ The configuration and URL check is provided by the UrlService class.
+ """
+
+ if not (self.configuration.get('extra_charts') or self.configuration.get('collect_memstats')):
+ self.error('Memstats collection is disabled and no extra_charts are defined, disabling module.')
+ return False
+
+ return UrlService.check(self)
+
+ def _parse_extra_charts_config(self, extra_charts_config):
+
+ # a place to store the expvar keys and their types
+ self.expvars = dict()
+
+ for chart in extra_charts_config:
+
+ chart_dict = dict()
+ chart_id = chart.get('id')
+ chart_lines = chart.get('lines')
+ chart_opts = chart.get('options', dict())
+
+ if not all([chart_id, chart_lines]):
+ self.info('Chart {0} has no ID or no lines defined, skipping'.format(chart))
+ continue
+
+ chart_dict['options'] = [
+ chart_opts.get('name', ''),
+ chart_opts.get('title', ''),
+ chart_opts.get('units', ''),
+ chart_opts.get('family', ''),
+ chart_opts.get('context', ''),
+ chart_opts.get('chart_type', 'line')
+ ]
+ chart_dict['lines'] = list()
+
+ # add the lines to the chart
+ for line in chart_lines:
+
+ ev_key = line.get('expvar_key')
+ ev_type = line.get('expvar_type')
+ line_id = line.get('id')
+
+ if not all([ev_key, ev_type, line_id]):
+ self.info('Line missing expvar_key, expvar_type, or line_id, skipping: {0}'.format(line))
+ continue
+
+ if ev_type not in ['int', 'float']:
+ self.info('Unsupported expvar_type "{0}". Must be "int" or "float"'.format(ev_type))
+ continue
+
+ if ev_key in self.expvars:
+ self.info('Duplicate expvar key {0}: skipping line.'.format(ev_key))
+ continue
+
+ self.expvars[ev_key] = (ev_type, line_id)
+
+ chart_dict['lines'].append(
+ [
+ line.get('id', ''),
+ line.get('name', ''),
+ line.get('algorithm', ''),
+ line.get('multiplier', 1),
+ line.get('divisor', 100 if ev_type == 'float' else 1),
+ line.get('hidden', False)
+ ]
+ )
+
+ self.order.append(chart_id)
+ self.definitions[chart_id] = chart_dict
+
+ def _get_data(self):
+ """
+ Format data received from http request
+ :return: dict
+ """
+
+ raw_data = self._get_raw_data()
+ if not raw_data:
+ return None
+
+ data = json.loads(raw_data)
+
+ expvars = dict()
+ if self.configuration.get('collect_memstats'):
+ expvars.update(self._parse_memstats(data))
+
+ if self.configuration.get('extra_charts'):
+ # the memstats part of the data has been already parsed, so we remove it before flattening and checking
+ # the rest of the data, thus avoiding needless iterating over the multiply nested memstats dict.
+ del (data['memstats'])
+ flattened = flatten(data)
+ for k, v in flattened.items():
+ ev = self.expvars.get(k)
+ if not ev:
+ # expvar is not defined in config, skip it
+ continue
+ try:
+ key_type, line_id = ev
+ if key_type == 'int':
+ expvars[line_id] = int(v)
+ elif key_type == 'float':
+                        # if the value type is float, multiply it by 100 to keep two decimals (the default line divisor is 100)
+ expvars[line_id] = float(v) * 100
+ except ValueError:
+ self.info('Failed to parse value for key {0} as {1}, ignoring key.'.format(k, key_type))
+ del self.expvars[k]
+
+ return expvars
+
+ @staticmethod
+ def _parse_memstats(data):
+
+ memstats = data['memstats']
+
+ # calculate the number of live objects in memory
+ live_objs = int(memstats['Mallocs']) - int(memstats['Frees'])
+
+ # calculate GC pause times average
+ # the Go runtime keeps the last 256 GC pause durations in a circular buffer,
+ # so we need to filter out the 0 values before the buffer is filled
+ gc_pauses = memstats['PauseNs']
+ try:
+ gc_pause_avg = sum(gc_pauses) / len([x for x in gc_pauses if x > 0])
+        # no GC cycles have occurred yet
+ except ZeroDivisionError:
+ gc_pause_avg = 0
+
+ return {
+ 'memstats_heap_alloc': memstats['HeapAlloc'],
+ 'memstats_heap_inuse': memstats['HeapInuse'],
+ 'memstats_stack_inuse': memstats['StackInuse'],
+ 'memstats_mspan_inuse': memstats['MSpanInuse'],
+ 'memstats_mcache_inuse': memstats['MCacheInuse'],
+ 'memstats_sys': memstats['Sys'],
+ 'memstats_live_objects': live_objs,
+ 'memstats_gc_pauses': gc_pause_avg,
+ }
diff --git a/collectors/python.d.plugin/go_expvar/go_expvar.conf b/collectors/python.d.plugin/go_expvar/go_expvar.conf
new file mode 100644
index 000000000..ba8922d2e
--- /dev/null
+++ b/collectors/python.d.plugin/go_expvar/go_expvar.conf
@@ -0,0 +1,110 @@
+# netdata python.d.plugin configuration for go_expvar
+#
+# This file is in YaML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 1
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# retries sets the number of retries to be made in case of failures.
+# If unset, the default for python.d.plugin is used.
+# Attempts to restore the service are made once every update_every
+# and only if the module has collected values in the past.
+# retries: 60
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# retries: 60 # the JOB's number of restoration attempts
+# autodetection_retry: 0 # the JOB's re-check interval in seconds
+#
+# Additionally to the above, this plugin also supports the following:
+#
+# url: 'http://127.0.0.1/debug/vars' # the URL of the expvar endpoint
+#
+# As the plugin cannot possibly know the port your application listens on, there is no default value. Please include
+# the whole path of the endpoint, as the expvar handler can be installed in a non-standard location.
+#
+# if the URL is password protected, the following are supported:
+#
+# user: 'username'
+# pass: 'password'
+#
+# collect_memstats: true # enables charts for Go runtime's memory statistics
+# extra_charts: {} # defines extra data/charts to monitor, please see the example below
+#
+# If collect_memstats is disabled and no extra charts are defined, this module will disable itself, as it has no data to
+# collect.
+#
+# Please visit the module wiki page for more information on how to use the extra_charts variable:
+#
+# https://github.com/netdata/netdata/wiki/Monitoring-Go-Applications#monitoring-custom-vars-with-go_expvar
+#
+# Configuration example
+# ---------------------
+
+#app1:
+# name : 'app1'
+# url : 'http://127.0.0.1:8080/debug/vars'
+# collect_memstats: true
+# extra_charts:
+# - id: "runtime_goroutines"
+# options:
+# name: num_goroutines
+# title: "runtime: number of goroutines"
+# units: goroutines
+# family: runtime
+# context: expvar.runtime.goroutines
+# chart_type: line
+# lines:
+# - {expvar_key: 'runtime.goroutines', expvar_type: int, id: runtime_goroutines}
+# - id: "foo_counters"
+# options:
+# name: counters
+# title: "some random counters"
+# units: awesomeness
+# family: counters
+# context: expvar.foo.counters
+# chart_type: line
+# lines:
+# - {expvar_key: 'counters.cnt1', expvar_type: int, id: counters_cnt1}
+# - {expvar_key: 'counters.cnt2', expvar_type: float, id: counters_cnt2}
+
diff --git a/collectors/python.d.plugin/haproxy/Makefile.inc b/collectors/python.d.plugin/haproxy/Makefile.inc
new file mode 100644
index 000000000..ad24deaa0
--- /dev/null
+++ b/collectors/python.d.plugin/haproxy/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += haproxy/haproxy.chart.py
+dist_pythonconfig_DATA += haproxy/haproxy.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += haproxy/README.md haproxy/Makefile.inc
+
diff --git a/collectors/python.d.plugin/haproxy/README.md b/collectors/python.d.plugin/haproxy/README.md
new file mode 100644
index 000000000..4bff25670
--- /dev/null
+++ b/collectors/python.d.plugin/haproxy/README.md
@@ -0,0 +1,49 @@
+# haproxy
+
+This module monitors frontend and backend metrics such as bytes in, bytes out, current sessions and current sessions in queue,
+as well as health metrics such as backend server status (a server check should be configured).
+
+The plugin can obtain data from a URL **OR** a UNIX socket.
+
+**Requirement:**
+The socket MUST be readable AND writable by the netdata user.
+
+It produces:
+
+1. **Frontend** family charts
+ * Kilobytes in/s
+ * Kilobytes out/s
+ * Sessions current
+ * Sessions in queue current
+
+2. **Backend** family charts
+ * Kilobytes in/s
+ * Kilobytes out/s
+ * Sessions current
+ * Sessions in queue current
+
+3. **Health** chart
+ * number of failed servers for every backend (in DOWN state)
+
+
+### configuration
+
+Sample:
+
+```yaml
+via_url:
+ user : 'username' # ONLY IF stats auth is used
+  pass  : 'password' # ONLY IF stats auth is used
+ url : 'http://ip.address:port/url;csv;norefresh'
+```
+
+OR
+
+```yaml
+via_socket:
+ socket : 'path/to/haproxy/sock'
+```
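+
+For the socket method, HAProxy must expose a stats socket that the netdata user can read and
+write. An illustrative `haproxy.cfg` snippet (the path, mode and level are placeholders, adjust
+them and the socket permissions to your setup):
+
+```
+global
+    stats socket /var/run/haproxy.sock mode 666 level admin
+```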
+
+If no configuration is given, the module will fail to run.
+
+---
diff --git a/collectors/python.d.plugin/haproxy/haproxy.chart.py b/collectors/python.d.plugin/haproxy/haproxy.chart.py
new file mode 100644
index 000000000..a46689f50
--- /dev/null
+++ b/collectors/python.d.plugin/haproxy/haproxy.chart.py
@@ -0,0 +1,370 @@
+# -*- coding: utf-8 -*-
+# Description: haproxy netdata python.d module
+# Author: l2isbad, ktarasz
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from collections import defaultdict
+from re import compile as re_compile
+
+try:
+ from urlparse import urlparse
+except ImportError:
+ from urllib.parse import urlparse
+
+from bases.FrameworkServices.SocketService import SocketService
+from bases.FrameworkServices.UrlService import UrlService
+
+
+# default module values (can be overridden per job in `config`)
+# update_every = 2
+priority = 60000
+retries = 60
+
+# charts order (can be overridden if you want less charts, or different order)
+ORDER = [
+ 'fbin',
+ 'fbout',
+ 'fscur',
+ 'fqcur',
+ 'fhrsp_1xx',
+ 'fhrsp_2xx',
+ 'fhrsp_3xx',
+ 'fhrsp_4xx',
+ 'fhrsp_5xx',
+ 'fhrsp_other',
+ 'fhrsp_total',
+ 'bbin',
+ 'bbout',
+ 'bscur',
+ 'bqcur',
+ 'bhrsp_1xx',
+ 'bhrsp_2xx',
+ 'bhrsp_3xx',
+ 'bhrsp_4xx',
+ 'bhrsp_5xx',
+ 'bhrsp_other',
+ 'bhrsp_total',
+ 'bqtime',
+ 'bttime',
+ 'brtime',
+ 'bctime',
+ 'health_sup',
+ 'health_sdown',
+ 'health_bdown',
+ 'health_idle'
+]
+
+CHARTS = {
+ 'fbin': {
+ 'options': [None, 'Kilobytes In', 'KB/s', 'frontend', 'haproxy_f.bin', 'line'],
+ 'lines': []
+ },
+ 'fbout': {
+ 'options': [None, 'Kilobytes Out', 'KB/s', 'frontend', 'haproxy_f.bout', 'line'],
+ 'lines': []
+ },
+ 'fscur': {
+ 'options': [None, 'Sessions Active', 'sessions', 'frontend', 'haproxy_f.scur', 'line'],
+ 'lines': []
+ },
+ 'fqcur': {
+ 'options': [None, 'Session In Queue', 'sessions', 'frontend', 'haproxy_f.qcur', 'line'],
+ 'lines': []
+ },
+ 'fhrsp_1xx': {
+ 'options': [None, 'HTTP responses with 1xx code', 'responses/s', 'frontend', 'haproxy_f.hrsp_1xx', 'line'],
+ 'lines': []
+ },
+ 'fhrsp_2xx': {
+ 'options': [None, 'HTTP responses with 2xx code', 'responses/s', 'frontend', 'haproxy_f.hrsp_2xx', 'line'],
+ 'lines': []
+ },
+ 'fhrsp_3xx': {
+ 'options': [None, 'HTTP responses with 3xx code', 'responses/s', 'frontend', 'haproxy_f.hrsp_3xx', 'line'],
+ 'lines': []
+ },
+ 'fhrsp_4xx': {
+ 'options': [None, 'HTTP responses with 4xx code', 'responses/s', 'frontend', 'haproxy_f.hrsp_4xx', 'line'],
+ 'lines': []
+ },
+ 'fhrsp_5xx': {
+ 'options': [None, 'HTTP responses with 5xx code', 'responses/s', 'frontend', 'haproxy_f.hrsp_5xx', 'line'],
+ 'lines': []
+ },
+ 'fhrsp_other': {
+ 'options': [None, 'HTTP responses with other codes (protocol error)', 'responses/s', 'frontend',
+ 'haproxy_f.hrsp_other', 'line'],
+ 'lines': []
+ },
+ 'fhrsp_total': {
+ 'options': [None, 'HTTP responses', 'responses', 'frontend', 'haproxy_f.hrsp_total', 'line'],
+ 'lines': []
+ },
+ 'bbin': {
+ 'options': [None, 'Kilobytes In', 'KB/s', 'backend', 'haproxy_b.bin', 'line'],
+ 'lines': []
+ },
+ 'bbout': {
+ 'options': [None, 'Kilobytes Out', 'KB/s', 'backend', 'haproxy_b.bout', 'line'],
+ 'lines': []
+ },
+ 'bscur': {
+ 'options': [None, 'Sessions Active', 'sessions', 'backend', 'haproxy_b.scur', 'line'],
+ 'lines': []
+ },
+ 'bqcur': {
+ 'options': [None, 'Sessions In Queue', 'sessions', 'backend', 'haproxy_b.qcur', 'line'],
+ 'lines': []
+ },
+ 'bhrsp_1xx': {
+ 'options': [None, 'HTTP responses with 1xx code', 'responses/s', 'backend', 'haproxy_b.hrsp_1xx', 'line'],
+ 'lines': []
+ },
+ 'bhrsp_2xx': {
+ 'options': [None, 'HTTP responses with 2xx code', 'responses/s', 'backend', 'haproxy_b.hrsp_2xx', 'line'],
+ 'lines': []
+ },
+ 'bhrsp_3xx': {
+ 'options': [None, 'HTTP responses with 3xx code', 'responses/s', 'backend', 'haproxy_b.hrsp_3xx', 'line'],
+ 'lines': []
+ },
+ 'bhrsp_4xx': {
+ 'options': [None, 'HTTP responses with 4xx code', 'responses/s', 'backend', 'haproxy_b.hrsp_4xx', 'line'],
+ 'lines': []
+ },
+ 'bhrsp_5xx': {
+ 'options': [None, 'HTTP responses with 5xx code', 'responses/s', 'backend', 'haproxy_b.hrsp_5xx', 'line'],
+ 'lines': []
+ },
+ 'bhrsp_other': {
+ 'options': [None, 'HTTP responses with other codes (protocol error)', 'responses/s', 'backend',
+ 'haproxy_b.hrsp_other', 'line'],
+ 'lines': []
+ },
+ 'bhrsp_total': {
+ 'options': [None, 'HTTP responses (total)', 'responses/s', 'backend', 'haproxy_b.hrsp_total', 'line'],
+ 'lines': []
+ },
+ 'bqtime': {
+ 'options': [None, 'The average queue time over the 1024 last requests', 'ms', 'backend',
+ 'haproxy_b.qtime', 'line'],
+ 'lines': []
+ },
+ 'bctime': {
+ 'options': [None, 'The average connect time over the 1024 last requests', 'ms', 'backend',
+ 'haproxy_b.ctime', 'line'],
+ 'lines': []
+ },
+ 'brtime': {
+ 'options': [None, 'The average response time over the 1024 last requests', 'ms', 'backend',
+ 'haproxy_b.rtime', 'line'],
+ 'lines': []
+ },
+ 'bttime': {
+ 'options': [None, 'The average total session time over the 1024 last requests', 'ms', 'backend',
+ 'haproxy_b.ttime', 'line'],
+ 'lines': []
+ },
+ 'health_sdown': {
+ 'options': [None, 'Backend Servers In DOWN State', 'failed servers', 'health',
+ 'haproxy_hs.down', 'line'],
+ 'lines': []
+ },
+ 'health_sup': {
+        'options': [None, 'Backend Servers In UP State', 'healthy servers', 'health',
+ 'haproxy_hs.up', 'line'],
+ 'lines': []
+ },
+ 'health_bdown': {
+ 'options': [None, 'Is Backend Alive? 1 = DOWN', 'failed backend', 'health', 'haproxy_hb.down', 'line'],
+ 'lines': []
+ },
+ 'health_idle': {
+ 'options': [None, 'The Ratio Of Polling Time Vs Total Time', 'percent', 'health', 'haproxy.idle', 'line'],
+ 'lines': [
+ ['idle', None, 'absolute']
+ ]
+ }
+}
+
+
+METRICS = {
+ 'bin': {'algorithm': 'incremental', 'divisor': 1024},
+ 'bout': {'algorithm': 'incremental', 'divisor': 1024},
+ 'scur': {'algorithm': 'absolute', 'divisor': 1},
+ 'qcur': {'algorithm': 'absolute', 'divisor': 1},
+ 'hrsp_1xx': {'algorithm': 'incremental', 'divisor': 1},
+ 'hrsp_2xx': {'algorithm': 'incremental', 'divisor': 1},
+ 'hrsp_3xx': {'algorithm': 'incremental', 'divisor': 1},
+ 'hrsp_4xx': {'algorithm': 'incremental', 'divisor': 1},
+ 'hrsp_5xx': {'algorithm': 'incremental', 'divisor': 1},
+ 'hrsp_other': {'algorithm': 'incremental', 'divisor': 1}
+}
+
+
+BACKEND_METRICS = {
+ 'qtime': {'algorithm': 'absolute', 'divisor': 1},
+ 'ctime': {'algorithm': 'absolute', 'divisor': 1},
+ 'rtime': {'algorithm': 'absolute', 'divisor': 1},
+ 'ttime': {'algorithm': 'absolute', 'divisor': 1}
+}
+
+
+REGEX = dict(url=re_compile(r'idle = (?P<idle>[0-9]+)'),
+ socket=re_compile(r'Idle_pct: (?P<idle>[0-9]+)'))
+
+
+class Service(UrlService, SocketService):
+ def __init__(self, configuration=None, name=None):
+ if 'socket' in configuration:
+ SocketService.__init__(self, configuration=configuration, name=name)
+ self.poll = SocketService
+ self.options_ = dict(regex=REGEX['socket'],
+ stat='show stat\n'.encode(),
+ info='show info\n'.encode())
+ else:
+ UrlService.__init__(self, configuration=configuration, name=name)
+ self.poll = UrlService
+ self.options_ = dict(regex=REGEX['url'],
+ stat=self.url,
+ info=url_remove_params(self.url))
+ self.order = ORDER
+ self.definitions = CHARTS
+
+ def check(self):
+ if self.poll.check(self):
+ self.create_charts()
+ self.info('We are using %s.' % self.poll.__name__)
+ return True
+ return False
+
+ def _get_data(self):
+ to_netdata = dict()
+ self.request, self.url = self.options_['stat'], self.options_['stat']
+ stat_data = self._get_stat_data()
+ self.request, self.url = self.options_['info'], self.options_['info']
+ info_data = self._get_info_data(regex=self.options_['regex'])
+
+ to_netdata.update(stat_data)
+ to_netdata.update(info_data)
+ return to_netdata or None
+
+ def _get_stat_data(self):
+ """
+ :return: dict
+ """
+ raw_data = self.poll._get_raw_data(self)
+
+ if not raw_data:
+ return dict()
+
+ raw_data = raw_data.splitlines()
+ self.data = parse_data_([dict(zip(raw_data[0].split(','), raw_data[_].split(',')))
+ for _ in range(1, len(raw_data))])
+ if not self.data:
+ return dict()
+
+ stat_data = dict()
+
+ for frontend in self.data['frontend']:
+ for metric in METRICS:
+ idx = frontend['# pxname'].replace('.', '_')
+ stat_data['_'.join(['frontend', metric, idx])] = frontend.get(metric) or 0
+
+ for backend in self.data['backend']:
+ name, idx = backend['# pxname'], backend['# pxname'].replace('.', '_')
+ stat_data['hsup_' + idx] = len([server for server in self.data['servers']
+ if server_status(server, name, 'UP')])
+ stat_data['hsdown_' + idx] = len([server for server in self.data['servers']
+ if server_status(server, name, 'DOWN')])
+ stat_data['hbdown_' + idx] = 1 if backend.get('status') == 'DOWN' else 0
+ for metric in BACKEND_METRICS:
+ stat_data['_'.join(['backend', metric, idx])] = backend.get(metric) or 0
+ hrsp_total = 0
+ for metric in METRICS:
+ stat_data['_'.join(['backend', metric, idx])] = backend.get(metric) or 0
+ if metric.startswith('hrsp_'):
+ hrsp_total += int(backend.get(metric) or 0)
+ stat_data['_'.join(['backend', 'hrsp_total', idx])] = hrsp_total
+ return stat_data
+
+ def _get_info_data(self, regex):
+ """
+ :return: dict
+ """
+ raw_data = self.poll._get_raw_data(self)
+ if not raw_data:
+ return dict()
+
+ match = regex.search(raw_data)
+ return match.groupdict() if match else dict()
+
+ @staticmethod
+ def _check_raw_data(data):
+ """
+ Check if all data has been gathered from socket
+ :param data: str
+ :return: boolean
+ """
+ return not bool(data)
+
+ def create_charts(self):
+ for front in self.data['frontend']:
+ name, idx = front['# pxname'], front['# pxname'].replace('.', '_')
+ for metric in METRICS:
+ self.definitions['f' + metric]['lines'].append(['_'.join(['frontend', metric, idx]),
+ name, METRICS[metric]['algorithm'], 1,
+ METRICS[metric]['divisor']])
+ self.definitions['fhrsp_total']['lines'].append(['_'.join(['frontend', 'hrsp_total', idx]),
+ name, 'incremental', 1, 1])
+ for back in self.data['backend']:
+ name, idx = back['# pxname'], back['# pxname'].replace('.', '_')
+ for metric in METRICS:
+ self.definitions['b' + metric]['lines'].append(['_'.join(['backend', metric, idx]),
+ name, METRICS[metric]['algorithm'], 1,
+ METRICS[metric]['divisor']])
+ self.definitions['bhrsp_total']['lines'].append(['_'.join(['backend', 'hrsp_total', idx]),
+ name, 'incremental', 1, 1])
+ for metric in BACKEND_METRICS:
+ self.definitions['b' + metric]['lines'].append(['_'.join(['backend', metric, idx]),
+ name, BACKEND_METRICS[metric]['algorithm'], 1,
+ BACKEND_METRICS[metric]['divisor']])
+ self.definitions['health_sup']['lines'].append(['hsup_' + idx, name, 'absolute'])
+ self.definitions['health_sdown']['lines'].append(['hsdown_' + idx, name, 'absolute'])
+ self.definitions['health_bdown']['lines'].append(['hbdown_' + idx, name, 'absolute'])
+
+
+def parse_data_(data):
+ def is_backend(backend):
+ return backend.get('svname') == 'BACKEND' and backend.get('# pxname') != 'stats'
+
+ def is_frontend(frontend):
+ return frontend.get('svname') == 'FRONTEND' and frontend.get('# pxname') != 'stats'
+
+ def is_server(server):
+ return not server.get('svname', '').startswith(('FRONTEND', 'BACKEND'))
+
+ if not data:
+ return None
+
+ result = defaultdict(list)
+ for elem in data:
+ if is_backend(elem):
+ result['backend'].append(elem)
+ continue
+ elif is_frontend(elem):
+ result['frontend'].append(elem)
+ continue
+ elif is_server(elem):
+ result['servers'].append(elem)
+
+ return result or None
+
+
+def server_status(server, backend_name, status='DOWN'):
+ return server.get('# pxname') == backend_name and server.get('status') == status
+
+
+def url_remove_params(url):
+ parsed = urlparse(url or str())
+ return '{scheme}://{netloc}{path}'.format(scheme=parsed.scheme, netloc=parsed.netloc, path=parsed.path)
diff --git a/collectors/python.d.plugin/haproxy/haproxy.conf b/collectors/python.d.plugin/haproxy/haproxy.conf
new file mode 100644
index 000000000..a40dd76a5
--- /dev/null
+++ b/collectors/python.d.plugin/haproxy/haproxy.conf
@@ -0,0 +1,85 @@
+# netdata python.d.plugin configuration for haproxy
+#
+# This file is in YaML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 1
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# retries sets the number of retries to be made in case of failures.
+# If unset, the default for python.d.plugin is used.
+# Attempts to restore the service are made once every update_every
+# and only if the module has collected values in the past.
+# retries: 60
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# retries: 60 # the JOB's number of restoration attempts
+# autodetection_retry: 0 # the JOB's re-check interval in seconds
+#
+# Additionally to the above, haproxy also supports the following:
+#
+# IMPORTANT: socket MUST BE readable AND writable by netdata user
+#
+# socket: 'path/to/haproxy/sock'
+#
+# OR
+# url: 'http://<ip.address>:<port>/<url>;csv;norefresh'
+# [user: USERNAME] only if stats auth is used
+# [pass: PASSWORD] only if stats auth is used
+
+# ----------------------------------------------------------------------
+# AUTO-DETECTION JOBS
+# only one of them will run (they have the same name)
+
+#via_url:
+# user : 'admin'
+# pass : 'password'
+# url : 'http://127.0.0.1:7000/haproxy_stats;csv;norefresh'
+
+#via_socket:
+# socket: '/var/run/haproxy/admin.sock'
diff --git a/collectors/python.d.plugin/hddtemp/Makefile.inc b/collectors/python.d.plugin/hddtemp/Makefile.inc
new file mode 100644
index 000000000..22852b646
--- /dev/null
+++ b/collectors/python.d.plugin/hddtemp/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += hddtemp/hddtemp.chart.py
+dist_pythonconfig_DATA += hddtemp/hddtemp.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += hddtemp/README.md hddtemp/Makefile.inc
+
diff --git a/collectors/python.d.plugin/hddtemp/README.md b/collectors/python.d.plugin/hddtemp/README.md
new file mode 100644
index 000000000..1236186a5
--- /dev/null
+++ b/collectors/python.d.plugin/hddtemp/README.md
@@ -0,0 +1,22 @@
+# hddtemp
+
+This module monitors disk temperatures from one or more hddtemp daemons.
+
+**Requirement:**
+`hddtemp` running in daemonized mode and listening on a TCP port (7634 by default).
+
+It produces one chart, **Temperature**, with a dynamic number of dimensions (one per disk).
+
+### configuration
+
+Sample:
+
+```yaml
+update_every: 3
+host: "127.0.0.1"
+port: 7634
+```
+
+If no configuration is given, the module will attempt to connect to the hddtemp daemon at `127.0.0.1:7634`.
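+
+The daemon replies with a single pipe-delimited string, one record per disk. Below is a minimal sketch of parsing such a reply with the same regular expression the module uses; the sample reply and the `parse_hddtemp_reply` helper are illustrative assumptions, not part of the module:
+
+```python
+import re
+
+# Same pattern the module uses: device name, model name, temperature (or SLP/UNK).
+RE = re.compile(r'\/dev\/([^|]+)\|([^|]+)\|([0-9]+|SLP|UNK)\|')
+
+
+def parse_hddtemp_reply(reply):
+    """Return {disk_id: temperature} from a raw hddtemp daemon reply."""
+    return dict((dev, int(temp) if temp.isdigit() else 0)
+                for dev, _model, temp in RE.findall(reply))
+
+
+# Hypothetical reply read from 127.0.0.1:7634 with two disks:
+reply = '|/dev/sda|ST2000DM001|34|C||/dev/sdb|WDC WD40EFRX|31|C|'
+print(parse_hddtemp_reply(reply))  # {'sda': 34, 'sdb': 31}
+```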
+
+---
diff --git a/collectors/python.d.plugin/hddtemp/hddtemp.chart.py b/collectors/python.d.plugin/hddtemp/hddtemp.chart.py
new file mode 100644
index 000000000..dea701171
--- /dev/null
+++ b/collectors/python.d.plugin/hddtemp/hddtemp.chart.py
@@ -0,0 +1,100 @@
+# -*- coding: utf-8 -*-
+# Description: hddtemp netdata python.d module
+# Author: Pawel Krupa (paulfantom)
+# Author: Ilya Mashchenko (l2isbad)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+
+import re
+
+from copy import deepcopy
+
+from bases.FrameworkServices.SocketService import SocketService
+
+
+ORDER = ['temperatures']
+
+CHARTS = {
+ 'temperatures': {
+ 'options': ['disks_temp', 'Disks Temperatures', 'Celsius', 'temperatures', 'hddtemp.temperatures', 'line'],
+ 'lines': [
+ # lines are created dynamically in `check()` method
+ ]}}
+
+RE = re.compile(r'\/dev\/([^|]+)\|([^|]+)\|([0-9]+|SLP|UNK)\|')
+
+
+class Disk:
+ def __init__(self, id_, name, temp):
+ self.id = id_.split('/')[-1]
+ self.name = name.replace(' ', '_')
+ self.temp = temp if temp.isdigit() else 0
+
+ def __repr__(self):
+ return self.id
+
+
+class Service(SocketService):
+ def __init__(self, configuration=None, name=None):
+ SocketService.__init__(self, configuration=configuration, name=name)
+ self.order = ORDER
+ self.definitions = deepcopy(CHARTS)
+ self._keep_alive = False
+ self.request = ""
+ self.host = "127.0.0.1"
+ self.port = 7634
+ self.do_only = self.configuration.get('devices')
+
+ def get_disks(self):
+ r = self._get_raw_data()
+
+ if not r:
+ return None
+
+ m = RE.findall(r)
+
+ if not m:
+ self.error("received data doesn't have needed records")
+ return None
+
+ rv = [Disk(*d) for d in m]
+ self.debug('available disks: {0}'.format(rv))
+
+ if self.do_only:
+ return [v for v in rv if v.id in self.do_only]
+ return rv
+
+ def get_data(self):
+ """
+ Get data from TCP/IP socket
+ :return: dict
+ """
+
+ disks = self.get_disks()
+
+ if not disks:
+ return None
+
+ return dict((d.id, d.temp) for d in disks)
+
+ def check(self):
+ """
+ Parse configuration, check if hddtemp is available, and dynamically create chart lines data
+ :return: boolean
+ """
+ self._parse_config()
+ disks = self.get_disks()
+
+ if not disks:
+ return False
+
+ for d in disks:
+ n = d.id if d.id.startswith('sd') else d.name
+ dim = [d.id, n]
+ self.definitions['temperatures']['lines'].append(dim)
+
+ return True
+
+ @staticmethod
+ def _check_raw_data(data):
+ return not bool(data)
diff --git a/collectors/python.d.plugin/hddtemp/hddtemp.conf b/collectors/python.d.plugin/hddtemp/hddtemp.conf
new file mode 100644
index 000000000..9165798a2
--- /dev/null
+++ b/collectors/python.d.plugin/hddtemp/hddtemp.conf
@@ -0,0 +1,97 @@
+# netdata python.d.plugin configuration for hddtemp
+#
+# This file is in YaML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 1
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# retries sets the number of retries to be made in case of failures.
+# If unset, the default for python.d.plugin is used.
+# Attempts to restore the service are made once every update_every
+# and only if the module has collected values in the past.
+# retries: 60
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# retries: 60 # the JOB's number of restoration attempts
+# autodetection_retry: 0 # the JOB's re-check interval in seconds
+#
+# Additionally to the above, hddtemp also supports the following:
+#
+# host: 'IP or HOSTNAME' # the host to connect to
+# port: PORT # the port to connect to
+#
+
+# By default this module will try to autodetect disks
+# (autodetection works only for disks whose names start with "sd").
+# However, this can be overridden by setting the `devices` variable to
+# an array of desired disks. Example for two disks:
+#
+# devices:
+# - sda
+# - sdb
+#
+
+# ----------------------------------------------------------------------
+# AUTO-DETECTION JOBS
+# only one of them will run (they have the same name)
+
+localhost:
+ name: 'local'
+ host: 'localhost'
+ port: 7634
+
+localipv4:
+ name: 'local'
+ host: '127.0.0.1'
+ port: 7634
+
+localipv6:
+ name: 'local'
+ host: '::1'
+ port: 7634
diff --git a/collectors/python.d.plugin/httpcheck/Makefile.inc b/collectors/python.d.plugin/httpcheck/Makefile.inc
new file mode 100644
index 000000000..4a5bd856d
--- /dev/null
+++ b/collectors/python.d.plugin/httpcheck/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += httpcheck/httpcheck.chart.py
+dist_pythonconfig_DATA += httpcheck/httpcheck.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += httpcheck/README.md httpcheck/Makefile.inc
+
diff --git a/collectors/python.d.plugin/httpcheck/README.md b/collectors/python.d.plugin/httpcheck/README.md
new file mode 100644
index 000000000..759107663
--- /dev/null
+++ b/collectors/python.d.plugin/httpcheck/README.md
@@ -0,0 +1,41 @@
+# httpcheck
+
+This module monitors a remote HTTP server for availability and response time.
+
+The following charts are drawn per job:
+
+1. **Response time** ms
+ * Time in 0.1 ms resolution in which the server responds.
+ If the connection failed, the value is missing.
+
+2. **Status** boolean
+ * Connection successful
+ * Unexpected content: No Regex match found in the response
+ * Unexpected status code: Do we get 500 errors?
+ * Connection failed: port not listening or blocked
+ * Connection timed out: host or port unreachable
+
+### configuration
+
+Sample configuration with default values:
+
+```yaml
+server:
+ url: 'http://host:port/path' # required
+ status_accepted: # optional
+ - 200
+ timeout: 1 # optional, supports decimals (e.g. 0.2)
+ update_every: 3 # optional
+ regex: 'REGULAR_EXPRESSION' # optional, see https://docs.python.org/3/howto/regex.html
+ redirect: yes # optional
+```
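+
+The status dimensions above are mutually exclusive per iteration. A minimal sketch of how a single response is classified, mirroring the module's `process_response()` (the standalone `classify()` helper and the sample values are illustrative assumptions):
+
+```python
+import re
+
+
+def classify(status, content, status_accepted=(200,), regex=None):
+    """Return the status dimension a single response falls into."""
+    if status not in status_accepted:
+        return 'bad_status'
+    if regex and re.search(regex, content) is None:
+        return 'bad_content'
+    return 'success'
+
+
+print(classify(200, '<title>My cool website!</title>', regex=r'<title>.+</title>'))  # success
+print(classify(200, 'maintenance page', regex=r'<title>.+</title>'))                 # bad_content
+print(classify(503, 'Service Unavailable'))                                          # bad_status
+```
+
+`timeout` and `no_connection` are set instead when the request itself fails, before any response can be classified.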
+
+### notes
+
+ * The status chart is primarily intended for alarms, badges or for access via API.
+ * A system/service/firewall might block netdata's access if a portscan or
+ similar is detected.
+ * This plugin is meant for simple use cases. Currently, the accuracy of the
+ response time is low and should be used as reference only.
+
+---
diff --git a/collectors/python.d.plugin/httpcheck/httpcheck.chart.py b/collectors/python.d.plugin/httpcheck/httpcheck.chart.py
new file mode 100644
index 000000000..f046f33c0
--- /dev/null
+++ b/collectors/python.d.plugin/httpcheck/httpcheck.chart.py
@@ -0,0 +1,121 @@
+# -*- coding: utf-8 -*-
+# Description: http check netdata python.d module
+# Original Author: ccremer (github.com/ccremer)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+import urllib3
+import re
+
+try:
+ from time import monotonic as time
+except ImportError:
+ from time import time
+
+from bases.FrameworkServices.UrlService import UrlService
+
+# default module values (can be overridden per job in `config`)
+update_every = 3
+priority = 60000
+retries = 60
+
+# Response
+HTTP_RESPONSE_TIME = 'time'
+HTTP_RESPONSE_LENGTH = 'length'
+
+# Status dimensions
+HTTP_SUCCESS = 'success'
+HTTP_BAD_CONTENT = 'bad_content'
+HTTP_BAD_STATUS = 'bad_status'
+HTTP_TIMEOUT = 'timeout'
+HTTP_NO_CONNECTION = 'no_connection'
+
+ORDER = ['response_time', 'response_length', 'status']
+
+CHARTS = {
+ 'response_time': {
+ 'options': [None, 'HTTP response time', 'ms', 'response', 'httpcheck.responsetime', 'line'],
+ 'lines': [
+ [HTTP_RESPONSE_TIME, 'time', 'absolute', 100, 1000]
+ ]
+ },
+ 'response_length': {
+ 'options': [None, 'HTTP response body length', 'characters', 'response', 'httpcheck.responselength', 'line'],
+ 'lines': [
+ [HTTP_RESPONSE_LENGTH, 'length', 'absolute']
+ ]
+ },
+ 'status': {
+ 'options': [None, 'HTTP status', 'boolean', 'status', 'httpcheck.status', 'line'],
+ 'lines': [
+ [HTTP_SUCCESS, 'success', 'absolute'],
+ [HTTP_BAD_CONTENT, 'bad content', 'absolute'],
+ [HTTP_BAD_STATUS, 'bad status', 'absolute'],
+ [HTTP_TIMEOUT, 'timeout', 'absolute'],
+ [HTTP_NO_CONNECTION, 'no connection', 'absolute']
+ ]
+ }
+}
+
+
+class Service(UrlService):
+ def __init__(self, configuration=None, name=None):
+ UrlService.__init__(self, configuration=configuration, name=name)
+ pattern = self.configuration.get('regex')
+ self.regex = re.compile(pattern) if pattern else None
+ self.status_codes_accepted = self.configuration.get('status_accepted', [200])
+ self.follow_redirect = self.configuration.get('redirect', True)
+ self.order = ORDER
+ self.definitions = CHARTS
+
+ def _get_data(self):
+ """
+ Format data received from http request
+ :return: dict
+ """
+ data = dict()
+ data[HTTP_SUCCESS] = 0
+ data[HTTP_BAD_CONTENT] = 0
+ data[HTTP_BAD_STATUS] = 0
+ data[HTTP_TIMEOUT] = 0
+ data[HTTP_NO_CONNECTION] = 0
+ url = self.url
+ try:
+ start = time()
+ status, content = self._get_raw_data_with_status(retries=1 if self.follow_redirect else False,
+ redirect=self.follow_redirect)
+ diff = time() - start
+ data[HTTP_RESPONSE_TIME] = max(round(diff * 10000), 0)
+ self.debug('Url: {url}. Host responded with status code {code} in {diff} s'.format(
+ url=url, code=status, diff=diff
+ ))
+ self.process_response(content, data, status)
+
+ except urllib3.exceptions.NewConnectionError as error:
+ self.debug('Connection failed: {url}. Error: {error}'.format(url=url, error=error))
+ data[HTTP_NO_CONNECTION] = 1
+
+ except (urllib3.exceptions.TimeoutError, urllib3.exceptions.PoolError) as error:
+ self.debug('Connection timed out: {url}. Error: {error}'.format(url=url, error=error))
+ data[HTTP_TIMEOUT] = 1
+
+ except urllib3.exceptions.HTTPError as error:
+ self.debug('Connection failed: {url}. Error: {error}'.format(url=url, error=error))
+ data[HTTP_NO_CONNECTION] = 1
+
+ except (TypeError, AttributeError) as error:
+ self.error('Url: {url}. Error: {error}'.format(url=url, error=error))
+ return None
+
+ return data
+
+ def process_response(self, content, data, status):
+ data[HTTP_RESPONSE_LENGTH] = len(content)
+ self.debug('Content: \n\n{content}\n'.format(content=content))
+ if status in self.status_codes_accepted:
+ if self.regex and self.regex.search(content) is None:
+ self.debug('No match for regex "{regex}" found'.format(regex=self.regex.pattern))
+ data[HTTP_BAD_CONTENT] = 1
+ else:
+ data[HTTP_SUCCESS] = 1
+ else:
+ data[HTTP_BAD_STATUS] = 1
diff --git a/collectors/python.d.plugin/httpcheck/httpcheck.conf b/collectors/python.d.plugin/httpcheck/httpcheck.conf
new file mode 100644
index 000000000..bd21b5af8
--- /dev/null
+++ b/collectors/python.d.plugin/httpcheck/httpcheck.conf
@@ -0,0 +1,100 @@
+# netdata python.d.plugin configuration for httpcheck
+#
+# This file is in YaML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the httpcheck default is used, which is 3 seconds.
+# update_every: 1
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# chart_cleanup sets the default chart cleanup interval in iterations.
+# A chart is marked as obsolete if it has not been updated
+# 'chart_cleanup' iterations in a row.
+# They will be hidden immediately (not offered to dashboard viewer,
+# streamed upstream and archived to backends) and deleted one hour
+# later (configurable from netdata.conf).
+# -- For this plugin, cleanup MUST be disabled, otherwise we lose response
+# time charts
+chart_cleanup: 0
+
+# Autodetection and retries do not work for this plugin
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# -------------------------------
+# ATTENTION: Any valid configuration will be accepted, even if initial connection fails!
+# -------------------------------
+#
+# There is intentionally no default config, e.g. for 'localhost'
+
+# job_name:
+# name: myname # [optional] the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 3 # [optional] the JOB's data collection frequency
+# priority: 60000 # [optional] the JOB's order on the dashboard
+# retries: 60 # [optional] the JOB's number of restoration attempts
+# timeout: 1 # [optional] the timeout when connecting, supports decimals (e.g. 0.5s)
+# url: 'http[s]://host-ip-or-dns[:port][path]'
+# # [required] the remote host url to connect to. If [:port] is missing, it defaults to 80
+# # for HTTP and 443 for HTTPS. [path] is optional too, defaults to /
+# method: GET # [optional] the HTTP request method (POST, PUT, DELETE, HEAD etc.)
+# redirect: yes # [optional] If the remote host returns 3xx status codes, the redirection url will be
+# # followed (default).
+# status_accepted: # [optional] By default, 200 is accepted. Anything else will result in 'bad status' in the
+# # status chart, however: The response time will still be > 0, since the
+# # host responded with something.
+# # If redirect is enabled, the accepted status will be checked against the redirected page.
+# - 200 # Multiple status codes are possible. If you specify 'status_accepted', you would still
+# # need to add '200'. E.g. 'status_accepted: [301]' will trigger an error in 'bad status'
+# # if code is 200. Do specify numerical entries such as 200, not 'OK'.
+# regex: None # [optional] If the status code is accepted, the content of the response will be searched for this
+# # regex (if defined). Be aware that you may need to escape the regex string. If redirect is enabled,
+# # the regex will be matched to the redirected page, not the initial 3xx response.
+
+# Simple example:
+#
+# jira:
+# url: 'https://jira.localdomain/'
+
+
+# Complex example:
+#
+# cool_website:
+# url: 'http://cool.website:8080/home'
+# status_accepted:
+# - 200
+# - 204
+# regex: <title>My cool website!<\/title>
+# timeout: 2
+
+# This plugin is intended for simple cases. Currently, the accuracy of the response time is low and should be used as reference only.
+
diff --git a/collectors/python.d.plugin/icecast/Makefile.inc b/collectors/python.d.plugin/icecast/Makefile.inc
new file mode 100644
index 000000000..cb7c6fa0e
--- /dev/null
+++ b/collectors/python.d.plugin/icecast/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += icecast/icecast.chart.py
+dist_pythonconfig_DATA += icecast/icecast.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += icecast/README.md icecast/Makefile.inc
+
diff --git a/collectors/python.d.plugin/icecast/README.md b/collectors/python.d.plugin/icecast/README.md
new file mode 100644
index 000000000..a28a6c398
--- /dev/null
+++ b/collectors/python.d.plugin/icecast/README.md
@@ -0,0 +1,26 @@
+# icecast
+
+This module monitors the number of listeners for active sources.
+
+**Requirements:**
+ * icecast version >= 2.4.0
+
+It produces the following charts:
+
+1. **Listeners** in listeners
+ * one dimension per active source (`source 0`, `source 1`, ...)
+
+### configuration
+
+Only the `url` to the server's `/status-json.xsl` endpoint is needed.
+
+Here is an example for a remote server:
+
+```yaml
+remote:
+ url : 'http://1.2.3.4:8443/status-json.xsl'
+```
+
+Without configuration, the module attempts to connect to `http://localhost:8443/status-json.xsl`.
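+
+The endpoint returns a JSON document with one entry per mounted source; the module reads the `listeners` field of each source. A minimal sketch, using a hypothetical two-source reply (field names match what the module reads):
+
+```python
+import json
+
+# Hypothetical /status-json.xsl reply with two mounted sources.
+raw = '''{"icestats": {"source": [
+    {"server_name": "radio one", "stream_start": "Tue, 06 Nov 2018 10:00:00 +0000", "listeners": 12},
+    {"server_name": "radio two", "stream_start": "Tue, 06 Nov 2018 11:30:00 +0000", "listeners": 3}
+]}}'''
+
+sources = json.loads(raw)['icestats']['source']
+sources = sources if isinstance(sources, list) else [sources]  # a single source is not wrapped in a list
+
+# One dimension per source, named by position, as the module does.
+print(dict(('source_{0}'.format(i), s['listeners']) for i, s in enumerate(sources)))
+# {'source_0': 12, 'source_1': 3}
+```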
+
+---
diff --git a/collectors/python.d.plugin/icecast/icecast.chart.py b/collectors/python.d.plugin/icecast/icecast.chart.py
new file mode 100644
index 000000000..d8813f9ba
--- /dev/null
+++ b/collectors/python.d.plugin/icecast/icecast.chart.py
@@ -0,0 +1,97 @@
+# -*- coding: utf-8 -*-
+# Description: icecast netdata python.d module
+# Author: Ilya Mashchenko (l2isbad)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+import json
+
+from bases.FrameworkServices.UrlService import UrlService
+
+
+priority = 60000
+retries = 60
+
+# charts order (can be overridden if you want less charts, or different order)
+ORDER = ['listeners']
+
+CHARTS = {
+ 'listeners': {
+ 'options': [None, 'Number Of Listeners', 'listeners', 'listeners', 'icecast.listeners', 'line'],
+ 'lines': [
+ ]
+ }
+}
+
+
+class Source:
+ def __init__(self, idx, data):
+ self.name = 'source_{0}'.format(idx)
+ self.is_active = data.get('stream_start') and data.get('server_name')
+ self.listeners = data['listeners']
+
+
+class Service(UrlService):
+ def __init__(self, configuration=None, name=None):
+ UrlService.__init__(self, configuration=configuration, name=name)
+ self.order = ORDER
+ self.definitions = CHARTS
+ self.url = self.configuration.get('url')
+ self._manager = self._build_manager()
+
+ def check(self):
+ """
+ Add active sources to the "listeners" chart
+ :return: bool
+ """
+ sources = self.get_sources()
+ if not sources:
+ return None
+
+ active_sources = 0
+ for idx, raw_source in enumerate(sources):
+ if Source(idx, raw_source).is_active:
+ active_sources += 1
+ dim_id = 'source_{0}'.format(idx)
+ dim = 'source {0}'.format(idx)
+ self.definitions['listeners']['lines'].append([dim_id, dim])
+
+ return bool(active_sources)
+
+ def _get_data(self):
+ """
+ Get number of listeners for every source
+ :return: dict
+ """
+ sources = self.get_sources()
+ if not sources:
+ return None
+
+ data = dict()
+
+ for idx, raw_source in enumerate(sources):
+ source = Source(idx, raw_source)
+ data[source.name] = source.listeners
+
+ return data
+
+ def get_sources(self):
+ """
+ Format data received from http request and return list of sources
+ :return: list
+ """
+
+ raw_data = self._get_raw_data()
+ if not raw_data:
+ return None
+
+ try:
+ data = json.loads(raw_data)
+ except ValueError as error:
+ self.error('JSON decode error:', error)
+ return None
+
+ sources = data['icestats'].get('source')
+ if not sources:
+ return None
+
+ return sources if isinstance(sources, list) else [sources]
diff --git a/collectors/python.d.plugin/icecast/icecast.conf b/collectors/python.d.plugin/icecast/icecast.conf
new file mode 100644
index 000000000..a900d06d3
--- /dev/null
+++ b/collectors/python.d.plugin/icecast/icecast.conf
@@ -0,0 +1,83 @@
+# netdata python.d.plugin configuration for icecast
+#
+# This file is in YaML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 1
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# retries sets the number of retries to be made in case of failures.
+# If unset, the default for python.d.plugin is used.
+# Attempts to restore the service are made once every update_every
+# and only if the module has collected values in the past.
+# retries: 60
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# retries: 60 # the JOB's number of restoration attempts
+# autodetection_retry: 0 # the JOB's re-check interval in seconds
+#
+# Additionally to the above, icecast also supports the following:
+#
+# url: 'URL' # the URL to fetch icecast's stats
+#
+# if the URL is password protected, the following are supported:
+#
+# user: 'username'
+# pass: 'password'
+
+# ----------------------------------------------------------------------
+# AUTO-DETECTION JOBS
+# only one of them will run (they have the same name)
+
+localhost:
+ name : 'local'
+ url : 'http://localhost:8443/status-json.xsl'
+
+localipv4:
+ name : 'local'
+ url : 'http://127.0.0.1:8443/status-json.xsl' \ No newline at end of file
diff --git a/collectors/python.d.plugin/ipfs/Makefile.inc b/collectors/python.d.plugin/ipfs/Makefile.inc
new file mode 100644
index 000000000..68458cb38
--- /dev/null
+++ b/collectors/python.d.plugin/ipfs/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += ipfs/ipfs.chart.py
+dist_pythonconfig_DATA += ipfs/ipfs.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += ipfs/README.md ipfs/Makefile.inc
+
diff --git a/collectors/python.d.plugin/ipfs/README.md b/collectors/python.d.plugin/ipfs/README.md
new file mode 100644
index 000000000..a30649a5f
--- /dev/null
+++ b/collectors/python.d.plugin/ipfs/README.md
@@ -0,0 +1,25 @@
+# ipfs
+
+This module monitors basic [IPFS](https://ipfs.io) information.
+
+1. **Bandwidth** in kbits/s
+ * in
+ * out
+
+2. **Peers**
+ * peers
+
+### configuration
+
+Only the URL to the IPFS API server is needed.
+
+Sample:
+
+```yaml
+localhost:
+ name : 'local'
+ url : 'http://localhost:5001'
+```
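+
+Under the hood the module polls a few endpoints of the IPFS HTTP API and picks individual keys out of each JSON reply. A minimal standalone sketch of the same calls (the `get_json` helper and the plain `urlopen` access are illustrative assumptions; the module itself goes through netdata's `UrlService`):
+
+```python
+import json
+from urllib.request import urlopen  # Python 3
+
+BASE = 'http://localhost:5001'  # same default URL the module assumes
+
+
+def get_json(sub_url):
+    with urlopen(BASE + sub_url) as resp:
+        return json.loads(resp.read().decode('utf-8'))
+
+
+bw = get_json('/api/v0/stats/bw')        # {'RateIn': ..., 'RateOut': ...}
+peers = get_json('/api/v0/swarm/peers')  # {'Peers': [...]}
+repo = get_json('/api/v0/stats/repo')    # {'RepoSize': ..., 'NumObjects': ..., 'StorageMax': ...}
+
+print({'in': int(bw['RateIn']), 'out': int(bw['RateOut']),
+       'peers': len(peers['Peers']), 'size': int(repo['RepoSize'])})
+```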
+
+---
+
diff --git a/collectors/python.d.plugin/ipfs/ipfs.chart.py b/collectors/python.d.plugin/ipfs/ipfs.chart.py
new file mode 100644
index 000000000..3f6794e48
--- /dev/null
+++ b/collectors/python.d.plugin/ipfs/ipfs.chart.py
@@ -0,0 +1,140 @@
+# -*- coding: utf-8 -*-
+# Description: IPFS netdata python.d module
+# Authors: davidak
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+import json
+
+from bases.FrameworkServices.UrlService import UrlService
+
+# default module values (can be overridden per job in `config`)
+# update_every = 2
+priority = 60000
+retries = 60
+
+# default job configuration (overridden by python.d.plugin)
+# config = {'local': {
+# 'update_every': update_every,
+# 'retries': retries,
+# 'priority': priority,
+# 'url': 'http://localhost:5001'
+# }}
+
+# charts order (can be overridden if you want less charts, or different order)
+ORDER = ['bandwidth', 'peers', 'repo_size', 'repo_objects']
+
+CHARTS = {
+ 'bandwidth': {
+ 'options': [None, 'IPFS Bandwidth', 'kbits/s', 'Bandwidth', 'ipfs.bandwidth', 'line'],
+ 'lines': [
+ ['in', None, 'absolute', 8, 1000],
+ ['out', None, 'absolute', -8, 1000]
+ ]
+ },
+ 'peers': {
+ 'options': [None, 'IPFS Peers', 'peers', 'Peers', 'ipfs.peers', 'line'],
+ 'lines': [
+ ['peers', None, 'absolute']
+ ]
+ },
+ 'repo_size': {
+ 'options': [None, 'IPFS Repo Size', 'GB', 'Size', 'ipfs.repo_size', 'area'],
+ 'lines': [
+ ['avail', None, 'absolute', 1, 1e9],
+ ['size', None, 'absolute', 1, 1e9],
+ ]
+ },
+ 'repo_objects': {
+ 'options': [None, 'IPFS Repo Objects', 'objects', 'Objects', 'ipfs.repo_objects', 'line'],
+ 'lines': [
+ ['objects', None, 'absolute', 1, 1],
+ ['pinned', None, 'absolute', 1, 1],
+ ['recursive_pins', None, 'absolute', 1, 1]
+ ]
+ }
+}
+
+SI_zeroes = {
+ 'k': 3,
+ 'm': 6,
+ 'g': 9,
+ 't': 12,
+ 'p': 15,
+ 'e': 18,
+ 'z': 21,
+ 'y': 24
+}
+
+
+class Service(UrlService):
+ def __init__(self, configuration=None, name=None):
+ UrlService.__init__(self, configuration=configuration, name=name)
+ self.baseurl = self.configuration.get('url', 'http://localhost:5001')
+ self.order = ORDER
+ self.definitions = CHARTS
+ self.__storage_max = None
+ self.do_pinapi = self.configuration.get('pinapi')
+
+ def _get_json(self, sub_url):
+ """
+ :return: json decoding of the specified url
+ """
+ self.url = self.baseurl + sub_url
+ try:
+ return json.loads(self._get_raw_data())
+ except (TypeError, ValueError):
+ return dict()
+
+ @staticmethod
+ def _recursive_pins(keys):
+        return sum(1 for k in keys if keys[k]['Type'] == 'recursive')
+
+ @staticmethod
+ def _dehumanize(store_max):
+ # convert from '10Gb' to 10000000000
+ if not isinstance(store_max, int):
+ store_max = store_max.lower()
+ if store_max.endswith('b'):
+ val, units = store_max[:-2], store_max[-2]
+ if units in SI_zeroes:
+ val += '0'*SI_zeroes[units]
+ store_max = val
+ try:
+ store_max = int(store_max)
+ except (TypeError, ValueError):
+ store_max = None
+ return store_max
+
+ def _storagemax(self, store_cfg):
+ if self.__storage_max is None:
+ self.__storage_max = self._dehumanize(store_cfg)
+ return self.__storage_max
+
+ def _get_data(self):
+ """
+ Get data from API
+ :return: dict
+ """
+ # suburl : List of (result-key, original-key, transform-func)
+ cfg = {
+ '/api/v0/stats/bw':
+ [('in', 'RateIn', int), ('out', 'RateOut', int)],
+ '/api/v0/swarm/peers':
+ [('peers', 'Peers', len)],
+ '/api/v0/stats/repo':
+ [('size', 'RepoSize', int), ('objects', 'NumObjects', int), ('avail', 'StorageMax', self._storagemax)],
+ }
+ if self.do_pinapi:
+ cfg.update({
+ '/api/v0/pin/ls':
+ [('pinned', 'Keys', len), ('recursive_pins', 'Keys', self._recursive_pins)]
+ })
+ r = dict()
+ for suburl in cfg:
+ in_json = self._get_json(suburl)
+ for new_key, orig_key, xmute in cfg[suburl]:
+ try:
+ r[new_key] = xmute(in_json[orig_key])
+ except Exception:
+ continue
+ return r or None
diff --git a/collectors/python.d.plugin/ipfs/ipfs.conf b/collectors/python.d.plugin/ipfs/ipfs.conf
new file mode 100644
index 000000000..e3df0f6bb
--- /dev/null
+++ b/collectors/python.d.plugin/ipfs/ipfs.conf
@@ -0,0 +1,79 @@
+# netdata python.d.plugin configuration for ipfs
+#
+# This file is in YaML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 1
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# retries sets the number of retries to be made in case of failures.
+# If unset, the default for python.d.plugin is used.
+# Attempts to restore the service are made once every update_every
+# and only if the module has collected values in the past.
+# retries: 60
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# retries: 60 # the JOB's number of restoration attempts
+# autodetection_retry: 0 # the JOB's re-check interval in seconds
+#
+# Additionally to the above, ipfs also supports the following:
+#
+# url: 'URL' # URL to the IPFS API
+# pinapi: no # Set status of IPFS pinned object polling
+# # Currently defaults to disabled due to IPFS Bug
+# # https://github.com/ipfs/go-ipfs/issues/3874
+# # resulting in very high CPU Usage
+#
+# ----------------------------------------------------------------------
+# AUTO-DETECTION JOBS
+# only one of them will run (they have the same name)
+
+localhost:
+ name : 'local'
+ url : 'http://localhost:5001'
+ pinapi : no
diff --git a/collectors/python.d.plugin/isc_dhcpd/Makefile.inc b/collectors/python.d.plugin/isc_dhcpd/Makefile.inc
new file mode 100644
index 000000000..44343fc9d
--- /dev/null
+++ b/collectors/python.d.plugin/isc_dhcpd/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += isc_dhcpd/isc_dhcpd.chart.py
+dist_pythonconfig_DATA += isc_dhcpd/isc_dhcpd.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += isc_dhcpd/README.md isc_dhcpd/Makefile.inc
+
diff --git a/collectors/python.d.plugin/isc_dhcpd/README.md b/collectors/python.d.plugin/isc_dhcpd/README.md
new file mode 100644
index 000000000..334d86e33
--- /dev/null
+++ b/collectors/python.d.plugin/isc_dhcpd/README.md
@@ -0,0 +1,34 @@
+# isc_dhcpd
+
+This module monitors the dhcpd leases database and shows all active leases for the given pools.
+
+**Requirements:**
+ * dhcpd leases file MUST BE readable by netdata
+ * pools MUST BE in CIDR format
+
+It produces:
+
+1. **Pools utilization** Aggregate chart for all pools.
+ * utilization in percent
+
+2. **Total leases**
+ * leases (overall number of leases for all pools)
+
+3. **Active leases** for every pool
+ * leases (number of active leases in pool)
+
+
+### configuration
+
+Sample:
+
+```yaml
+local:
+ leases_path : '/var/lib/dhcp/dhcpd.leases'
+ pools : '192.168.3.0/24 192.168.4.0/24 192.168.5.0/24'
+```
+
+On Python 2 you need to install `py2-ipaddress` for the plugin to work.
+The module will not work if no configuration is given.
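+
+The module reads `dhcpd.leases` directly and keeps only entries whose `binding state` is `active` and whose `ends` time is still in the future. A minimal sketch of that parsing step (the sample lease text below is an illustrative assumption):
+
+```python
+from collections import defaultdict
+
+# Hypothetical excerpt of /var/lib/dhcp/dhcpd.leases:
+sample = """\
+lease 192.168.3.15 {
+  starts 3 2018/11/07 10:00:00;
+  ends 3 2018/11/07 22:00:00;
+  binding state active;
+}
+"""
+
+result = defaultdict(dict)
+address = None
+for row in sample.splitlines():
+    row = row.strip()
+    if row.startswith('lease'):
+        address = row[6:-2]                    # 'lease 192.168.3.15 {' -> '192.168.3.15'
+    elif row.startswith('ends'):
+        result[address]['ends'] = row[5:-1]    # drop 'ends ' and the trailing ';'
+    elif row.startswith('binding state'):
+        result[address]['state'] = row[14:-1]  # drop 'binding state ' and the trailing ';'
+
+print(dict(result))
+# {'192.168.3.15': {'ends': '3 2018/11/07 22:00:00', 'state': 'active'}}
+```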
+
+---
diff --git a/collectors/python.d.plugin/isc_dhcpd/isc_dhcpd.chart.py b/collectors/python.d.plugin/isc_dhcpd/isc_dhcpd.chart.py
new file mode 100644
index 000000000..a9f274949
--- /dev/null
+++ b/collectors/python.d.plugin/isc_dhcpd/isc_dhcpd.chart.py
@@ -0,0 +1,195 @@
+# -*- coding: utf-8 -*-
+# Description: isc dhcpd lease netdata python.d module
+# Author: l2isbad
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+import os
+import re
+import time
+
+
+try:
+ import ipaddress
+ HAVE_IP_ADDRESS = True
+except ImportError:
+ HAVE_IP_ADDRESS = False
+
+from collections import defaultdict
+from copy import deepcopy
+
+from bases.FrameworkServices.SimpleService import SimpleService
+
+priority = 60000
+retries = 60
+
+ORDER = ['pools_utilization', 'pools_active_leases', 'leases_total']
+
+CHARTS = {
+ 'pools_utilization': {
+ 'options': [None, 'Pools Utilization', '%', 'utilization', 'isc_dhcpd.utilization', 'line'],
+ 'lines': []
+ },
+ 'pools_active_leases': {
+ 'options': [None, 'Active Leases Per Pool', 'leases', 'active leases', 'isc_dhcpd.active_leases', 'line'],
+ 'lines': []
+ },
+ 'leases_total': {
+ 'options': [None, 'All Active Leases', 'leases', 'active leases', 'isc_dhcpd.leases_total', 'line'],
+ 'lines': [
+ ['leases_total', 'leases', 'absolute']
+ ],
+ 'variables': [
+ ['leases_size']
+ ]
+ }
+}
+
+
+class DhcpdLeasesFile:
+ def __init__(self, path):
+ self.path = path
+ self.mod_time = 0
+ self.size = 0
+
+ def is_valid(self):
+ return os.path.isfile(self.path) and os.access(self.path, os.R_OK)
+
+ def is_changed(self):
+ mod_time = os.path.getmtime(self.path)
+ if mod_time != self.mod_time:
+ self.mod_time = mod_time
+ self.size = int(os.path.getsize(self.path) / 1024)
+ return True
+ return False
+
+ def get_data(self):
+ try:
+ with open(self.path) as leases:
+ result = defaultdict(dict)
+ for row in leases:
+ row = row.strip()
+ if row.startswith('lease'):
+ address = row[6:-2]
+ elif row.startswith('iaaddr'):
+ address = row[7:-2]
+ elif row.startswith('ends'):
+ result[address]['ends'] = row[5:-1]
+ elif row.startswith('binding state'):
+ result[address]['state'] = row[14:-1]
+ return dict((k, v) for k, v in result.items() if len(v) == 2)
+ except (OSError, IOError):
+ return None
+
+
+class Pool:
+ def __init__(self, name, network):
+ self.id = re.sub(r'[:/.-]+', '_', name)
+ self.name = name
+ self.network = ipaddress.ip_network(address=u'%s' % network)
+
+ def num_hosts(self):
+ return self.network.num_addresses - 2
+
+ def __contains__(self, item):
+ return item.address in self.network
+
+
+class Lease:
+ def __init__(self, address, ends, state):
+ self.address = ipaddress.ip_address(address=u'%s' % address)
+ self.ends = ends
+ self.state = state
+
+ def is_active(self, current_time):
+ # lease_end_time might be epoch
+ if self.ends.startswith('epoch'):
+ epoch = int(self.ends.split()[1].replace(';', ''))
+ return epoch - current_time > 0
+ # max. int for lease-time causes lease to expire in year 2038.
+ # dhcpd puts 'never' in the ends section of active lease
+ elif self.ends == 'never':
+ return True
+ return time.mktime(time.strptime(self.ends, '%w %Y/%m/%d %H:%M:%S')) - current_time > 0
+
+ def is_valid(self):
+ return self.state == 'active'
+
+
+class Service(SimpleService):
+ def __init__(self, configuration=None, name=None):
+ SimpleService.__init__(self, configuration=configuration, name=name)
+ self.order = ORDER
+ self.definitions = deepcopy(CHARTS)
+
+ lease_path = self.configuration.get('leases_path', '/var/lib/dhcp/dhcpd.leases')
+ self.dhcpd_leases = DhcpdLeasesFile(path=lease_path)
+ self.pools = list()
+ self.data = dict()
+
+ # Will work only with 'default' db-time-format (weekday year/month/day hour:minute:second)
+ # TODO: update algorithm to parse correctly 'local' db-time-format
+
+ def check(self):
+ if not HAVE_IP_ADDRESS:
+ self.error("'python-ipaddress' module is needed")
+ return False
+
+ if not self.dhcpd_leases.is_valid():
+            self.error("Make sure '{path}' exists and is readable by netdata".format(path=self.dhcpd_leases.path))
+ return False
+
+ pools = self.configuration.get('pools')
+ if not pools:
+ self.error('Pools are not defined')
+ return False
+
+ for pool in pools:
+ try:
+ new_pool = Pool(name=pool, network=pools[pool])
+ except ValueError as error:
+ self.error("'{pool}' was removed, error: {error}".format(pool=pools[pool], error=error))
+ else:
+ self.pools.append(new_pool)
+
+ self.create_charts()
+ return bool(self.pools)
+
+ def get_data(self):
+ """
+ :return: dict
+ """
+ if not self.dhcpd_leases.is_changed():
+ return self.data
+
+ raw_leases = self.dhcpd_leases.get_data()
+ if not raw_leases:
+ self.data = dict()
+ return None
+
+ active_leases = list()
+ current_time = time.mktime(time.gmtime())
+
+ for address in raw_leases:
+ try:
+ new_lease = Lease(address, **raw_leases[address])
+ except ValueError:
+ continue
+ else:
+ if new_lease.is_active(current_time) and new_lease.is_valid():
+ active_leases.append(new_lease)
+
+ for pool in self.pools:
+ count = len([ip for ip in active_leases if ip in pool])
+ self.data[pool.id + '_active_leases'] = count
+ self.data[pool.id + '_utilization'] = float(count) / pool.num_hosts() * 10000
+
+ self.data['leases_size'] = self.dhcpd_leases.size
+ self.data['leases_total'] = len(active_leases)
+
+ return self.data
+
+ def create_charts(self):
+ for pool in self.pools:
+ self.definitions['pools_utilization']['lines'].append([pool.id + '_utilization', pool.name,
+ 'absolute', 1, 100])
+ self.definitions['pools_active_leases']['lines'].append([pool.id + '_active_leases', pool.name])
diff --git a/collectors/python.d.plugin/isc_dhcpd/isc_dhcpd.conf b/collectors/python.d.plugin/isc_dhcpd/isc_dhcpd.conf
new file mode 100644
index 000000000..4a4c4a5e3
--- /dev/null
+++ b/collectors/python.d.plugin/isc_dhcpd/isc_dhcpd.conf
@@ -0,0 +1,81 @@
+# netdata python.d.plugin configuration for isc dhcpd leases
+#
+# This file is in YaML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 1
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# retries sets the number of retries to be made in case of failures.
+# If unset, the default for python.d.plugin is used.
+# Attempts to restore the service are made once every update_every
+# and only if the module has collected values in the past.
+# retries: 60
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# retries: 60 # the JOB's number of restoration attempts
+# autodetection_retry: 0 # the JOB's re-check interval in seconds
+#
+# Additionally to the above, isc_dhcpd supports the following:
+#
+# leases_path: 'PATH' # the path to dhcpd.leases file
+# pools:
+# office: '192.168.2.0/24' # name(dimension): pool in CIDR format
+# wifi: '192.168.3.0/24' # name(dimension): pool in CIDR format
+# 192.168.4.0/24: '192.168.4.0/24' # name(dimension): pool in CIDR format
+#
+#-----------------------------------------------------------------------
+# IMPORTANT notes
+#
+# 1. Make sure leases file is readable by netdata.
+# 2. Current implementation works only with 'default' db-time-format
+# (weekday year/month/day hour:minute:second).
+# This is the default, so it will work in most cases.
+# 3. Pools MUST BE in CIDR format.
+#
+# ----------------------------------------------------------------------
diff --git a/collectors/python.d.plugin/linux_power_supply/Makefile.inc b/collectors/python.d.plugin/linux_power_supply/Makefile.inc
new file mode 100644
index 000000000..1864ba524
--- /dev/null
+++ b/collectors/python.d.plugin/linux_power_supply/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += linux_power_supply/linux_power_supply.chart.py
+dist_pythonconfig_DATA += linux_power_supply/linux_power_supply.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += linux_power_supply/README.md linux_power_supply/Makefile.inc
+
diff --git a/collectors/python.d.plugin/linux_power_supply/README.md b/collectors/python.d.plugin/linux_power_supply/README.md
new file mode 100644
index 000000000..5cfbe41ce
--- /dev/null
+++ b/collectors/python.d.plugin/linux_power_supply/README.md
@@ -0,0 +1,67 @@
+# linux\_power\_supply
+
+This module monitors various metrics reported by power supply drivers
+on Linux. This allows tracking and alerting on things like remaining
+battery capacity.
+
+Depending on the underlying driver, it may provide the following charts
+and metrics:
+
+1. Capacity: The power supply capacity expressed as a percentage.
+ * capacity\_now
+
+2. Charge: The charge for the power supply, expressed as microampere-hours (µAh).
+ * charge\_full\_design
+ * charge\_full
+ * charge\_now
+ * charge\_empty
+ * charge\_empty\_design
+
+3. Energy: The energy for the power supply, expressed as microwatt-hours (µWh).
+ * energy\_full\_design
+ * energy\_full
+ * energy\_now
+ * energy\_empty
+ * energy\_empty\_design
+
+4. Voltage: The voltage for the power supply, expressed as microvolts (µV).
+ * voltage\_max\_design
+ * voltage\_max
+ * voltage\_now
+ * voltage\_min
+ * voltage\_min\_design
+
+### configuration
+
+Sample:
+
+```yaml
+battery:
+ supply: 'BAT0'
+ charts: 'capacity charge energy voltage'
+```
+
+The `supply` key specifies the name of the power supply device to monitor.
+You can use `ls /sys/class/power_supply` to get a list of such devices
+on your system.
+
+The `charts` key is a space-separated list of which charts to try
+to display. If unset, it defaults to `capacity` only.
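+
+Each metric is read from a plain text file under the supply's sysfs directory. A minimal sketch of reading a few attributes for a hypothetical `BAT0` supply (which attributes exist varies by driver):
+
+```python
+import os
+
+SYSPATH = '/sys/class/power_supply/BAT0'  # hypothetical supply name
+
+
+def read_attr(name):
+    """Return the integer value of a sysfs attribute, or None if it is absent."""
+    try:
+        with open(os.path.join(SYSPATH, name)) as attr:
+            return int(attr.read())
+    except (OSError, IOError, ValueError):
+        return None
+
+
+print(read_attr('capacity'))     # percent, e.g. 87
+print(read_attr('charge_now'))   # µAh, e.g. 3014000
+print(read_attr('voltage_now'))  # µV, e.g. 12158000
+```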
+
+### notes
+
+* Most drivers provide at least the first chart. Battery powered ACPI
+compliant systems (like most laptops) provide all but the third, but do
+not provide all of the metrics for each chart.
+
+* Charge, energy, and voltage are reported with a _very_ high precision
+by the power\_supply framework. Usually, this is far higher than the
+actual hardware supports reporting, so expect to see changes in these
+charts jump instead of scaling smoothly.
+
+* If a `max` or `full` attribute is defined by the driver, but not a
+corresponding `min` or `empty` attribute, then netdata will still provide
+the corresponding `min` or `empty`, which will then always read as zero.
+This way, alerts which match on these will still work.
+
+---
diff --git a/collectors/python.d.plugin/linux_power_supply/linux_power_supply.chart.py b/collectors/python.d.plugin/linux_power_supply/linux_power_supply.chart.py
new file mode 100644
index 000000000..71d834e5d
--- /dev/null
+++ b/collectors/python.d.plugin/linux_power_supply/linux_power_supply.chart.py
@@ -0,0 +1,160 @@
+# -*- coding: utf-8 -*-
+# Description: Linux power_supply netdata python.d module
+# Author: Austin S. Hemmelgarn (Ferroin)
+
+import os
+import platform
+
+from bases.FrameworkServices.SimpleService import SimpleService
+
+# Everything except percentages is reported as µ units.
+PRECISION = 10 ** 6
+
+# A priority of 90000 places us next to the other PSU related stuff.
+PRIORITY = 90000
+
+# We add our charts dynamically when we probe for the device attributes,
+# so these are empty by default.
+ORDER = []
+
+CHARTS = {}
+
+
+def get_capacity_chart(syspath):
+ # Capacity is measured in percent. We track one value.
+ options = [None, 'Capacity', '%', 'power_supply', 'power_supply.capacity', 'line']
+ lines = list()
+ attr_now = 'capacity'
+ if get_sysfs_value(os.path.join(syspath, attr_now)) is not None:
+ lines.append([attr_now, attr_now, 'absolute', 1, 1])
+ return {'capacity': {'options': options, 'lines': lines}}, [attr_now]
+ else:
+ return None, None
+
+
+def get_generic_chart(syspath, name, unit, maxname, minname):
+ # Used to generate charts for energy, charge, and voltage.
+ options = [None, name.title(), unit, 'power_supply', 'power_supply.{0}'.format(name), 'line']
+ lines = list()
+ attrlist = list()
+ attr_max_design = '{0}_{1}_design'.format(name, maxname)
+ attr_max = '{0}_{1}'.format(name, maxname)
+ attr_now = '{0}_now'.format(name)
+ attr_min = '{0}_{1}'.format(name, minname)
+ attr_min_design = '{0}_{1}_design'.format(name, minname)
+ if get_sysfs_value(os.path.join(syspath, attr_now)) is not None:
+ lines.append([attr_now, attr_now, 'absolute', 1, PRECISION])
+ attrlist.append(attr_now)
+ else:
+ return None, None
+ if get_sysfs_value(os.path.join(syspath, attr_max)) is not None:
+ lines.insert(0, [attr_max, attr_max, 'absolute', 1, PRECISION])
+ lines.append([attr_min, attr_min, 'absolute', 1, PRECISION])
+ attrlist.append(attr_max)
+ attrlist.append(attr_min)
+ elif get_sysfs_value(os.path.join(syspath, attr_min)) is not None:
+ lines.append([attr_min, attr_min, 'absolute', 1, PRECISION])
+ attrlist.append(attr_min)
+ if get_sysfs_value(os.path.join(syspath, attr_max_design)) is not None:
+ lines.insert(0, [attr_max_design, attr_max_design, 'absolute', 1, PRECISION])
+ lines.append([attr_min_design, attr_min_design, 'absolute', 1, PRECISION])
+ attrlist.append(attr_max_design)
+ attrlist.append(attr_min_design)
+ elif get_sysfs_value(os.path.join(syspath, attr_min_design)) is not None:
+ lines.append([attr_min_design, attr_min_design, 'absolute', 1, PRECISION])
+ attrlist.append(attr_min_design)
+ return {name: {'options': options, 'lines': lines}}, attrlist
+
+
+def get_charge_chart(syspath):
+ # Charge is measured in microamphours. We track up to five
+ # attributes.
+ return get_generic_chart(syspath, 'charge', 'µAh', 'full', 'empty')
+
+
+def get_energy_chart(syspath):
+ # Energy is measured in microwatthours. We track up to five
+ # attributes.
+ return get_generic_chart(syspath, 'energy', 'µWh', 'full', 'empty')
+
+
+def get_voltage_chart(syspath):
+ # Voltage is measured in microvolts. We track up to five attributes.
+ return get_generic_chart(syspath, 'voltage', 'µV', 'min', 'max')
+
+
+# This is a list of functions for generating charts. Used below to save
+# a bit of code (and to make it a bit easier to add new charts).
+GET_CHART = {
+ 'capacity': get_capacity_chart,
+ 'charge': get_charge_chart,
+ 'energy': get_energy_chart,
+ 'voltage': get_voltage_chart
+}
+
+
+# This opens the specified file and returns the value in it or None if
+# the file doesn't exist.
+def get_sysfs_value(filepath):
+ try:
+ with open(filepath, 'r') as datasource:
+ return int(datasource.read())
+ except (OSError, IOError):
+ return None
+
+
+class Service(SimpleService):
+ def __init__(self, configuration=None, name=None):
+ SimpleService.__init__(self, configuration=configuration, name=name)
+ self.definitions = dict()
+ self.order = list()
+ self.attrlist = list()
+ self.supply = self.configuration.get('supply', None)
+ if self.supply is not None:
+ self.syspath = '/sys/class/power_supply/{0}'.format(self.supply)
+ self.types = self.configuration.get('charts', 'capacity').split()
+
+ def check(self):
+ if platform.system() != 'Linux':
+ self.error('Only supported on Linux.')
+ return False
+ if self.supply is None:
+ self.error('No power supply specified for monitoring.')
+ return False
+ if not self.types:
+ self.error('No attributes requested for monitoring.')
+ return False
+ if not os.access(self.syspath, os.R_OK):
+ self.error('Unable to access {0}'.format(self.syspath))
+ return False
+ return self.create_charts()
+
+ def create_charts(self):
+ chartset = set(GET_CHART).intersection(set(self.types))
+ if not chartset:
+ self.error('No valid attributes requested for monitoring.')
+ return False
+ charts = dict()
+ attrlist = list()
+ for item in chartset:
+ chart, attrs = GET_CHART[item](self.syspath)
+ if chart is not None:
+ charts.update(chart)
+ attrlist.extend(attrs)
+ if len(charts) == 0:
+ self.error('No charts can be created.')
+ return False
+ self.definitions.update(charts)
+ self.order.extend(sorted(charts))
+ self.attrlist.extend(attrlist)
+ return True
+
+ def _get_data(self):
+ data = dict()
+ for attr in self.attrlist:
+ attrpath = os.path.join(self.syspath, attr)
+ if attr.endswith(('_min', '_min_design', '_empty', '_empty_design')):
+ data[attr] = get_sysfs_value(attrpath) or 0
+ else:
+ data[attr] = get_sysfs_value(attrpath)
+ return data
diff --git a/collectors/python.d.plugin/linux_power_supply/linux_power_supply.conf b/collectors/python.d.plugin/linux_power_supply/linux_power_supply.conf
new file mode 100644
index 000000000..3cb610f7f
--- /dev/null
+++ b/collectors/python.d.plugin/linux_power_supply/linux_power_supply.conf
@@ -0,0 +1,81 @@
+# netdata python.d.plugin configuration for linux_power_supply
+#
+# This file is in YaML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 1
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# retries sets the number of retries to be made in case of failures.
+# If unset, the default for python.d.plugin is used.
+# Attempts to restore the service are made once every update_every
+# and only if the module has collected values in the past.
+# retries: 60
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# retries: 60 # the JOB's number of restoration attempts
+# autodetection_retry: 0 # the JOB's re-check interval in seconds
+#
+# In addition to the above parameters, linux_power_supply also supports
+# the following extra parameters.
+#
+# supply: '' # the name of the power supply to monitor
+# charts: 'capacity'    # a space-separated list of the charts to try
+#                       # to generate. Valid charts are 'capacity',
+#                       # 'charge', 'energy', and 'voltage'
+#
+# Note that linux_power_supply will not automatically detect power
+# supplies in the system; you have to manually specify which ones you
+# want it to monitor.
+#
+# The following config will work to monitor the first battery in most
+# ACPI compliant battery powered systems (such as most laptops).
+#
+# battery:
+# name: battery
+# supply: BAT0
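+#
+# A hypothetical job that also tracks charge and voltage (assuming the
+# supply exposes those attributes in sysfs) might look like:
+#
+# battery:
+#    name: battery
+#    supply: BAT0
+#    charts: 'capacity charge voltage'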
diff --git a/collectors/python.d.plugin/litespeed/Makefile.inc b/collectors/python.d.plugin/litespeed/Makefile.inc
new file mode 100644
index 000000000..5dd645020
--- /dev/null
+++ b/collectors/python.d.plugin/litespeed/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += litespeed/litespeed.chart.py
+dist_pythonconfig_DATA += litespeed/litespeed.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += litespeed/README.md litespeed/Makefile.inc
+
diff --git a/collectors/python.d.plugin/litespeed/README.md b/collectors/python.d.plugin/litespeed/README.md
new file mode 100644
index 000000000..d1482f33c
--- /dev/null
+++ b/collectors/python.d.plugin/litespeed/README.md
@@ -0,0 +1,47 @@
+# litespeed
+
+This module monitors LiteSpeed web server performance metrics.
+
+It produces:
+
+1. **Network Throughput HTTP** in kilobits/s
+ * in
+ * out
+
+2. **Network Throughput HTTPS** in kilobits/s
+ * in
+ * out
+
+3. **Connections HTTP** in connections
+ * free
+ * used
+
+4. **Connections HTTPS** in connections
+ * free
+ * used
+
+5. **Requests** in requests/s
+ * requests
+
+6. **Requests In Processing** in requests
+ * processing
+
+7. **Public Cache Hits** in hits/s
+ * hits
+
+8. **Private Cache Hits** in hits/s
+ * hits
+
+9. **Static Hits** in hits/s
+ * hits
+
+
+### configuration
+```yaml
+local:
+ path : 'PATH'
+```
+
+If no configuration is given, the module will use `/tmp/lshttpd/`.
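+
+For example, a job pointing at a non-default report directory (the path
+below is purely illustrative) could look like:
+
+```yaml
+local:
+  path : '/usr/local/lsws/tmp/lshttpd/'
+```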
+
+---
diff --git a/collectors/python.d.plugin/litespeed/litespeed.chart.py b/collectors/python.d.plugin/litespeed/litespeed.chart.py
new file mode 100644
index 000000000..efdc6869c
--- /dev/null
+++ b/collectors/python.d.plugin/litespeed/litespeed.chart.py
@@ -0,0 +1,186 @@
+# -*- coding: utf-8 -*-
+# Description: litespeed netdata python.d module
+# Author: Ilya Maschenko (l2isbad)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+import glob
+import re
+import os
+
+from collections import namedtuple
+
+from bases.FrameworkServices.SimpleService import SimpleService
+
+
+update_every = 10
+
+# charts order (can be overridden if you want fewer charts, or a different order)
+ORDER = [
+ 'net_throughput_http', 'net_throughput_https', # net throughput
+ 'connections_http', 'connections_https', # connections
+ 'requests', 'requests_processing', # requests
+ 'pub_cache_hits', 'private_cache_hits', # cache
+ 'static_hits' # static
+]
+
+CHARTS = {
+ 'net_throughput_http': {
+ 'options': [None, 'Network Throughput HTTP', 'kilobits/s', 'net throughput',
+ 'litespeed.net_throughput', 'area'],
+ 'lines': [
+ ['bps_in', 'in', 'absolute'],
+ ['bps_out', 'out', 'absolute', -1]
+ ]
+ },
+ 'net_throughput_https': {
+ 'options': [None, 'Network Throughput HTTPS', 'kilobits/s', 'net throughput',
+ 'litespeed.net_throughput', 'area'],
+ 'lines': [
+ ['ssl_bps_in', 'in', 'absolute'],
+ ['ssl_bps_out', 'out', 'absolute', -1]
+ ]
+ },
+ 'connections_http': {
+ 'options': [None, 'Connections HTTP', 'conns', 'connections', 'litespeed.connections', 'stacked'],
+ 'lines': [
+ ['conn_free', 'free', 'absolute'],
+ ['conn_used', 'used', 'absolute']
+ ]
+ },
+ 'connections_https': {
+ 'options': [None, 'Connections HTTPS', 'conns', 'connections', 'litespeed.connections', 'stacked'],
+ 'lines': [
+ ['ssl_conn_free', 'free', 'absolute'],
+ ['ssl_conn_used', 'used', 'absolute']
+ ]
+ },
+ 'requests': {
+ 'options': [None, 'Requests', 'requests/s', 'requests', 'litespeed.requests', 'line'],
+ 'lines': [
+ ['requests', None, 'absolute', 1, 100]
+ ]
+ },
+ 'requests_processing': {
+ 'options': [None, 'Requests In Processing', 'requests', 'requests', 'litespeed.requests_processing', 'line'],
+ 'lines': [
+ ['requests_processing', 'processing', 'absolute']
+ ]
+ },
+ 'pub_cache_hits': {
+ 'options': [None, 'Public Cache Hits', 'hits/s', 'cache', 'litespeed.cache', 'line'],
+ 'lines': [
+ ['pub_cache_hits', 'hits', 'absolute', 1, 100]
+ ]
+ },
+ 'private_cache_hits': {
+ 'options': [None, 'Private Cache Hits', 'hits/s', 'cache', 'litespeed.cache', 'line'],
+ 'lines': [
+ ['private_cache_hits', 'hits', 'absolute', 1, 100]
+ ]
+ },
+ 'static_hits': {
+ 'options': [None, 'Static Hits', 'hits/s', 'static', 'litespeed.static', 'line'],
+ 'lines': [
+ ['static_hits', 'hits', 'absolute', 1, 100]
+ ]
+ }
+}
+
+t = namedtuple('T', ['key', 'id', 'mul'])
+
+T = [
+ t('BPS_IN', 'bps_in', 8),
+ t('BPS_OUT', 'bps_out', 8),
+ t('SSL_BPS_IN', 'ssl_bps_in', 8),
+ t('SSL_BPS_OUT', 'ssl_bps_out', 8),
+ t('REQ_PER_SEC', 'requests', 100),
+ t('REQ_PROCESSING', 'requests_processing', 1),
+ t('PUB_CACHE_HITS_PER_SEC', 'pub_cache_hits', 100),
+ t('PRIVATE_CACHE_HITS_PER_SEC', 'private_cache_hits', 100),
+ t('STATIC_HITS_PER_SEC', 'static_hits', 100),
+ t('PLAINCONN', 'conn_used', 1),
+ t('AVAILCONN', 'conn_free', 1),
+ t('SSLCONN', 'ssl_conn_used', 1),
+ t('AVAILSSL', 'ssl_conn_free', 1),
+]
+
+RE = re.compile(r'([A-Z_]+): ([0-9.]+)')
+
+ZERO_DATA = {
+ 'bps_in': 0,
+ 'bps_out': 0,
+ 'ssl_bps_in': 0,
+ 'ssl_bps_out': 0,
+ 'requests': 0,
+ 'requests_processing': 0,
+ 'pub_cache_hits': 0,
+ 'private_cache_hits': 0,
+ 'static_hits': 0,
+ 'conn_used': 0,
+ 'conn_free': 0,
+ 'ssl_conn_used': 0,
+ 'ssl_conn_free': 0,
+}
+
+
+class Service(SimpleService):
+ def __init__(self, configuration=None, name=None):
+ SimpleService.__init__(self, configuration=configuration, name=name)
+ self.order = ORDER
+ self.definitions = CHARTS
+ self.path = self.configuration.get('path', '/tmp/lshttpd/')
+ self.files = list()
+
+ def check(self):
+ if not self.path:
+ self.error('"path" not specified')
+ return False
+
+ fs = glob.glob(os.path.join(self.path, '.rtreport*'))
+
+ if not fs:
+ self.error('"{0}" has no "rtreport" files or dir is not readable'.format(self.path))
+ return None
+
+ self.debug('stats files:', fs)
+
+ for f in fs:
+ if not is_readable_file(f):
+ self.error('{0} is not readable'.format(f))
+ continue
+ self.files.append(f)
+
+ return bool(self.files)
+
+ def get_data(self):
+ """
+ Format data received from http request
+ :return: dict
+ """
+ data = dict(ZERO_DATA)
+
+ for f in self.files:
+ try:
+ with open(f) as b:
+ lines = b.readlines()
+ except (OSError, IOError) as err:
+ self.error(err)
+ return None
+ else:
+ parse_file(data, lines)
+
+ return data
+
+
+def parse_file(data, lines):
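+    # Only the report lines that carry the tracked counters are parsed;
+    # counters from every .rtreport file are summed into the same dict.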
+ for line in lines:
+ if not line.startswith(('BPS_IN:', 'MAXCONN:', 'REQ_RATE []:')):
+ continue
+ m = dict(RE.findall(line))
+ for v in T:
+ if v.key in m:
+ data[v.id] += float(m[v.key]) * v.mul
+
+
+def is_readable_file(v):
+ return os.path.isfile(v) and os.access(v, os.R_OK)
diff --git a/collectors/python.d.plugin/litespeed/litespeed.conf b/collectors/python.d.plugin/litespeed/litespeed.conf
new file mode 100644
index 000000000..17d0f690e
--- /dev/null
+++ b/collectors/python.d.plugin/litespeed/litespeed.conf
@@ -0,0 +1,74 @@
+# netdata python.d.plugin configuration for litespeed
+#
+# This file is in YaML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 1
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# retries sets the number of retries to be made in case of failures.
+# If unset, the default for python.d.plugin is used.
+# Attempts to restore the service are made once every update_every
+# and only if the module has collected values in the past.
+# retries: 60
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# retries: 60 # the JOB's number of restoration attempts
+# autodetection_retry: 0 # the JOB's re-check interval in seconds
+#
+# In addition to the above, litespeed also supports the following:
+#
+# path: 'PATH'                    # path to litespeed stats files directory
+#
+# ----------------------------------------------------------------------
+# AUTO-DETECTION JOBS
+# only one of them will run (they have the same name)
+
+localhost:
+ name : 'local'
+ path : '/tmp/lshttpd/'
diff --git a/collectors/python.d.plugin/logind/Makefile.inc b/collectors/python.d.plugin/logind/Makefile.inc
new file mode 100644
index 000000000..adadab120
--- /dev/null
+++ b/collectors/python.d.plugin/logind/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += logind/logind.chart.py
+dist_pythonconfig_DATA += logind/logind.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += logind/README.md logind/Makefile.inc
+
diff --git a/collectors/python.d.plugin/logind/README.md b/collectors/python.d.plugin/logind/README.md
new file mode 100644
index 000000000..8f8670d4a
--- /dev/null
+++ b/collectors/python.d.plugin/logind/README.md
@@ -0,0 +1,54 @@
+# logind
+
+This module monitors active sessions, users, and seats tracked by systemd-logind or elogind.
+
+It provides the following charts:
+
+1. **Sessions** Tracks the total number of sessions.
+ * Graphical: Local graphical sessions (running X11, or Wayland, or something else).
+ * Console: Local console sessions.
+ * Remote: Remote sessions.
+
+2. **Users** Tracks total number of unique user logins of each type.
+ * Graphical
+ * Console
+ * Remote
+
+3. **Seats** Total number of seats in use.
+ * Seats
+
+### configuration
+
+This module needs no configuration. Just make sure the netdata user
+can run the `loginctl` command and get a session list without having to
+specify a path.
+
+This will work with any command that can output data in the _exact_
+same format as `loginctl list-sessions --no-legend`. If you have some
+other command you want to use that outputs data in this format, you can
+specify it using the `command` key like so:
+
+```yaml
+command: '/path/to/other/command'
+```
+
+### notes
+
+* This module's ability to track logins is dependent on what PAM services
+are configured to register sessions with logind. In particular, for
+most systems, it will only track TTY logins, local desktop logins,
+and logins through remote shell connections.
+
+* The users chart counts _usernames_, not UIDs. This is potentially
+important in configurations where multiple users have the same UID.
+
+* The users chart counts any given user name up to once for _each_ type
+of login. So if the same user has a graphical and a console login on a
+system, they will show up once in the graphical count, and once in the
+console count.
+
+* Because the data collection process is rather expensive, this plugin
+is currently disabled by default, and needs to be explicitly enabled in
+`/etc/netdata/python.d.conf` before it will run.
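+
+Enabling it should just be a matter of adding the following line to
+`/etc/netdata/python.d.conf` (mirroring how other disabled modules,
+such as megacli, are enabled there):
+
+```yaml
+logind: yes
+```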
+
+---
diff --git a/collectors/python.d.plugin/logind/logind.chart.py b/collectors/python.d.plugin/logind/logind.chart.py
new file mode 100644
index 000000000..bfc486c7f
--- /dev/null
+++ b/collectors/python.d.plugin/logind/logind.chart.py
@@ -0,0 +1,79 @@
+# -*- coding: utf-8 -*-
+# Description: logind netdata python.d module
+# Author: Austin S. Hemmelgarn (Ferroin)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from bases.FrameworkServices.ExecutableService import ExecutableService
+
+priority = 59999
+disabled_by_default = True
+
+ORDER = ['sessions', 'users', 'seats']
+
+CHARTS = {
+ 'sessions': {
+ 'options': [None, 'Logind Sessions', 'sessions', 'sessions', 'logind.sessions', 'stacked'],
+ 'lines': [
+ ['sessions_graphical', 'Graphical', 'absolute', 1, 1],
+ ['sessions_console', 'Console', 'absolute', 1, 1],
+ ['sessions_remote', 'Remote', 'absolute', 1, 1]
+ ]
+ },
+ 'users': {
+ 'options': [None, 'Logind Users', 'users', 'users', 'logind.users', 'stacked'],
+ 'lines': [
+ ['users_graphical', 'Graphical', 'absolute', 1, 1],
+ ['users_console', 'Console', 'absolute', 1, 1],
+ ['users_remote', 'Remote', 'absolute', 1, 1]
+ ]
+ },
+ 'seats': {
+ 'options': [None, 'Logind Seats', 'seats', 'seats', 'logind.seats', 'line'],
+ 'lines': [
+ ['seats', 'Active Seats', 'absolute', 1, 1]
+ ]
+ }
+}
+
+
+class Service(ExecutableService):
+ def __init__(self, configuration=None, name=None):
+ ExecutableService.__init__(self, configuration=configuration, name=name)
+ self.command = 'loginctl list-sessions --no-legend'
+ self.order = ORDER
+ self.definitions = CHARTS
+
+ def _get_data(self):
+ ret = {
+ 'sessions_graphical': 0,
+ 'sessions_console': 0,
+ 'sessions_remote': 0,
+ }
+ users = {
+ 'graphical': list(),
+ 'console': list(),
+ 'remote': list()
+ }
+ seats = list()
+ data = self._get_raw_data()
+
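+        # Each line of 'loginctl list-sessions --no-legend' output is one
+        # session; the field count distinguishes the type: 3 fields for
+        # remote sessions (no seat), 4 for graphical sessions, and 5 for
+        # console sessions.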
+ for item in data:
+ fields = item.split()
+ if len(fields) == 3:
+ users['remote'].append(fields[2])
+ ret['sessions_remote'] += 1
+ elif len(fields) == 4:
+ users['graphical'].append(fields[2])
+ ret['sessions_graphical'] += 1
+ seats.append(fields[3])
+ elif len(fields) == 5:
+ users['console'].append(fields[2])
+ ret['sessions_console'] += 1
+ seats.append(fields[3])
+
+ ret['users_graphical'] = len(set(users['graphical']))
+ ret['users_console'] = len(set(users['console']))
+ ret['users_remote'] = len(set(users['remote']))
+ ret['seats'] = len(set(seats))
+
+ return ret
diff --git a/collectors/python.d.plugin/logind/logind.conf b/collectors/python.d.plugin/logind/logind.conf
new file mode 100644
index 000000000..0623493de
--- /dev/null
+++ b/collectors/python.d.plugin/logind/logind.conf
@@ -0,0 +1,62 @@
+# netdata python.d.plugin configuration for logind
+#
+# This file is in YaML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 1
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# retries sets the number of retries to be made in case of failures.
+# If unset, the default for python.d.plugin is used.
+# Attempts to restore the service are made once every update_every
+# and only if the module has collected values in the past.
+# retries: 60
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# retries: 60 # the JOB's number of restoration attempts
+# autodetection_retry: 0 # the JOB's re-check interval in seconds
diff --git a/collectors/python.d.plugin/mdstat/Makefile.inc b/collectors/python.d.plugin/mdstat/Makefile.inc
new file mode 100644
index 000000000..5125a271b
--- /dev/null
+++ b/collectors/python.d.plugin/mdstat/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += mdstat/mdstat.chart.py
+dist_pythonconfig_DATA += mdstat/mdstat.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += mdstat/README.md mdstat/Makefile.inc
+
diff --git a/collectors/python.d.plugin/mdstat/README.md b/collectors/python.d.plugin/mdstat/README.md
new file mode 100644
index 000000000..1ff8f7dab
--- /dev/null
+++ b/collectors/python.d.plugin/mdstat/README.md
@@ -0,0 +1,26 @@
+# mdstat
+
+This module monitors `/proc/mdstat`.
+
+It produces:
+
+1. **Health** Number of failed disks in every array (aggregate chart).
+
+2. **Disks stats**
+    * total (number of devices the array ideally would have)
+    * inuse (number of devices currently in use)
+
+3. **Current status**
+ * resync in percent
+ * recovery in percent
+ * reshape in percent
+ * check in percent
+
+4. **Operation status** (if resync/recovery/reshape/check is active)
+    * finish in seconds
+    * speed in KB/s
+
+### configuration
+No configuration is needed.
+
+---
diff --git a/collectors/python.d.plugin/mdstat/mdstat.chart.py b/collectors/python.d.plugin/mdstat/mdstat.chart.py
new file mode 100644
index 000000000..b7306b6a7
--- /dev/null
+++ b/collectors/python.d.plugin/mdstat/mdstat.chart.py
@@ -0,0 +1,205 @@
+# -*- coding: utf-8 -*-
+# Description: mdstat netdata python.d module
+# Author: Ilya Mashchenko (l2isbad)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+import re
+
+from collections import defaultdict
+
+from bases.FrameworkServices.SimpleService import SimpleService
+
+MDSTAT = '/proc/mdstat'
+MISMATCH_CNT = '/sys/block/{0}/md/mismatch_cnt'
+
+ORDER = ['mdstat_health']
+
+CHARTS = {
+ 'mdstat_health': {
+ 'options': [None, 'Faulty Devices In MD', 'failed disks', 'health', 'md.health', 'line'],
+ 'lines': []
+ }
+}
+
+RE_DISKS = re.compile(r' (?P<array>[a-zA-Z_0-9]+) : active .+\['
+ r'(?P<total_disks>[0-9]+)/'
+ r'(?P<inuse_disks>[0-9]+)\]')
+
+RE_STATUS = re.compile(r' (?P<array>[a-zA-Z_0-9]+) : active .+ '
+ r'(?P<operation>[a-z]+) =[ ]{1,2}'
+ r'(?P<operation_status>[0-9.]+).+finish='
+ r'(?P<finish_in>([0-9.]+))min speed='
+ r'(?P<speed>[0-9]+)')
+
+
+def md_charts(name):
+ order = [
+ '{0}_disks'.format(name),
+ '{0}_operation'.format(name),
+ '{0}_mismatch_cnt'.format(name),
+ '{0}_finish'.format(name),
+ '{0}_speed'.format(name)
+ ]
+
+ charts = dict()
+ charts[order[0]] = {
+ 'options': [None, 'Disks Stats', 'disks', name, 'md.disks', 'stacked'],
+ 'lines': [
+ ['{0}_total_disks'.format(name), 'total', 'absolute'],
+ ['{0}_inuse_disks'.format(name), 'inuse', 'absolute']
+ ]
+ }
+
+ charts[order[1]] = {
+ 'options': [None, 'Current Status', 'percent', name, 'md.status', 'line'],
+ 'lines': [
+ ['{0}_resync'.format(name), 'resync', 'absolute', 1, 100],
+ ['{0}_recovery'.format(name), 'recovery', 'absolute', 1, 100],
+ ['{0}_reshape'.format(name), 'reshape', 'absolute', 1, 100],
+ ['{0}_check'.format(name), 'check', 'absolute', 1, 100],
+ ]
+ }
+
+ charts[order[2]] = {
+ 'options': [None, 'Mismatch Count', 'unsynchronized blocks', name, 'md.mismatch_cnt', 'line'],
+ 'lines': [
+ ['{0}_mismatch_cnt'.format(name), 'count', 'absolute']
+ ]
+ }
+
+ charts[order[3]] = {
+ 'options': [None, 'Approximate Time Until Finish', 'seconds', name, 'md.rate', 'line'],
+ 'lines': [
+ ['{0}_finish_in'.format(name), 'finish in', 'absolute', 1, 1000]
+ ]
+ }
+
+ charts[order[4]] = {
+ 'options': [None, 'Operation Speed', 'KB/s', name, 'md.rate', 'line'],
+ 'lines': [
+ ['{0}_speed'.format(name), 'speed', 'absolute', 1, 1000]
+ ]
+ }
+
+ return order, charts
+
+
+class MD:
+ def __init__(self, raw_data):
+ self.name = raw_data['array']
+ self.d = raw_data
+
+ def data(self):
+ rv = {
+ 'total_disks': self.d['total_disks'],
+ 'inuse_disks': self.d['inuse_disks'],
+ 'health': int(self.d['total_disks']) - int(self.d['inuse_disks']),
+ 'resync': 0,
+ 'recovery': 0,
+ 'reshape': 0,
+ 'check': 0,
+ 'finish_in': 0,
+ 'speed': 0,
+ }
+
+ v = read_lines(MISMATCH_CNT.format(self.name))
+ if v:
+ rv['mismatch_cnt'] = v
+
+ if self.d.get('operation'):
+ rv[self.d['operation']] = float(self.d['operation_status']) * 100
+ rv['finish_in'] = float(self.d['finish_in']) * 1000 * 60
+ rv['speed'] = float(self.d['speed']) * 1000
+
+ return dict(('{0}_{1}'.format(self.name, k), v) for k, v in rv.items())
+
+
+class Service(SimpleService):
+ def __init__(self, configuration=None, name=None):
+ SimpleService.__init__(self, configuration=configuration, name=name)
+ self.order = ORDER
+ self.definitions = CHARTS
+ self.mds = list()
+
+ @staticmethod
+ def get_mds():
+ raw = read_lines(MDSTAT)
+
+ if not raw:
+ return None
+
+ return find_mds(raw)
+
+ def get_data(self):
+ """
+ Parse data from _get_raw_data()
+ :return: dict
+ """
+ mds = self.get_mds()
+
+ if not mds:
+ return None
+
+ data = dict()
+ for md in mds:
+ if md.name not in self.mds:
+ self.mds.append(md.name)
+ self.add_new_md_charts(md.name)
+ data.update(md.data())
+ return data
+
+ def check(self):
+ if not self.get_mds():
+            self.error('Failed to read data from {0} or there are no active arrays'.format(MDSTAT))
+ return False
+ return True
+
+ def add_new_md_charts(self, name):
+ order, charts = md_charts(name)
+
+ self.charts['mdstat_health'].add_dimension(['{0}_health'.format(name), name])
+
+ for chart_name in order:
+ params = [chart_name] + charts[chart_name]['options']
+ dims = charts[chart_name]['lines']
+
+ chart = self.charts.add_chart(params)
+ for dim in dims:
+ chart.add_dimension(dim)
+
+
+def find_mds(raw_data):
+ data = defaultdict(str)
+ counter = 1
+
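+    # /proc/mdstat describes each array in a block of consecutive lines
+    # separated by blank lines, so join every block into a single string
+    # before matching it against the regexes above.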
+ for row in (elem.strip() for elem in raw_data):
+ if not row:
+ counter += 1
+ continue
+ data[counter] = ' '.join([data[counter], row])
+
+ mds = list()
+
+ for v in data.values():
+ m = RE_DISKS.search(v)
+
+ if not m:
+ continue
+
+ d = m.groupdict()
+
+ m = RE_STATUS.search(v)
+ if m:
+ d.update(m.groupdict())
+
+ mds.append(MD(d))
+
+ return sorted(mds, key=lambda md: md.name)
+
+
+def read_lines(path):
+ try:
+ with open(path) as f:
+ return f.readlines()
+ except (IOError, OSError):
+ return None
diff --git a/collectors/python.d.plugin/mdstat/mdstat.conf b/collectors/python.d.plugin/mdstat/mdstat.conf
new file mode 100644
index 000000000..66a2f153c
--- /dev/null
+++ b/collectors/python.d.plugin/mdstat/mdstat.conf
@@ -0,0 +1,32 @@
+# netdata python.d.plugin configuration for mdstat
+#
+# This file is in YaML format. Generally the format is:
+#
+# name: value
+#
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 1
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# retries sets the number of retries to be made in case of failures.
+# If unset, the default for python.d.plugin is used.
+# Attempts to restore the service are made once every update_every
+# and only if the module has collected values in the past.
+# retries: 60
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
diff --git a/collectors/python.d.plugin/megacli/Makefile.inc b/collectors/python.d.plugin/megacli/Makefile.inc
new file mode 100644
index 000000000..83680d723
--- /dev/null
+++ b/collectors/python.d.plugin/megacli/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += megacli/megacli.chart.py
+dist_pythonconfig_DATA += megacli/megacli.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += megacli/README.md megacli/Makefile.inc
+
diff --git a/collectors/python.d.plugin/megacli/README.md b/collectors/python.d.plugin/megacli/README.md
new file mode 100644
index 000000000..d288a6353
--- /dev/null
+++ b/collectors/python.d.plugin/megacli/README.md
@@ -0,0 +1,48 @@
+# megacli
+
+This module collects adapter, physical drive, and battery stats.
+
+**Requirements:**
+ * `megacli` program
+ * `sudo` program
+ * the `netdata` user needs to be able to run the `megacli` program via `sudo` without a password
+
+To grab stats it executes:
+ * `sudo -n megacli -LDPDInfo -aAll`
+ * `sudo -n megacli -AdpBbuCmd -a0`
+
+
+It produces:
+
+1. **Adapter State**
+
+2. **Physical Drives Media Errors**
+
+3. **Physical Drives Predictive Failures**
+
+4. **Battery Relative State of Charge**
+
+5. **Battery Cycle Count**
+
+### prerequisite
+This module uses `megacli`, which can only be executed by root. It uses
+`sudo` and assumes that it is configured such that the `netdata` user can
+execute `megacli` as root without a password.
+
+Add to `sudoers`:
+
+ netdata ALL=(root) NOPASSWD: /path/to/megacli
+
+### configuration
+
+**megacli** is disabled by default. It should be explicitly enabled in `python.d.conf`.
+```yaml
+megacli: yes
+```
+
+Battery stats are disabled by default. To enable them, modify `megacli.conf`.
+```yaml
+do_battery: yes
+```
+
+---
diff --git a/collectors/python.d.plugin/megacli/megacli.chart.py b/collectors/python.d.plugin/megacli/megacli.chart.py
new file mode 100644
index 000000000..41a1079f6
--- /dev/null
+++ b/collectors/python.d.plugin/megacli/megacli.chart.py
@@ -0,0 +1,279 @@
+# -*- coding: utf-8 -*-
+# Description: megacli netdata python.d module
+# Author: Ilya Mashchenko (l2isbad)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+
+import re
+
+from bases.FrameworkServices.ExecutableService import ExecutableService
+from bases.collection import find_binary
+
+
+disabled_by_default = True
+
+update_every = 5
+
+
+def adapter_charts(ads):
+ order = [
+ 'adapter_degraded',
+ ]
+
+ def dims(ad):
+ return [['adapter_{0}_degraded'.format(a.id), 'adapter {0}'.format(a.id)] for a in ad]
+
+ charts = {
+ 'adapter_degraded': {
+ 'options': [None, 'Adapter State', 'is degraded', 'adapter', 'megacli.adapter_degraded', 'line'],
+ 'lines': dims(ads)
+ },
+ }
+
+ return order, charts
+
+
+def pd_charts(pds):
+ order = [
+ 'pd_media_error',
+ 'pd_predictive_failure',
+ ]
+
+ def dims(k, pd):
+ return [['slot_{0}_{1}'.format(p.id, k), 'slot {0}'.format(p.id), 'incremental'] for p in pd]
+
+ charts = {
+ 'pd_media_error': {
+ 'options': [None, 'Physical Drives Media Errors', 'errors/s', 'pd', 'megacli.pd_media_error', 'line'],
+ 'lines': dims('media_error', pds)
+ },
+ 'pd_predictive_failure': {
+ 'options': [None, 'Physical Drives Predictive Failures', 'failures/s', 'pd',
+ 'megacli.pd_predictive_failure', 'line'],
+ 'lines': dims('predictive_failure', pds)
+ }
+ }
+
+ return order, charts
+
+
+def battery_charts(bats):
+ order = list()
+ charts = dict()
+
+ for b in bats:
+ order.append('bbu_{0}_relative_charge'.format(b.id))
+ charts.update(
+ {
+ 'bbu_{0}_relative_charge'.format(b.id): {
+ 'options': [None, 'Relative State of Charge', '%', 'battery',
+ 'megacli.bbu_relative_charge', 'line'],
+ 'lines': [
+ ['bbu_{0}_relative_charge'.format(b.id), 'adapter {0}'.format(b.id)],
+ ]
+ }
+ }
+ )
+
+ for b in bats:
+ order.append('bbu_{0}_cycle_count'.format(b.id))
+ charts.update(
+ {
+ 'bbu_{0}_cycle_count'.format(b.id): {
+ 'options': [None, 'Cycle Count', 'cycle count', 'battery', 'megacli.bbu_cycle_count', 'line'],
+ 'lines': [
+ ['bbu_{0}_cycle_count'.format(b.id), 'adapter {0}'.format(b.id)],
+ ]
+ }
+ }
+ )
+
+ return order, charts
+
+
+RE_ADAPTER = re.compile(
+ r'Adapter #([0-9]+) State(?:\s+)?: ([a-zA-Z]+)'
+)
+
+RE_VD = re.compile(
+ r'Slot Number: ([0-9]+) Media Error Count: ([0-9]+) Predictive Failure Count: ([0-9]+)'
+)
+
+RE_BATTERY = re.compile(
+ r'BBU Capacity Info for Adapter: ([0-9]+) Relative State of Charge: ([0-9]+) % Cycle Count: ([0-9]+)'
+)
+
+
+def find_adapters(d):
+ keys = ('Adapter #', 'State')
+ d = ' '.join(v.strip() for v in d if v.startswith(keys))
+ return [Adapter(*v) for v in RE_ADAPTER.findall(d)]
+
+
+def find_pds(d):
+ keys = ('Slot Number', 'Media Error Count', 'Predictive Failure Count')
+ d = ' '.join(v.strip() for v in d if v.startswith(keys))
+ return [PD(*v) for v in RE_VD.findall(d)]
+
+
+def find_batteries(d):
+ keys = ('BBU Capacity Info for Adapter', 'Relative State of Charge', 'Cycle Count')
+ d = ' '.join(v.strip() for v in d if v.strip().startswith(keys))
+ return [Battery(*v) for v in RE_BATTERY.findall(d)]
+
+
+class Adapter:
+ def __init__(self, n, state):
+ self.id = n
+ self.state = int(state == 'Degraded')
+
+ def data(self):
+ return {
+ 'adapter_{0}_degraded'.format(self.id): self.state,
+ }
+
+
+class PD:
+ def __init__(self, n, media_err, predict_fail):
+ self.id = n
+ self.media_err = media_err
+ self.predict_fail = predict_fail
+
+ def data(self):
+ return {
+ 'slot_{0}_media_error'.format(self.id): self.media_err,
+ 'slot_{0}_predictive_failure'.format(self.id): self.predict_fail,
+ }
+
+
+class Battery:
+ def __init__(self, adapt_id, rel_charge, cycle_count):
+ self.id = adapt_id
+ self.rel_charge = rel_charge
+ self.cycle_count = cycle_count
+
+ def data(self):
+ return {
+ 'bbu_{0}_relative_charge'.format(self.id): self.rel_charge,
+ 'bbu_{0}_cycle_count'.format(self.id): self.cycle_count,
+ }
+
+
+# TODO: hardcoded sudo...
+class Megacli:
+ def __init__(self):
+ self.s = find_binary('sudo')
+ self.m = find_binary('megacli')
+ self.sudo_check = [self.s, '-n', '-v']
+ self.disk_info = [self.s, '-n', self.m, '-LDPDInfo', '-aAll', '-NoLog']
+ self.battery_info = [self.s, '-n', self.m, '-AdpBbuCmd', '-a0', '-NoLog']
+
+ def __bool__(self):
+ return bool(self.s and self.m)
+
+ def __nonzero__(self):
+ return self.__bool__()
+
+
+class Service(ExecutableService):
+ def __init__(self, configuration=None, name=None):
+ ExecutableService.__init__(self, configuration=configuration, name=name)
+ self.order = list()
+ self.definitions = dict()
+ self.megacli = Megacli()
+ self.do_battery = self.configuration.get('do_battery')
+
+ def check_sudo(self):
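+        # 'sudo -n -v' prints an error to stderr when passwordless sudo is
+        # not configured for the netdata user, so any stderr output here
+        # means megacli cannot be invoked either.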
+ err = self._get_raw_data(command=self.megacli.sudo_check, stderr=True)
+ if err:
+ self.error(''.join(err))
+ return False
+ return True
+
+ def check_disk_info(self):
+ d = self._get_raw_data(command=self.megacli.disk_info)
+ if not d:
+ return False
+
+ ads = find_adapters(d)
+ pds = find_pds(d)
+
+ if not (ads and pds):
+ self.error('failed to parse "{0}" output'.format(' '.join(self.megacli.disk_info)))
+ return False
+
+ o, c = adapter_charts(ads)
+ self.order.extend(o)
+ self.definitions.update(c)
+
+ o, c = pd_charts(pds)
+ self.order.extend(o)
+ self.definitions.update(c)
+
+ return True
+
+ def check_battery(self):
+ d = self._get_raw_data(command=self.megacli.battery_info)
+ if not d:
+ return False
+
+ bats = find_batteries(d)
+
+ if not bats:
+ self.error('failed to parse "{0}" output'.format(' '.join(self.megacli.battery_info)))
+ return False
+
+ o, c = battery_charts(bats)
+ self.order.extend(o)
+ self.definitions.update(c)
+ return True
+
+ def check(self):
+ if not self.megacli:
+ self.error('can\'t locate "sudo" or "megacli" binary')
+ return None
+
+ if not (self.check_sudo() and self.check_disk_info()):
+ return False
+
+ if self.do_battery:
+ self.do_battery = self.check_battery()
+
+ return True
+
+ def get_data(self):
+ data = dict()
+
+ data.update(self.get_adapter_pd_data())
+
+ if self.do_battery:
+ data.update(self.get_battery_data())
+
+ return data or None
+
+ def get_adapter_pd_data(self):
+ raw = self._get_raw_data(command=self.megacli.disk_info)
+ data = dict()
+
+ if not raw:
+ return data
+
+ for a in find_adapters(raw):
+ data.update(a.data())
+
+ for p in find_pds(raw):
+ data.update(p.data())
+
+ return data
+
+ def get_battery_data(self):
+ raw = self._get_raw_data(command=self.megacli.battery_info)
+ data = dict()
+
+ if not raw:
+ return data
+
+ for b in find_batteries(raw):
+ data.update(b.data())
+
+ return data
diff --git a/collectors/python.d.plugin/megacli/megacli.conf b/collectors/python.d.plugin/megacli/megacli.conf
new file mode 100644
index 000000000..73afb2f7f
--- /dev/null
+++ b/collectors/python.d.plugin/megacli/megacli.conf
@@ -0,0 +1,62 @@
+# netdata python.d.plugin configuration for megacli
+#
+# This file is in YaML format. Generally the format is:
+#
+# name: value
+#
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 1
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# retries sets the number of retries to be made in case of failures.
+# If unset, the default for python.d.plugin is used.
+# Attempts to restore the service are made once every update_every
+# and only if the module has collected values in the past.
+# retries: 60
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# retries: 60 # the JOB's number of restoration attempts
+# autodetection_retry: 0 # the JOB's re-check interval in seconds
+#
+# In addition to the above, megacli also supports the following:
+#
+# do_battery: yes/no   # default is no. Collect battery stats (adds an additional megacli call: `megacli -AdpBbuCmd -a0`).
+#
+# ----------------------------------------------------------------------
+# uncomment the line below to collect battery statistics
+# do_battery: yes
diff --git a/collectors/python.d.plugin/memcached/Makefile.inc b/collectors/python.d.plugin/memcached/Makefile.inc
new file mode 100644
index 000000000..e60357161
--- /dev/null
+++ b/collectors/python.d.plugin/memcached/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += memcached/memcached.chart.py
+dist_pythonconfig_DATA += memcached/memcached.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += memcached/README.md memcached/Makefile.inc
+
diff --git a/collectors/python.d.plugin/memcached/README.md b/collectors/python.d.plugin/memcached/README.md
new file mode 100644
index 000000000..3521c109d
--- /dev/null
+++ b/collectors/python.d.plugin/memcached/README.md
@@ -0,0 +1,69 @@
+# memcached
+
+Memcached monitoring module. Data is grabbed from the [stats interface](https://github.com/memcached/memcached/wiki/Commands#stats).
+
+1. **Network** in kilobits/s
+ * read
+ * written
+
+2. **Connections** per second
+ * current
+ * rejected
+ * total
+
+3. **Items** in cluster
+ * current
+ * total
+
+4. **Evicted and Reclaimed** items
+ * evicted
+ * reclaimed
+
+5. **GET** requests/s
+ * hits
+ * misses
+
+6. **GET rate** rate in requests/s
+ * rate
+
+7. **SET rate** rate in requests/s
+ * rate
+
+8. **DELETE** requests/s
+ * hits
+ * misses
+
+9. **CAS** requests/s
+ * hits
+ * misses
+ * bad value
+
+10. **Increment** requests/s
+ * hits
+ * misses
+
+11. **Decrement** requests/s
+ * hits
+ * misses
+
+12. **Touch** requests/s
+ * hits
+ * misses
+
+13. **Touch rate** rate in requests/s
+ * rate
+
+### configuration
+
+Sample:
+
+```yaml
+localtcpip:
+ name : 'local'
+ host : '127.0.0.1'
+ port : 24242
+```
+
+If no configuration is given, the module will attempt to connect to a memcached instance at `127.0.0.1:11211`.
+
+---
diff --git a/collectors/python.d.plugin/memcached/memcached.chart.py b/collectors/python.d.plugin/memcached/memcached.chart.py
new file mode 100644
index 000000000..3c310ec69
--- /dev/null
+++ b/collectors/python.d.plugin/memcached/memcached.chart.py
@@ -0,0 +1,198 @@
+# -*- coding: utf-8 -*-
+# Description: memcached netdata python.d module
+# Author: Pawel Krupa (paulfantom)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from bases.FrameworkServices.SocketService import SocketService
+
+# default module values (can be overridden per job in `config`)
+# update_every = 2
+priority = 60000
+retries = 60
+
+# default job configuration (overridden by python.d.plugin)
+# config = {'local': {
+# 'update_every': update_every,
+# 'retries': retries,
+# 'priority': priority,
+# 'host': 'localhost',
+# 'port': 11211,
+# 'unix_socket': None
+# }}
+
+ORDER = ['cache', 'net', 'connections', 'items', 'evicted_reclaimed',
+ 'get', 'get_rate', 'set_rate', 'cas', 'delete', 'increment', 'decrement', 'touch', 'touch_rate']
+
+CHARTS = {
+ 'cache': {
+ 'options': [None, 'Cache Size', 'megabytes', 'cache', 'memcached.cache', 'stacked'],
+ 'lines': [
+ ['avail', 'available', 'absolute', 1, 1048576],
+ ['used', 'used', 'absolute', 1, 1048576]
+ ]
+ },
+ 'net': {
+ 'options': [None, 'Network', 'kilobits/s', 'network', 'memcached.net', 'area'],
+ 'lines': [
+ ['bytes_read', 'in', 'incremental', 8, 1024],
+ ['bytes_written', 'out', 'incremental', -8, 1024]
+ ]
+ },
+ 'connections': {
+ 'options': [None, 'Connections', 'connections/s', 'connections', 'memcached.connections', 'line'],
+ 'lines': [
+ ['curr_connections', 'current', 'incremental'],
+ ['rejected_connections', 'rejected', 'incremental'],
+ ['total_connections', 'total', 'incremental']
+ ]
+ },
+ 'items': {
+ 'options': [None, 'Items', 'items', 'items', 'memcached.items', 'line'],
+ 'lines': [
+ ['curr_items', 'current', 'absolute'],
+ ['total_items', 'total', 'absolute']
+ ]
+ },
+ 'evicted_reclaimed': {
+ 'options': [None, 'Items', 'items', 'items', 'memcached.evicted_reclaimed', 'line'],
+ 'lines': [
+ ['reclaimed', 'reclaimed', 'absolute'],
+ ['evictions', 'evicted', 'absolute']
+ ]
+ },
+ 'get': {
+ 'options': [None, 'Requests', 'requests', 'get ops', 'memcached.get', 'stacked'],
+ 'lines': [
+ ['get_hits', 'hits', 'percent-of-absolute-row'],
+ ['get_misses', 'misses', 'percent-of-absolute-row']
+ ]
+ },
+ 'get_rate': {
+ 'options': [None, 'Rate', 'requests/s', 'get ops', 'memcached.get_rate', 'line'],
+ 'lines': [
+ ['cmd_get', 'rate', 'incremental']
+ ]
+ },
+ 'set_rate': {
+ 'options': [None, 'Rate', 'requests/s', 'set ops', 'memcached.set_rate', 'line'],
+ 'lines': [
+ ['cmd_set', 'rate', 'incremental']
+ ]
+ },
+ 'delete': {
+ 'options': [None, 'Requests', 'requests', 'delete ops', 'memcached.delete', 'stacked'],
+ 'lines': [
+ ['delete_hits', 'hits', 'percent-of-absolute-row'],
+ ['delete_misses', 'misses', 'percent-of-absolute-row'],
+ ]
+ },
+ 'cas': {
+ 'options': [None, 'Requests', 'requests', 'check and set ops', 'memcached.cas', 'stacked'],
+ 'lines': [
+ ['cas_hits', 'hits', 'percent-of-absolute-row'],
+ ['cas_misses', 'misses', 'percent-of-absolute-row'],
+ ['cas_badval', 'bad value', 'percent-of-absolute-row']
+ ]
+ },
+ 'increment': {
+ 'options': [None, 'Requests', 'requests', 'increment ops', 'memcached.increment', 'stacked'],
+ 'lines': [
+ ['incr_hits', 'hits', 'percent-of-absolute-row'],
+ ['incr_misses', 'misses', 'percent-of-absolute-row']
+ ]
+ },
+ 'decrement': {
+ 'options': [None, 'Requests', 'requests', 'decrement ops', 'memcached.decrement', 'stacked'],
+ 'lines': [
+ ['decr_hits', 'hits', 'percent-of-absolute-row'],
+ ['decr_misses', 'misses', 'percent-of-absolute-row']
+ ]
+ },
+ 'touch': {
+ 'options': [None, 'Requests', 'requests', 'touch ops', 'memcached.touch', 'stacked'],
+ 'lines': [
+ ['touch_hits', 'hits', 'percent-of-absolute-row'],
+ ['touch_misses', 'misses', 'percent-of-absolute-row']
+ ]
+ },
+ 'touch_rate': {
+ 'options': [None, 'Rate', 'requests/s', 'touch ops', 'memcached.touch_rate', 'line'],
+ 'lines': [
+ ['cmd_touch', 'rate', 'incremental']
+ ]
+ }
+}
+
+
+class Service(SocketService):
+ def __init__(self, configuration=None, name=None):
+ SocketService.__init__(self, configuration=configuration, name=name)
+ self.request = 'stats\r\n'
+ self.host = 'localhost'
+ self.port = 11211
+ self._keep_alive = True
+ self.unix_socket = None
+ self.order = ORDER
+ self.definitions = CHARTS
+
+ def _get_data(self):
+ """
+ Get data from socket
+ :return: dict
+ """
+ response = self._get_raw_data()
+ if response is None:
+ # error has already been logged
+ return None
+
+ if response.startswith('ERROR'):
+ self.error('received ERROR')
+ return None
+
+ try:
+ parsed = response.split('\n')
+ except AttributeError:
+ self.error('response is invalid/empty')
+ return None
+
+ # split the response
+ data = {}
+ for line in parsed:
+ if line.startswith('STAT'):
+ try:
+ t = line[5:].split(' ')
+ data[t[0]] = t[1]
+ except (IndexError, ValueError):
+ self.debug('invalid line received: ' + str(line))
+
+ if not data:
+ self.error("received data doesn't have any records")
+ return None
+
+ # custom calculations
+ try:
+ data['avail'] = int(data['limit_maxbytes']) - int(data['bytes'])
+ data['used'] = int(data['bytes'])
+ except (KeyError, ValueError, TypeError):
+ pass
+
+ return data
+
+ def _check_raw_data(self, data):
+ if data.endswith('END\r\n'):
+ self.debug('received full response from memcached')
+ return True
+
+ self.debug('waiting more data from memcached')
+ return False
+
+ def check(self):
+ """
+ Parse configuration, check if memcached is available
+ :return: boolean
+ """
+ self._parse_config()
+ data = self._get_data()
+ if data is None:
+ return False
+ return True
diff --git a/collectors/python.d.plugin/memcached/memcached.conf b/collectors/python.d.plugin/memcached/memcached.conf
new file mode 100644
index 000000000..85c3daf65
--- /dev/null
+++ b/collectors/python.d.plugin/memcached/memcached.conf
@@ -0,0 +1,92 @@
+# netdata python.d.plugin configuration for memcached
+#
+# This file is in YaML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 1
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# retries sets the number of retries to be made in case of failures.
+# If unset, the default for python.d.plugin is used.
+# Attempts to restore the service are made once every update_every
+# and only if the module has collected values in the past.
+# retries: 60
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# retries: 60 # the JOB's number of restoration attempts
+# autodetection_retry: 0 # the JOB's re-check interval in seconds
+#
+# In addition to the above, memcached also supports the following:
+#
+# socket: 'path/to/memcached.sock'
+#
+# or
+# host: 'IP or HOSTNAME' # the host to connect to
+# port: PORT # the port to connect to
+#
+#
+
+# ----------------------------------------------------------------------
+# AUTO-DETECTION JOBS
+# only one of them will run (they have the same name)
+
+localhost:
+ name : 'local'
+ host : 'localhost'
+ port : 11211
+
+localipv4:
+ name : 'local'
+ host : '127.0.0.1'
+ port : 11211
+
+localipv6:
+ name : 'local'
+ host : '::1'
+ port : 11211
+
diff --git a/collectors/python.d.plugin/mongodb/Makefile.inc b/collectors/python.d.plugin/mongodb/Makefile.inc
new file mode 100644
index 000000000..784945aa6
--- /dev/null
+++ b/collectors/python.d.plugin/mongodb/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += mongodb/mongodb.chart.py
+dist_pythonconfig_DATA += mongodb/mongodb.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += mongodb/README.md mongodb/Makefile.inc
+
diff --git a/collectors/python.d.plugin/mongodb/README.md b/collectors/python.d.plugin/mongodb/README.md
new file mode 100644
index 000000000..8e5f652c5
--- /dev/null
+++ b/collectors/python.d.plugin/mongodb/README.md
@@ -0,0 +1,141 @@
+# mongodb
+
+This module monitors MongoDB performance and health metrics.
+
+**Requirements:**
+ * `python-pymongo` package v2.4+.
+
+You need to install it manually.
+
+
+The number of charts depends on the MongoDB version, storage engine, and other features (replication):
+
+1. **Read requests**:
+ * query
+ * getmore (operation the cursor executes to get additional data from query)
+
+2. **Write requests**:
+ * insert
+ * delete
+ * update
+
+3. **Active clients**:
+ * readers (number of clients with read operations in progress or queued)
+ * writers (number of clients with write operations in progress or queued)
+
+4. **Journal transactions**:
+ * commits (count of transactions that have been written to the journal)
+
+5. **Data written to the journal**:
+ * volume (volume of data)
+
+6. **Background flush** (MMAPv1):
+ * average ms (average time taken by flushes to execute)
+ * last ms (time taken by the last flush)
+
+8. **Read tickets** (WiredTiger):
+ * in use (number of read tickets in use)
+ * available (number of available read tickets remaining)
+
+9. **Write tickets** (WiredTiger):
+ * in use (number of write tickets in use)
+ * available (number of available write tickets remaining)
+
+10. **Cursors**:
+ * opened (number of cursors currently opened by MongoDB for clients)
+    * timedOut (number of cursors that have timed out)
+ * noTimeout (number of open cursors with timeout disabled)
+
+11. **Connections**:
+ * connected (number of clients currently connected to the database server)
+ * unused (number of unused connections available for new clients)
+
+12. **Memory usage metrics**:
+ * virtual
+ * resident (amount of memory used by the database process)
+ * mapped
+ * non mapped
+
+13. **Page faults**:
+    * page faults (number of times MongoDB had to request data from disk)
+
+14. **Cache metrics** (WiredTiger):
+ * percentage of bytes currently in the cache (amount of space taken by cached data)
+    * percentage of tracked dirty bytes in the cache (amount of space taken by dirty data)
+
+15. **Pages evicted from cache** (WiredTiger):
+ * modified
+ * unmodified
+
+16. **Queued requests**:
+    * readers (number of read requests currently queued)
+    * writers (number of write requests currently queued)
+
+17. **Errors**:
+ * msg (number of message assertions raised)
+ * warning (number of warning assertions raised)
+ * regular (number of regular assertions raised)
+ * user (number of assertions corresponding to errors generated by users)
+
+18. **Storage metrics** (one chart for every database)
+ * dataSize (size of all documents + padding in the database)
+ * indexSize (size of all indexes in the database)
+ * storageSize (size of all extents in the database)
+
+19. **Documents in the database** (one chart for all databases)
+ * documents (number of objects in the database among all the collections)
+
+20. **tcmalloc metrics**
+ * central cache free
+ * current total thread cache
+ * pageheap free
+ * pageheap unmapped
+ * thread cache free
+ * transfer cache free
+ * heap size
+
+21. **Commands total/failed rate**
+ * count
+ * createIndex
+ * delete
+ * eval
+ * findAndModify
+ * insert
+
+22. **Locks metrics** (acquireCount metrics - number of times the lock was acquired in the specified mode)
+ * Global lock
+ * Database lock
+ * Collection lock
+ * Metadata lock
+ * oplog lock
+
+23. **Replica set members state**
+ * state
+
+24. **Oplog window**
+ * window (interval of time between the oldest and the latest entries in the oplog)
+
+25. **Replication lag**
+ * member (time when last entry from the oplog was applied for every member)
+
+26. **Replication set member heartbeat latency**
+ * member (time when last heartbeat was received from replica set member)
+
+
+### configuration
+
+Sample:
+
+```yaml
+local:
+ name : 'local'
+ host : '127.0.0.1'
+ port : 27017
+ user : 'netdata'
+ pass : 'netdata'
+
+```
+
+If no configuration is given, the module will attempt to connect to the mongodb daemon at `127.0.0.1:27017`.
+
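+As a quick sanity check, you can run the same `serverStatus` command the module relies on with `pymongo` directly. This is a minimal sketch (host, port and the `netdata`/`netdata` credentials are the sample values from the configuration above; adjust them to your setup):
+
+```python
+# Minimal connectivity check, mirroring what mongodb.chart.py does on every update.
+from pymongo import MongoClient
+
+client = MongoClient(host='127.0.0.1', port=27017, serverSelectionTimeoutMS=100)
+# client.admin.authenticate('netdata', 'netdata')  # only if authentication is enabled
+status = client.admin.command('serverStatus')
+print(status['version'], status['uptime'])
+```
+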
+---
diff --git a/collectors/python.d.plugin/mongodb/mongodb.chart.py b/collectors/python.d.plugin/mongodb/mongodb.chart.py
new file mode 100644
index 000000000..10344342d
--- /dev/null
+++ b/collectors/python.d.plugin/mongodb/mongodb.chart.py
@@ -0,0 +1,731 @@
+# -*- coding: utf-8 -*-
+# Description: mongodb netdata python.d module
+# Author: l2isbad
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from copy import deepcopy
+from datetime import datetime
+from sys import exc_info
+
+try:
+ from pymongo import MongoClient, ASCENDING, DESCENDING
+ from pymongo.errors import PyMongoError
+ PYMONGO = True
+except ImportError:
+ PYMONGO = False
+
+from bases.FrameworkServices.SimpleService import SimpleService
+
+# default module values (can be overridden per job in `config`)
+# update_every = 2
+priority = 60000
+retries = 60
+
+REPL_SET_STATES = [
+ ('1', 'primary'),
+ ('8', 'down'),
+ ('2', 'secondary'),
+ ('3', 'recovering'),
+ ('5', 'startup2'),
+ ('4', 'fatal'),
+ ('7', 'arbiter'),
+ ('6', 'unknown'),
+ ('9', 'rollback'),
+ ('10', 'removed'),
+ ('0', 'startup')
+]
+
+
+def multiply_by_100(value):
+ return value * 100
+
+
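+# Each metrics entry below is a (path, name, transform) tuple:
+#   path      - dot-separated key path inside the `serverStatus` document
+#   name      - optional dimension name override (None keeps the last key of the path)
+#   transform - optional callable applied to the collected value (e.g. multiply_by_100)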
+DEFAULT_METRICS = [
+ ('opcounters.delete', None, None),
+ ('opcounters.update', None, None),
+ ('opcounters.insert', None, None),
+ ('opcounters.query', None, None),
+ ('opcounters.getmore', None, None),
+ ('globalLock.activeClients.readers', 'activeClients_readers', None),
+ ('globalLock.activeClients.writers', 'activeClients_writers', None),
+ ('connections.available', 'connections_available', None),
+ ('connections.current', 'connections_current', None),
+ ('mem.mapped', None, None),
+ ('mem.resident', None, None),
+ ('mem.virtual', None, None),
+ ('globalLock.currentQueue.readers', 'currentQueue_readers', None),
+ ('globalLock.currentQueue.writers', 'currentQueue_writers', None),
+ ('asserts.msg', None, None),
+ ('asserts.regular', None, None),
+ ('asserts.user', None, None),
+ ('asserts.warning', None, None),
+ ('extra_info.page_faults', None, None),
+ ('metrics.record.moves', None, None),
+ ('backgroundFlushing.average_ms', None, multiply_by_100),
+ ('backgroundFlushing.last_ms', None, multiply_by_100),
+ ('backgroundFlushing.flushes', None, multiply_by_100),
+ ('metrics.cursor.timedOut', None, None),
+ ('metrics.cursor.open.total', 'cursor_total', None),
+ ('metrics.cursor.open.noTimeout', None, None),
+ ('cursors.timedOut', None, None),
+ ('cursors.totalOpen', 'cursor_total', None)
+]
+
+DUR = [
+ ('dur.commits', None, None),
+ ('dur.journaledMB', None, multiply_by_100)
+]
+
+WIREDTIGER = [
+ ('wiredTiger.concurrentTransactions.read.available', 'wiredTigerRead_available', None),
+ ('wiredTiger.concurrentTransactions.read.out', 'wiredTigerRead_out', None),
+ ('wiredTiger.concurrentTransactions.write.available', 'wiredTigerWrite_available', None),
+ ('wiredTiger.concurrentTransactions.write.out', 'wiredTigerWrite_out', None),
+ ('wiredTiger.cache.bytes currently in the cache', None, None),
+ ('wiredTiger.cache.tracked dirty bytes in the cache', None, None),
+ ('wiredTiger.cache.maximum bytes configured', None, None),
+ ('wiredTiger.cache.unmodified pages evicted', 'unmodified', None),
+ ('wiredTiger.cache.modified pages evicted', 'modified', None)
+]
+
+TCMALLOC = [
+ ('tcmalloc.generic.current_allocated_bytes', None, None),
+ ('tcmalloc.generic.heap_size', None, None),
+ ('tcmalloc.tcmalloc.central_cache_free_bytes', None, None),
+ ('tcmalloc.tcmalloc.current_total_thread_cache_bytes', None, None),
+ ('tcmalloc.tcmalloc.pageheap_free_bytes', None, None),
+ ('tcmalloc.tcmalloc.pageheap_unmapped_bytes', None, None),
+ ('tcmalloc.tcmalloc.thread_cache_free_bytes', None, None),
+ ('tcmalloc.tcmalloc.transfer_cache_free_bytes', None, None)
+]
+
+COMMANDS = [
+ ('metrics.commands.count.total', 'count_total', None),
+ ('metrics.commands.createIndexes.total', 'createIndexes_total', None),
+ ('metrics.commands.delete.total', 'delete_total', None),
+ ('metrics.commands.eval.total', 'eval_total', None),
+ ('metrics.commands.findAndModify.total', 'findAndModify_total', None),
+ ('metrics.commands.insert.total', 'insert_total', None),
+ ('metrics.commands.delete.total', 'delete_total', None),
+ ('metrics.commands.count.failed', 'count_failed', None),
+ ('metrics.commands.createIndexes.failed', 'createIndexes_failed', None),
+ ('metrics.commands.delete.failed', 'delete_failed', None),
+ ('metrics.commands.eval.failed', 'eval_failed', None),
+ ('metrics.commands.findAndModify.failed', 'findAndModify_failed', None),
+ ('metrics.commands.insert.failed', 'insert_failed', None),
+ ('metrics.commands.delete.failed', 'delete_failed', None)
+]
+
+LOCKS = [
+ ('locks.Collection.acquireCount.R', 'Collection_R', None),
+ ('locks.Collection.acquireCount.r', 'Collection_r', None),
+ ('locks.Collection.acquireCount.W', 'Collection_W', None),
+ ('locks.Collection.acquireCount.w', 'Collection_w', None),
+ ('locks.Database.acquireCount.R', 'Database_R', None),
+ ('locks.Database.acquireCount.r', 'Database_r', None),
+ ('locks.Database.acquireCount.W', 'Database_W', None),
+ ('locks.Database.acquireCount.w', 'Database_w', None),
+ ('locks.Global.acquireCount.R', 'Global_R', None),
+ ('locks.Global.acquireCount.r', 'Global_r', None),
+ ('locks.Global.acquireCount.W', 'Global_W', None),
+ ('locks.Global.acquireCount.w', 'Global_w', None),
+ ('locks.Metadata.acquireCount.R', 'Metadata_R', None),
+ ('locks.Metadata.acquireCount.w', 'Metadata_w', None),
+ ('locks.oplog.acquireCount.r', 'oplog_r', None),
+ ('locks.oplog.acquireCount.w', 'oplog_w', None)
+]
+
+DBSTATS = [
+ 'dataSize',
+ 'indexSize',
+ 'storageSize',
+ 'objects'
+]
+
+# charts order (can be overridden if you want less charts, or different order)
+ORDER = [
+ 'read_operations',
+ 'write_operations',
+ 'active_clients',
+ 'journaling_transactions',
+ 'journaling_volume',
+ 'background_flush_average',
+ 'background_flush_last',
+ 'background_flush_rate',
+ 'wiredtiger_read',
+ 'wiredtiger_write',
+ 'cursors',
+ 'connections',
+ 'memory',
+ 'page_faults',
+ 'queued_requests',
+ 'record_moves',
+ 'wiredtiger_cache',
+ 'wiredtiger_pages_evicted',
+ 'asserts',
+ 'locks_collection',
+ 'locks_database',
+ 'locks_global',
+ 'locks_metadata',
+ 'locks_oplog',
+ 'dbstats_objects',
+ 'tcmalloc_generic',
+ 'tcmalloc_metrics',
+ 'command_total_rate',
+ 'command_failed_rate'
+]
+
+CHARTS = {
+ 'read_operations': {
+ 'options': [None, 'Received read requests', 'requests/s', 'throughput metrics',
+ 'mongodb.read_operations', 'line'],
+ 'lines': [
+ ['query', None, 'incremental'],
+ ['getmore', None, 'incremental']
+ ]
+ },
+ 'write_operations': {
+ 'options': [None, 'Received write requests', 'requests/s', 'throughput metrics',
+ 'mongodb.write_operations', 'line'],
+ 'lines': [
+ ['insert', None, 'incremental'],
+ ['update', None, 'incremental'],
+ ['delete', None, 'incremental']
+ ]
+ },
+ 'active_clients': {
+ 'options': [None, 'Clients with read or write operations in progress or queued', 'clients',
+ 'throughput metrics', 'mongodb.active_clients', 'line'],
+ 'lines': [
+ ['activeClients_readers', 'readers', 'absolute'],
+ ['activeClients_writers', 'writers', 'absolute']
+ ]
+ },
+ 'journaling_transactions': {
+ 'options': [None, 'Transactions that have been written to the journal', 'commits',
+ 'database performance', 'mongodb.journaling_transactions', 'line'],
+ 'lines': [
+ ['commits', None, 'absolute']
+ ]
+ },
+ 'journaling_volume': {
+ 'options': [None, 'Volume of data written to the journal', 'MB', 'database performance',
+ 'mongodb.journaling_volume', 'line'],
+ 'lines': [
+ ['journaledMB', 'volume', 'absolute', 1, 100]
+ ]
+ },
+ 'background_flush_average': {
+ 'options': [None, 'Average time taken by flushes to execute', 'ms', 'database performance',
+ 'mongodb.background_flush_average', 'line'],
+ 'lines': [
+ ['average_ms', 'time', 'absolute', 1, 100]
+ ]
+ },
+ 'background_flush_last': {
+ 'options': [None, 'Time taken by the last flush operation to execute', 'ms', 'database performance',
+ 'mongodb.background_flush_last', 'line'],
+ 'lines': [
+ ['last_ms', 'time', 'absolute', 1, 100]
+ ]
+ },
+ 'background_flush_rate': {
+ 'options': [None, 'Flushes rate', 'flushes', 'database performance', 'mongodb.background_flush_rate', 'line'],
+ 'lines': [
+ ['flushes', 'flushes', 'incremental', 1, 1]
+ ]
+ },
+ 'wiredtiger_read': {
+ 'options': [None, 'Read tickets in use and remaining', 'tickets', 'database performance',
+ 'mongodb.wiredtiger_read', 'stacked'],
+ 'lines': [
+ ['wiredTigerRead_available', 'available', 'absolute', 1, 1],
+ ['wiredTigerRead_out', 'inuse', 'absolute', 1, 1]
+ ]
+ },
+ 'wiredtiger_write': {
+ 'options': [None, 'Write tickets in use and remaining', 'tickets', 'database performance',
+ 'mongodb.wiredtiger_write', 'stacked'],
+ 'lines': [
+ ['wiredTigerWrite_available', 'available', 'absolute', 1, 1],
+ ['wiredTigerWrite_out', 'inuse', 'absolute', 1, 1]
+ ]
+ },
+ 'cursors': {
+        'options': [None, 'Currently opened cursors, cursors with timeout disabled and timed out cursors',
+ 'cursors', 'database performance', 'mongodb.cursors', 'stacked'],
+ 'lines': [
+            ['cursor_total', 'opened', 'absolute', 1, 1],
+ ['noTimeout', None, 'absolute', 1, 1],
+ ['timedOut', None, 'incremental', 1, 1]
+ ]
+ },
+ 'connections': {
+ 'options': [None, 'Currently connected clients and unused connections', 'connections',
+ 'resource utilization', 'mongodb.connections', 'stacked'],
+ 'lines': [
+ ['connections_available', 'unused', 'absolute', 1, 1],
+ ['connections_current', 'connected', 'absolute', 1, 1]
+ ]
+ },
+ 'memory': {
+ 'options': [None, 'Memory metrics', 'MB', 'resource utilization', 'mongodb.memory', 'stacked'],
+ 'lines': [
+ ['virtual', None, 'absolute', 1, 1],
+ ['resident', None, 'absolute', 1, 1],
+ ['nonmapped', None, 'absolute', 1, 1],
+ ['mapped', None, 'absolute', 1, 1]
+ ]
+ },
+ 'page_faults': {
+ 'options': [None, 'Number of times MongoDB had to fetch data from disk', 'request/s',
+ 'resource utilization', 'mongodb.page_faults', 'line'],
+ 'lines': [
+ ['page_faults', None, 'incremental', 1, 1]
+ ]
+ },
+ 'queued_requests': {
+ 'options': [None, 'Currently queued read and write requests', 'requests', 'resource saturation',
+ 'mongodb.queued_requests', 'line'],
+ 'lines': [
+ ['currentQueue_readers', 'readers', 'absolute', 1, 1],
+ ['currentQueue_writers', 'writers', 'absolute', 1, 1]
+ ]
+ },
+ 'record_moves': {
+ 'options': [None, 'Number of times documents had to be moved on-disk', 'number',
+ 'resource saturation', 'mongodb.record_moves', 'line'],
+ 'lines': [
+ ['moves', None, 'incremental', 1, 1]
+ ]
+ },
+ 'asserts': {
+ 'options': [
+ None,
+            'Number of message, warning, regular and user (errors generated by users) assertions raised',
+ 'number', 'errors (asserts)', 'mongodb.asserts', 'line'],
+ 'lines': [
+ ['msg', None, 'incremental', 1, 1],
+ ['warning', None, 'incremental', 1, 1],
+ ['regular', None, 'incremental', 1, 1],
+ ['user', None, 'incremental', 1, 1]
+ ]
+ },
+ 'wiredtiger_cache': {
+        'options': [None, 'The percentage of the WiredTiger cache in use and the percentage filled with dirty bytes',
+ 'percent', 'resource utilization', 'mongodb.wiredtiger_cache', 'stacked'],
+ 'lines': [
+ ['wiredTiger_percent_clean', 'inuse', 'absolute', 1, 1000],
+ ['wiredTiger_percent_dirty', 'dirty', 'absolute', 1, 1000]
+ ]
+ },
+ 'wiredtiger_pages_evicted': {
+ 'options': [None, 'Pages evicted from the cache',
+ 'pages', 'resource utilization', 'mongodb.wiredtiger_pages_evicted', 'stacked'],
+ 'lines': [
+ ['unmodified', None, 'absolute', 1, 1],
+ ['modified', None, 'absolute', 1, 1]
+ ]
+ },
+ 'dbstats_objects': {
+ 'options': [None, 'Number of documents in the database among all the collections', 'documents',
+ 'storage size metrics', 'mongodb.dbstats_objects', 'stacked'],
+ 'lines': []
+ },
+ 'tcmalloc_generic': {
+ 'options': [None, 'Tcmalloc generic metrics', 'MB', 'tcmalloc', 'mongodb.tcmalloc_generic', 'stacked'],
+ 'lines': [
+ ['current_allocated_bytes', 'allocated', 'absolute', 1, 1048576],
+ ['heap_size', 'heap_size', 'absolute', 1, 1048576]
+ ]
+ },
+ 'tcmalloc_metrics': {
+ 'options': [None, 'Tcmalloc metrics', 'KB', 'tcmalloc', 'mongodb.tcmalloc_metrics', 'stacked'],
+ 'lines': [
+ ['central_cache_free_bytes', 'central_cache_free', 'absolute', 1, 1024],
+ ['current_total_thread_cache_bytes', 'current_total_thread_cache', 'absolute', 1, 1024],
+ ['pageheap_free_bytes', 'pageheap_free', 'absolute', 1, 1024],
+ ['pageheap_unmapped_bytes', 'pageheap_unmapped', 'absolute', 1, 1024],
+ ['thread_cache_free_bytes', 'thread_cache_free', 'absolute', 1, 1024],
+ ['transfer_cache_free_bytes', 'transfer_cache_free', 'absolute', 1, 1024]
+ ]
+ },
+ 'command_total_rate': {
+ 'options': [None, 'Commands total rate', 'commands/s', 'commands', 'mongodb.command_total_rate', 'stacked'],
+ 'lines': [
+ ['count_total', 'count', 'incremental', 1, 1],
+ ['createIndexes_total', 'createIndexes', 'incremental', 1, 1],
+ ['delete_total', 'delete', 'incremental', 1, 1],
+ ['eval_total', 'eval', 'incremental', 1, 1],
+ ['findAndModify_total', 'findAndModify', 'incremental', 1, 1],
+ ['insert_total', 'insert', 'incremental', 1, 1],
+ ['update_total', 'update', 'incremental', 1, 1]
+ ]
+ },
+ 'command_failed_rate': {
+ 'options': [None, 'Commands failed rate', 'commands/s', 'commands', 'mongodb.command_failed_rate', 'stacked'],
+ 'lines': [
+ ['count_failed', 'count', 'incremental', 1, 1],
+ ['createIndexes_failed', 'createIndexes', 'incremental', 1, 1],
+ ['delete_failed', 'delete', 'incremental', 1, 1],
+ ['eval_failed', 'eval', 'incremental', 1, 1],
+ ['findAndModify_failed', 'findAndModify', 'incremental', 1, 1],
+ ['insert_failed', 'insert', 'incremental', 1, 1],
+ ['update_failed', 'update', 'incremental', 1, 1]
+ ]
+ },
+ 'locks_collection': {
+ 'options': [None, 'Collection lock. Number of times the lock was acquired in the specified mode',
+ 'locks', 'locks metrics', 'mongodb.locks_collection', 'stacked'],
+ 'lines': [
+ ['Collection_R', 'shared', 'incremental'],
+ ['Collection_W', 'exclusive', 'incremental'],
+ ['Collection_r', 'intent_shared', 'incremental'],
+ ['Collection_w', 'intent_exclusive', 'incremental']
+ ]
+ },
+ 'locks_database': {
+ 'options': [None, 'Database lock. Number of times the lock was acquired in the specified mode',
+ 'locks', 'locks metrics', 'mongodb.locks_database', 'stacked'],
+ 'lines': [
+ ['Database_R', 'shared', 'incremental'],
+ ['Database_W', 'exclusive', 'incremental'],
+ ['Database_r', 'intent_shared', 'incremental'],
+ ['Database_w', 'intent_exclusive', 'incremental']
+ ]
+ },
+ 'locks_global': {
+ 'options': [None, 'Global lock. Number of times the lock was acquired in the specified mode',
+ 'locks', 'locks metrics', 'mongodb.locks_global', 'stacked'],
+ 'lines': [
+ ['Global_R', 'shared', 'incremental'],
+ ['Global_W', 'exclusive', 'incremental'],
+ ['Global_r', 'intent_shared', 'incremental'],
+ ['Global_w', 'intent_exclusive', 'incremental']
+ ]
+ },
+ 'locks_metadata': {
+ 'options': [None, 'Metadata lock. Number of times the lock was acquired in the specified mode',
+ 'locks', 'locks metrics', 'mongodb.locks_metadata', 'stacked'],
+ 'lines': [
+ ['Metadata_R', 'shared', 'incremental'],
+ ['Metadata_w', 'intent_exclusive', 'incremental']
+ ]
+ },
+ 'locks_oplog': {
+ 'options': [None, 'Lock on the oplog. Number of times the lock was acquired in the specified mode',
+ 'locks', 'locks metrics', 'mongodb.locks_oplog', 'stacked'],
+ 'lines': [
+ ['oplog_r', 'intent_shared', 'incremental'],
+ ['oplog_w', 'intent_exclusive', 'incremental']
+ ]
+ }
+}
+
+
+class Service(SimpleService):
+ def __init__(self, configuration=None, name=None):
+ SimpleService.__init__(self, configuration=configuration, name=name)
+ self.order = ORDER[:]
+ self.definitions = deepcopy(CHARTS)
+ self.user = self.configuration.get('user')
+ self.password = self.configuration.get('pass')
+ self.host = self.configuration.get('host', '127.0.0.1')
+ self.port = self.configuration.get('port', 27017)
+ self.timeout = self.configuration.get('timeout', 100)
+ self.metrics_to_collect = deepcopy(DEFAULT_METRICS)
+ self.connection = None
+ self.do_replica = None
+ self.databases = list()
+
+ def check(self):
+ if not PYMONGO:
+ self.error('Pymongo package v2.4+ is needed to use mongodb.chart.py')
+ return False
+ self.connection, server_status, error = self._create_connection()
+ if error:
+ self.error(error)
+ return False
+
+ self.build_metrics_to_collect_(server_status)
+
+ try:
+ data = self._get_data()
+ except (LookupError, SyntaxError, AttributeError):
+ self.error('Type: %s, error: %s' % (str(exc_info()[0]), str(exc_info()[1])))
+ return False
+ if isinstance(data, dict) and data:
+ self._data_from_check = data
+ self.create_charts_(server_status)
+ return True
+ self.error('_get_data() returned no data or type is not <dict>')
+ return False
+
+ def build_metrics_to_collect_(self, server_status):
+
+ self.do_replica = 'repl' in server_status
+ if 'dur' in server_status:
+ self.metrics_to_collect.extend(DUR)
+ if 'tcmalloc' in server_status:
+ self.metrics_to_collect.extend(TCMALLOC)
+ if 'commands' in server_status['metrics']:
+ self.metrics_to_collect.extend(COMMANDS)
+ if 'wiredTiger' in server_status:
+ self.metrics_to_collect.extend(WIREDTIGER)
+ if 'Collection' in server_status['locks']:
+ self.metrics_to_collect.extend(LOCKS)
+
+ def create_charts_(self, server_status):
+
+ if 'dur' not in server_status:
+ self.order.remove('journaling_transactions')
+ self.order.remove('journaling_volume')
+
+ if 'backgroundFlushing' not in server_status:
+ self.order.remove('background_flush_average')
+ self.order.remove('background_flush_last')
+ self.order.remove('background_flush_rate')
+
+ if 'wiredTiger' not in server_status:
+ self.order.remove('wiredtiger_write')
+ self.order.remove('wiredtiger_read')
+ self.order.remove('wiredtiger_cache')
+
+ if 'tcmalloc' not in server_status:
+ self.order.remove('tcmalloc_generic')
+ self.order.remove('tcmalloc_metrics')
+
+ if 'commands' not in server_status['metrics']:
+ self.order.remove('command_total_rate')
+ self.order.remove('command_failed_rate')
+
+ if 'Collection' not in server_status['locks']:
+ self.order.remove('locks_collection')
+ self.order.remove('locks_database')
+ self.order.remove('locks_global')
+ self.order.remove('locks_metadata')
+
+ if 'oplog' not in server_status['locks']:
+ self.order.remove('locks_oplog')
+
+ for dbase in self.databases:
+ self.order.append('_'.join([dbase, 'dbstats']))
+ self.definitions['_'.join([dbase, 'dbstats'])] = {
+ 'options': [None, '%s: size of all documents, indexes, extents' % dbase, 'KB',
+ 'storage size metrics', 'mongodb.dbstats', 'line'],
+ 'lines': [
+ ['_'.join([dbase, 'dataSize']), 'documents', 'absolute', 1, 1024],
+ ['_'.join([dbase, 'indexSize']), 'indexes', 'absolute', 1, 1024],
+ ['_'.join([dbase, 'storageSize']), 'extents', 'absolute', 1, 1024]
+ ]}
+ self.definitions['dbstats_objects']['lines'].append(['_'.join([dbase, 'objects']), dbase, 'absolute'])
+
+ if self.do_replica:
+ def create_lines(hosts, string):
+ lines = list()
+ for host in hosts:
+ dim_id = '_'.join([host, string])
+ lines.append([dim_id, host, 'absolute', 1, 1000])
+ return lines
+
+ def create_state_lines(states):
+ lines = list()
+ for state, description in states:
+ dim_id = '_'.join([host, 'state', state])
+ lines.append([dim_id, description, 'absolute', 1, 1])
+ return lines
+
+ all_hosts = server_status['repl']['hosts'] + server_status['repl'].get('arbiters', list())
+ this_host = server_status['repl']['me']
+ other_hosts = [host for host in all_hosts if host != this_host]
+
+ if 'local' in self.databases:
+ self.order.append('oplog_window')
+ self.definitions['oplog_window'] = {
+ 'options': [None, 'Interval of time between the oldest and the latest entries in the oplog',
+ 'seconds', 'replication and oplog', 'mongodb.oplog_window', 'line'],
+ 'lines': [['timeDiff', 'window', 'absolute', 1, 1000]]}
+ # Create "heartbeat delay" chart
+ self.order.append('heartbeat_delay')
+ self.definitions['heartbeat_delay'] = {
+ 'options': [
+ None,
+ 'Time when last heartbeat was received from the replica set member (lastHeartbeatRecv)',
+ 'seconds ago', 'replication and oplog', 'mongodb.replication_heartbeat_delay', 'stacked'],
+ 'lines': create_lines(other_hosts, 'heartbeat_lag')}
+ # Create "optimedate delay" chart
+ self.order.append('optimedate_delay')
+ self.definitions['optimedate_delay'] = {
+ 'options': [None, 'Time when last entry from the oplog was applied (optimeDate)',
+ 'seconds ago', 'replication and oplog', 'mongodb.replication_optimedate_delay', 'stacked'],
+ 'lines': create_lines(all_hosts, 'optimedate')}
+ # Create "replica set members state" chart
+ for host in all_hosts:
+ chart_name = '_'.join([host, 'state'])
+ self.order.append(chart_name)
+ self.definitions[chart_name] = {
+ 'options': [None, 'Replica set member (%s) current state' % host, 'state',
+ 'replication and oplog', 'mongodb.replication_state', 'line'],
+ 'lines': create_state_lines(REPL_SET_STATES)}
+
+ def _get_raw_data(self):
+ raw_data = dict()
+
+ raw_data.update(self.get_server_status() or dict())
+ raw_data.update(self.get_db_stats() or dict())
+ raw_data.update(self.get_repl_set_get_status() or dict())
+ raw_data.update(self.get_get_replication_info() or dict())
+
+ return raw_data or None
+
+ def get_server_status(self):
+ raw_data = dict()
+ try:
+ raw_data['serverStatus'] = self.connection.admin.command('serverStatus')
+ except PyMongoError:
+ return None
+ else:
+ return raw_data
+
+ def get_db_stats(self):
+ if not self.databases:
+ return None
+
+ raw_data = dict()
+ raw_data['dbStats'] = dict()
+ try:
+ for dbase in self.databases:
+ raw_data['dbStats'][dbase] = self.connection[dbase].command('dbStats')
+ return raw_data
+ except PyMongoError:
+ return None
+
+ def get_repl_set_get_status(self):
+ if not self.do_replica:
+ return None
+
+ raw_data = dict()
+ try:
+ raw_data['replSetGetStatus'] = self.connection.admin.command('replSetGetStatus')
+ return raw_data
+ except PyMongoError:
+ return None
+
+ def get_get_replication_info(self):
+ if not (self.do_replica and 'local' in self.databases):
+ return None
+
+ raw_data = dict()
+ raw_data['getReplicationInfo'] = dict()
+ try:
+ raw_data['getReplicationInfo']['ASCENDING'] = self.connection.local.oplog.rs.find().sort(
+ '$natural', ASCENDING).limit(1)[0]
+ raw_data['getReplicationInfo']['DESCENDING'] = self.connection.local.oplog.rs.find().sort(
+ '$natural', DESCENDING).limit(1)[0]
+ return raw_data
+ except PyMongoError:
+ return None
+
+ def _get_data(self):
+ """
+ :return: dict
+ """
+ raw_data = self._get_raw_data()
+
+ if not raw_data:
+ return None
+
+ to_netdata = dict()
+ serverStatus = raw_data['serverStatus']
+ dbStats = raw_data.get('dbStats')
+ replSetGetStatus = raw_data.get('replSetGetStatus')
+ getReplicationInfo = raw_data.get('getReplicationInfo')
+ utc_now = datetime.utcnow()
+
+ # serverStatus
+ for metric, new_name, func in self.metrics_to_collect:
+ value = serverStatus
+ for key in metric.split('.'):
+ try:
+ value = value[key]
+ except KeyError:
+ break
+
+ if not isinstance(value, dict) and key:
+ to_netdata[new_name or key] = value if not func else func(value)
+
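+        # Non-mapped memory is derived: virtual size minus the mapped size
+        # ('mappedWithJournal' when journaling is enabled, plain 'mapped' otherwise).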
+ to_netdata['nonmapped'] = to_netdata['virtual'] - serverStatus['mem'].get('mappedWithJournal',
+ to_netdata['mapped'])
+ if to_netdata.get('maximum bytes configured'):
+ maximum = to_netdata['maximum bytes configured']
+ to_netdata['wiredTiger_percent_clean'] = int(to_netdata['bytes currently in the cache']
+ * 100 / maximum * 1000)
+ to_netdata['wiredTiger_percent_dirty'] = int(to_netdata['tracked dirty bytes in the cache']
+ * 100 / maximum * 1000)
+
+ # dbStats
+ if dbStats:
+ for dbase in dbStats:
+ for metric in DBSTATS:
+ key = '_'.join([dbase, metric])
+ to_netdata[key] = dbStats[dbase][metric]
+
+ # replSetGetStatus
+ if replSetGetStatus:
+ other_hosts = list()
+ members = replSetGetStatus['members']
+ unix_epoch = datetime(1970, 1, 1, 0, 0)
+
+ for member in members:
+ if not member.get('self'):
+ other_hosts.append(member)
+ # Replica set time diff between current time and time when last entry from the oplog was applied
+ if member.get('optimeDate', unix_epoch) != unix_epoch:
+ member_optimedate = member['name'] + '_optimedate'
+ to_netdata.update({member_optimedate: int(delta_calculation(delta=utc_now - member['optimeDate'],
+ multiplier=1000))})
+ # Replica set members state
+ member_state = member['name'] + '_state'
+ for elem in REPL_SET_STATES:
+ state = elem[0]
+ to_netdata.update({'_'.join([member_state, state]): 0})
+ to_netdata.update({'_'.join([member_state, str(member['state'])]): member['state']})
+ # Heartbeat lag calculation
+ for other in other_hosts:
+ if other['lastHeartbeatRecv'] != unix_epoch:
+ node = other['name'] + '_heartbeat_lag'
+ to_netdata[node] = int(delta_calculation(delta=utc_now - other['lastHeartbeatRecv'],
+ multiplier=1000))
+
+ if getReplicationInfo:
+ first_event = getReplicationInfo['ASCENDING']['ts'].as_datetime()
+ last_event = getReplicationInfo['DESCENDING']['ts'].as_datetime()
+ to_netdata['timeDiff'] = int(delta_calculation(delta=last_event - first_event, multiplier=1000))
+
+ return to_netdata
+
+ def _create_connection(self):
+ conn_vars = {'host': self.host, 'port': self.port}
+ if hasattr(MongoClient, 'server_selection_timeout'):
+ conn_vars.update({'serverselectiontimeoutms': self.timeout})
+ try:
+ connection = MongoClient(**conn_vars)
+ if self.user and self.password:
+ connection.admin.authenticate(name=self.user, password=self.password)
+ # elif self.user:
+ # connection.admin.authenticate(name=self.user, mechanism='MONGODB-X509')
+ server_status = connection.admin.command('serverStatus')
+ except PyMongoError as error:
+ return None, None, str(error)
+ else:
+ try:
+ self.databases = connection.database_names()
+ except PyMongoError as error:
+ self.info('Can\'t collect databases: %s' % str(error))
+ return connection, server_status, None
+
+
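+# Convert a timedelta to seconds (scaled by `multiplier`); the manual formula is the
+# equivalent of total_seconds() for Python versions that do not provide it (< 2.7).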
+def delta_calculation(delta, multiplier=1):
+ if hasattr(delta, 'total_seconds'):
+ return delta.total_seconds() * multiplier
+ return (delta.microseconds + (delta.seconds + delta.days * 24 * 3600) * 10 ** 6) / 10.0 ** 6 * multiplier
diff --git a/collectors/python.d.plugin/mongodb/mongodb.conf b/collectors/python.d.plugin/mongodb/mongodb.conf
new file mode 100644
index 000000000..62faef68d
--- /dev/null
+++ b/collectors/python.d.plugin/mongodb/mongodb.conf
@@ -0,0 +1,84 @@
+# netdata python.d.plugin configuration for mongodb
+#
+# This file is in YaML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 1
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# retries sets the number of retries to be made in case of failures.
+# If unset, the default for python.d.plugin is used.
+# Attempts to restore the service are made once every update_every
+# and only if the module has collected values in the past.
+# retries: 60
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# retries: 60 # the JOB's number of restoration attempts
+# autodetection_retry: 0 # the JOB's re-check interval in seconds
+#
+# Additionally to the above, mongodb also supports the following:
+#
+# host: 'IP or HOSTNAME' # type <str> the host to connect to
+# port: PORT # type <int> the port to connect to
+#
+# in all cases, the following can also be set:
+#
+# user: 'username' # the mongodb username to use
+# pass: 'password' # the mongodb password to use
+#
+
+# ----------------------------------------------------------------------
+# to connect to the mongodb on localhost, without a password:
+# ----------------------------------------------------------------------
+# AUTO-DETECTION JOBS
+# only one of them will run (they have the same name)
+
+local:
+ name : 'local'
+ host : '127.0.0.1'
+ port : 27017
diff --git a/collectors/python.d.plugin/monit/Makefile.inc b/collectors/python.d.plugin/monit/Makefile.inc
new file mode 100644
index 000000000..4a3673fd5
--- /dev/null
+++ b/collectors/python.d.plugin/monit/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += monit/monit.chart.py
+dist_pythonconfig_DATA += monit/monit.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += monit/README.md monit/Makefile.inc
+
diff --git a/collectors/python.d.plugin/monit/README.md b/collectors/python.d.plugin/monit/README.md
new file mode 100644
index 000000000..6d10240c9
--- /dev/null
+++ b/collectors/python.d.plugin/monit/README.md
@@ -0,0 +1,33 @@
+# monit
+
+Monit monitoring module. Data is grabbed from the stats XML interface (it has existed for a long time, but is not mentioned in the official documentation). Mostly this module shows the statuses of monit targets, i.e. [statuses of specified checks](https://mmonit.com/monit/documentation/monit.html#Service-checks).
+
+1. **Filesystems**
+ * Filesystems
+ * Directories
+ * Files
+ * Pipes
+
+2. **Applications**
+ * Processes (+threads/children)
+ * Programs
+
+3. **Network**
+ * Hosts (+latency)
+ * Network interfaces
+
+### configuration
+
+Sample:
+
+```yaml
+local:
+ name : 'local'
+ url : 'http://localhost:2812'
+ user : 'admin'
+ pass : 'monit'
+```
+
+If no configuration is given, the module will attempt to connect to monit at `http://localhost:2812`.
+
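+To verify that the XML interface is reachable (and that your credentials work), you can fetch the same endpoint the module polls. A minimal sketch, assuming monit listens on `localhost:2812` and uses the sample `admin`/`monit` credentials:
+
+```python
+# Fetch monit's status XML - the endpoint monit.chart.py polls on every update.
+import urllib.request
+
+url = 'http://localhost:2812/_status?format=xml&level=full'
+password_mgr = urllib.request.HTTPPasswordMgrWithDefaultRealm()
+password_mgr.add_password(None, url, 'admin', 'monit')  # only needed if monit requires auth
+opener = urllib.request.build_opener(urllib.request.HTTPBasicAuthHandler(password_mgr))
+print(opener.open(url).read()[:300])  # first bytes of the XML document
+```
+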
+---
diff --git a/collectors/python.d.plugin/monit/monit.chart.py b/collectors/python.d.plugin/monit/monit.chart.py
new file mode 100644
index 000000000..51943c0e1
--- /dev/null
+++ b/collectors/python.d.plugin/monit/monit.chart.py
@@ -0,0 +1,166 @@
+# -*- coding: utf-8 -*-
+# Description: monit netdata python.d module
+# Author: Evgeniy K. (n0guest)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+import xml.etree.ElementTree as ET
+from bases.FrameworkServices.UrlService import UrlService
+
+# default module values (can be overridden per job in `config`)
+# update_every = 2
+priority = 60000
+retries = 60
+
+# see enum State_Type from monit.h (https://bitbucket.org/tildeslash/monit/src/master/src/monit.h)
+MONIT_SERVICE_NAMES = ['Filesystem', 'Directory', 'File', 'Process', 'Host', 'System', 'Fifo', 'Program', 'Net']
+DEFAULT_SERVICES_IDS = [0, 1, 2, 3, 4, 6, 7, 8]
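+# Indices into MONIT_SERVICE_NAMES for the service types charted by default
+# (index 5, 'System', is left out; system metrics are not charted).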
+
+# charts order (can be overridden if you want less charts, or different order)
+ORDER = [
+ 'filesystem',
+ 'directory',
+ 'file',
+ 'process',
+ 'process_uptime',
+ 'process_threads',
+ 'process_children',
+ 'host',
+ 'host_latency',
+ 'system',
+ 'fifo',
+ 'program',
+ 'net'
+]
+CHARTS = {
+ 'filesystem': {
+ 'options': ['filesystems', 'Filesystems', 'filesystems', 'filesystem', 'monit.filesystems', 'line'],
+ 'lines': []
+ },
+ 'directory': {
+ 'options': ['directories', 'Directories', 'directories', 'filesystem', 'monit.directories', 'line'],
+ 'lines': []
+ },
+ 'file': {
+ 'options': ['files', 'Files', 'files', 'filesystem', 'monit.files', 'line'],
+ 'lines': []
+ },
+ 'fifo': {
+ 'options': ['fifos', 'Pipes (fifo)', 'pipes', 'filesystem', 'monit.fifos', 'line'],
+ 'lines': []
+ },
+ 'program': {
+ 'options': ['programs', 'Programs statuses', 'programs', 'applications', 'monit.programs', 'line'],
+ 'lines': []
+ },
+ 'process': {
+ 'options': ['processes', 'Processes statuses', 'processes', 'applications', 'monit.services', 'line'],
+ 'lines': []
+ },
+ 'process_uptime': {
+ 'options': ['processes uptime', 'Processes uptime', 'seconds', 'applications',
+ 'monit.process_uptime', 'line', 'hidden'],
+ 'lines': []
+ },
+ 'process_threads': {
+ 'options': ['processes threads', 'Processes threads', 'threads', 'applications',
+ 'monit.process_threads', 'line'],
+ 'lines': []
+ },
+ 'process_children': {
+        'options': ['processes children', 'Child processes', 'children', 'applications',
+ 'monit.process_childrens', 'line'],
+ 'lines': []
+ },
+ 'host': {
+ 'options': ['hosts', 'Hosts', 'hosts', 'network', 'monit.hosts', 'line'],
+ 'lines': []
+ },
+ 'host_latency': {
+        'options': ['hosts latency', 'Hosts latency', 'milliseconds', 'network', 'monit.host_latency', 'line'],
+ 'lines': []
+ },
+ 'net': {
+ 'options': ['interfaces', 'Network interfaces and addresses', 'interfaces', 'network',
+ 'monit.networks', 'line'],
+ 'lines': []
+ },
+}
+
+
+class Service(UrlService):
+ def __init__(self, configuration=None, name=None):
+ UrlService.__init__(self, configuration=configuration, name=name)
+ base_url = self.configuration.get('url', 'http://localhost:2812')
+ self.url = '{0}/_status?format=xml&level=full'.format(base_url)
+ self.order = ORDER
+ self.definitions = CHARTS
+
+ def parse(self, data):
+ try:
+ xml = ET.fromstring(data)
+ except ET.ParseError:
+ self.error("URL {0} didn't return a vaild XML page. Please check your settings.".format(self.url))
+ return None
+ return xml
+
+ def check(self):
+ self._manager = self._build_manager()
+ raw_data = self._get_raw_data()
+ if not raw_data:
+ return None
+ return bool(self.parse(raw_data))
+
+ def _get_data(self):
+ raw_data = self._get_raw_data()
+ if not raw_data:
+ return None
+ xml = self.parse(raw_data)
+ if not xml:
+ return None
+
+ data = {}
+ for service_id in DEFAULT_SERVICES_IDS:
+ service_category = MONIT_SERVICE_NAMES[service_id].lower()
+ if service_category == 'system':
+ self.debug("Skipping service from 'System' category, because it's useless in graphs")
+ continue
+
+ xpath_query = "./service[@type='{0}']".format(service_id)
+ self.debug('Searching for {0} as {1}'.format(service_category, xpath_query))
+ for service_node in xml.findall(xpath_query):
+
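+                # A service is reported as 1 (up) only when monit returns status '0'
+                # and the service is actively monitored (monitor == '1'); otherwise 0.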
+ service_name = service_node.find('name').text
+ service_status = service_node.find('status').text
+ service_monitoring = service_node.find('monitor').text
+ self.debug('=> found {0} with type={1}, status={2}, monitoring={3}'.format(service_name,
+ service_id, service_status, service_monitoring))
+
+ dimension_key = service_category + '_' + service_name
+ if dimension_key not in self.charts[service_category]:
+ self.charts[service_category].add_dimension([dimension_key, service_name, 'absolute'])
+ data[dimension_key] = 1 if service_status == '0' and service_monitoring == '1' else 0
+
+ if service_category == 'process':
+ for subnode in ('uptime', 'threads', 'children'):
+ subnode_value = service_node.find(subnode)
+ if subnode_value is None:
+ continue
+ if subnode == 'uptime' and int(subnode_value.text) < 0:
+                        self.debug('Skipping bugged metrics with negative uptime (monit before v5.16)')
+ continue
+ dimension_key = 'process_{0}_{1}'.format(subnode, service_name)
+ if dimension_key not in self.charts['process_' + subnode]:
+ self.charts['process_' + subnode].add_dimension([dimension_key, service_name, 'absolute'])
+ data[dimension_key] = int(subnode_value.text)
+
+ if service_category == 'host':
+ subnode_value = service_node.find('./icmp/responsetime')
+ if subnode_value is None:
+ continue
+ dimension_key = 'host_latency_{0}'.format(service_name)
+ if dimension_key not in self.charts['host_latency']:
+ self.charts['host_latency'].add_dimension([dimension_key, service_name,
+ 'absolute', 1000, 1000000])
+ data[dimension_key] = float(subnode_value.text) * 1000000
+
+ return data or None
diff --git a/collectors/python.d.plugin/monit/monit.conf b/collectors/python.d.plugin/monit/monit.conf
new file mode 100644
index 000000000..f9c26dbc3
--- /dev/null
+++ b/collectors/python.d.plugin/monit/monit.conf
@@ -0,0 +1,88 @@
+# netdata python.d.plugin configuration for monit
+#
+# This file is in YaML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 1
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# retries sets the number of retries to be made in case of failures.
+# If unset, the default for python.d.plugin is used.
+# Attempts to restore the service are made once every update_every
+# and only if the module has collected values in the past.
+# retries: 60
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# retries: 60 # the JOB's number of restoration attempts
+# autodetection_retry: 0 # the JOB's re-check interval in seconds
+#
+# Additionally to the above, this plugin also supports the following:
+#
+# url: 'URL' # the URL to fetch monit's status stats
+#
+# if the URL is password protected, the following are supported:
+#
+# user: 'username'
+# pass: 'password'
+#
+# Example
+#
+# local:
+# name : 'Local Monit'
+# url : 'http://localhost:2812'
+#
+# "local" will show up in Netdata logs. "Reverse Proxy" will show up in the menu
+# in the monit section.
+
+# ----------------------------------------------------------------------
+# AUTO-DETECTION JOBS
+# only one of them will run (they have the same name)
+
+localhost:
+ name : 'local'
+ url : 'http://localhost:2812'
diff --git a/collectors/python.d.plugin/mysql/Makefile.inc b/collectors/python.d.plugin/mysql/Makefile.inc
new file mode 100644
index 000000000..03e8b65eb
--- /dev/null
+++ b/collectors/python.d.plugin/mysql/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += mysql/mysql.chart.py
+dist_pythonconfig_DATA += mysql/mysql.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += mysql/README.md mysql/Makefile.inc
+
diff --git a/collectors/python.d.plugin/mysql/README.md b/collectors/python.d.plugin/mysql/README.md
new file mode 100644
index 000000000..e38098e7e
--- /dev/null
+++ b/collectors/python.d.plugin/mysql/README.md
@@ -0,0 +1,90 @@
+# mysql
+
+Module monitors one or more MySQL servers.
+
+**Requirements:**
+ * python library [MySQLdb](https://github.com/PyMySQL/mysqlclient-python) (faster) or [PyMySQL](https://github.com/PyMySQL/PyMySQL) (slower)
+
+It will produce the following charts (if data is available):
+
+1. **Bandwidth** in kbps
+ * in
+ * out
+
+2. **Queries** in queries/sec
+ * queries
+ * questions
+ * slow queries
+
+3. **Operations** in operations/sec
+ * opened tables
+ * flush
+ * commit
+ * delete
+ * prepare
+ * read first
+ * read key
+ * read next
+ * read prev
+ * read random
+ * read random next
+ * rollback
+ * save point
+ * update
+ * write
+
+4. **Table Locks** in locks/sec
+ * immediate
+ * waited
+
+5. **Select Issues** in issues/sec
+ * full join
+ * full range join
+ * range
+ * range check
+ * scan
+
+6. **Sort Issues** in issues/sec
+ * merge passes
+ * range
+ * scan
+
+### configuration
+
+You can provide, per server, the following:
+
+1. a username which has access to the database (defaults to 'root')
+2. password (defaults to none)
+3. mysql my.cnf configuration file
+4. mysql socket (optional)
+5. mysql host (ip or hostname)
+6. mysql port (defaults to 3306)
+
+Here is an example for 3 servers:
+
+```yaml
+update_every : 10
+priority : 90100
+retries : 5
+
+local:
+ 'my.cnf' : '/etc/mysql/my.cnf'
+ priority : 90000
+
+local_2:
+ user : 'root'
+ pass : 'blablablabla'
+ socket : '/var/run/mysqld/mysqld.sock'
+ update_every : 1
+
+remote:
+ user : 'admin'
+ pass : 'bla'
+ host : 'example.org'
+ port : 9000
+ retries : 20
+```
+
+If no configuration is given, the module will attempt to connect to the MySQL server via the unix socket at `/var/run/mysqld/mysqld.sock`, without a password and with username `root`.
+
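+On every update the module essentially runs `SHOW GLOBAL STATUS;` (plus slave status and a few variables). A minimal sketch to check that the configured user can read those counters, assuming the `PyMySQL` client is installed and the sample `root` credentials apply:
+
+```python
+# Verify that the configured user can read the status counters this module charts.
+import pymysql
+
+conn = pymysql.connect(host='127.0.0.1', port=3306, user='root', password='')
+# conn = pymysql.connect(unix_socket='/var/run/mysqld/mysqld.sock', user='root')  # socket variant
+with conn.cursor() as cursor:
+    cursor.execute('SHOW GLOBAL STATUS')
+    status = dict(cursor.fetchall())
+print(status['Bytes_received'], status['Bytes_sent'])
+conn.close()
+```
+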
+---
diff --git a/collectors/python.d.plugin/mysql/mysql.chart.py b/collectors/python.d.plugin/mysql/mysql.chart.py
new file mode 100644
index 000000000..c4d1e8b3a
--- /dev/null
+++ b/collectors/python.d.plugin/mysql/mysql.chart.py
@@ -0,0 +1,602 @@
+# -*- coding: utf-8 -*-
+# Description: MySQL netdata python.d module
+# Author: Pawel Krupa (paulfantom)
+# Author: Ilya Mashchenko (l2isbad)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from bases.FrameworkServices.MySQLService import MySQLService
+
+# default module values (can be overridden per job in `config`)
+# update_every = 3
+priority = 60000
+retries = 60
+
+# query executed on MySQL server
+QUERY_GLOBAL = 'SHOW GLOBAL STATUS;'
+QUERY_SLAVE = 'SHOW SLAVE STATUS;'
+QUERY_VARIABLES = 'SHOW GLOBAL VARIABLES LIKE \'max_connections\';'
+
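+# Status counters read from the `SHOW GLOBAL STATUS;` result and mapped to chart dimensions.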
+GLOBAL_STATS = [
+ 'Bytes_received',
+ 'Bytes_sent',
+ 'Queries',
+ 'Questions',
+ 'Slow_queries',
+ 'Handler_commit',
+ 'Handler_delete',
+ 'Handler_prepare',
+ 'Handler_read_first',
+ 'Handler_read_key',
+ 'Handler_read_next',
+ 'Handler_read_prev',
+ 'Handler_read_rnd',
+ 'Handler_read_rnd_next',
+ 'Handler_rollback',
+ 'Handler_savepoint',
+ 'Handler_savepoint_rollback',
+ 'Handler_update',
+ 'Handler_write',
+ 'Table_locks_immediate',
+ 'Table_locks_waited',
+ 'Select_full_join',
+ 'Select_full_range_join',
+ 'Select_range',
+ 'Select_range_check',
+ 'Select_scan',
+ 'Sort_merge_passes',
+ 'Sort_range',
+ 'Sort_scan',
+ 'Created_tmp_disk_tables',
+ 'Created_tmp_files',
+ 'Created_tmp_tables',
+ 'Connections',
+ 'Aborted_connects',
+ 'Max_used_connections',
+ 'Binlog_cache_disk_use',
+ 'Binlog_cache_use',
+ 'Threads_connected',
+ 'Threads_created',
+ 'Threads_cached',
+ 'Threads_running',
+ 'Thread_cache_misses',
+ 'Innodb_data_read',
+ 'Innodb_data_written',
+ 'Innodb_data_reads',
+ 'Innodb_data_writes',
+ 'Innodb_data_fsyncs',
+ 'Innodb_data_pending_reads',
+ 'Innodb_data_pending_writes',
+ 'Innodb_data_pending_fsyncs',
+ 'Innodb_log_waits',
+ 'Innodb_log_write_requests',
+ 'Innodb_log_writes',
+ 'Innodb_os_log_fsyncs',
+ 'Innodb_os_log_pending_fsyncs',
+ 'Innodb_os_log_pending_writes',
+ 'Innodb_os_log_written',
+ 'Innodb_row_lock_current_waits',
+ 'Innodb_rows_inserted',
+ 'Innodb_rows_read',
+ 'Innodb_rows_updated',
+ 'Innodb_rows_deleted',
+ 'Innodb_buffer_pool_pages_data',
+ 'Innodb_buffer_pool_pages_dirty',
+ 'Innodb_buffer_pool_pages_free',
+ 'Innodb_buffer_pool_pages_flushed',
+ 'Innodb_buffer_pool_pages_misc',
+ 'Innodb_buffer_pool_pages_total',
+ 'Innodb_buffer_pool_bytes_data',
+ 'Innodb_buffer_pool_bytes_dirty',
+ 'Innodb_buffer_pool_read_ahead',
+ 'Innodb_buffer_pool_read_ahead_evicted',
+ 'Innodb_buffer_pool_read_ahead_rnd',
+ 'Innodb_buffer_pool_read_requests',
+ 'Innodb_buffer_pool_write_requests',
+ 'Innodb_buffer_pool_reads',
+ 'Innodb_buffer_pool_wait_free',
+ 'Qcache_hits',
+ 'Qcache_lowmem_prunes',
+ 'Qcache_inserts',
+ 'Qcache_not_cached',
+ 'Qcache_queries_in_cache',
+ 'Qcache_free_memory',
+ 'Qcache_free_blocks',
+ 'Qcache_total_blocks',
+ 'Key_blocks_unused',
+ 'Key_blocks_used',
+ 'Key_blocks_not_flushed',
+ 'Key_read_requests',
+ 'Key_write_requests',
+ 'Key_reads',
+ 'Key_writes',
+ 'Open_files',
+ 'Opened_files',
+ 'Binlog_stmt_cache_disk_use',
+ 'Binlog_stmt_cache_use',
+ 'Connection_errors_accept',
+ 'Connection_errors_internal',
+ 'Connection_errors_max_connections',
+ 'Connection_errors_peer_address',
+ 'Connection_errors_select',
+ 'Connection_errors_tcpwrap',
+ 'wsrep_local_recv_queue',
+ 'wsrep_local_send_queue',
+ 'wsrep_received',
+ 'wsrep_replicated',
+ 'wsrep_received_bytes',
+ 'wsrep_replicated_bytes',
+ 'wsrep_local_bf_aborts',
+ 'wsrep_local_cert_failures',
+ 'wsrep_flow_control_paused_ns',
+ 'Com_delete',
+ 'Com_insert',
+ 'Com_select',
+ 'Com_update',
+ 'Com_replace'
+]
+
+
+def slave_seconds(value):
+ try:
+ return int(value)
+ except (TypeError, ValueError):
+ return -1
+
+
+def slave_running(value):
+ return 1 if value == 'Yes' else -1
+
+
+SLAVE_STATS = [
+ ('Seconds_Behind_Master', slave_seconds),
+ ('Slave_SQL_Running', slave_running),
+ ('Slave_IO_Running', slave_running)
+]
+
+VARIABLES = [
+ 'max_connections'
+]
+
+ORDER = [
+ 'net',
+ 'queries',
+ 'queries_type',
+ 'handlers',
+ 'table_locks',
+ 'join_issues',
+ 'sort_issues',
+ 'tmp',
+ 'connections',
+ 'connections_active',
+ 'connection_errors',
+ 'binlog_cache',
+ 'binlog_stmt_cache',
+ 'threads',
+ 'thread_cache_misses',
+ 'innodb_io',
+ 'innodb_io_ops',
+ 'innodb_io_pending_ops',
+ 'innodb_log',
+ 'innodb_os_log',
+ 'innodb_os_log_io',
+ 'innodb_cur_row_lock',
+ 'innodb_rows',
+ 'innodb_buffer_pool_pages',
+ 'innodb_buffer_pool_bytes',
+ 'innodb_buffer_pool_read_ahead',
+ 'innodb_buffer_pool_reqs',
+ 'innodb_buffer_pool_ops',
+ 'qcache_ops',
+ 'qcache',
+ 'qcache_freemem',
+ 'qcache_memblocks',
+ 'key_blocks',
+ 'key_requests',
+ 'key_disk_ops',
+ 'files',
+ 'files_rate',
+ 'slave_behind',
+ 'slave_status',
+ 'galera_writesets',
+ 'galera_bytes',
+ 'galera_queue',
+ 'galera_conflicts',
+ 'galera_flow_control'
+]
+
+CHARTS = {
+ 'net': {
+ 'options': [None, 'mysql Bandwidth', 'kilobits/s', 'bandwidth', 'mysql.net', 'area'],
+ 'lines': [
+ ['Bytes_received', 'in', 'incremental', 8, 1024],
+ ['Bytes_sent', 'out', 'incremental', -8, 1024]
+ ]
+ },
+ 'queries': {
+ 'options': [None, 'mysql Queries', 'queries/s', 'queries', 'mysql.queries', 'line'],
+ 'lines': [
+ ['Queries', 'queries', 'incremental'],
+ ['Questions', 'questions', 'incremental'],
+ ['Slow_queries', 'slow_queries', 'incremental']
+ ]
+ },
+ 'queries_type': {
+ 'options': [None, 'mysql Query type', 'queries/s', 'query_types', 'mysql.queries_type', 'stacked'],
+ 'lines': [
+ ['Com_select', 'select', 'incremental'],
+ ['Com_delete', 'delete', 'incremental'],
+ ['Com_update', 'update', 'incremental'],
+ ['Com_insert', 'insert', 'incremental'],
+ ['Qcache_hits', 'cache_hits', 'incremental'],
+ ['Com_replace', 'replace', 'incremental']
+ ]
+ },
+ 'handlers': {
+ 'options': [None, 'mysql Handlers', 'handlers/s', 'handlers', 'mysql.handlers', 'line'],
+ 'lines': [
+ ['Handler_commit', 'commit', 'incremental'],
+ ['Handler_delete', 'delete', 'incremental'],
+ ['Handler_prepare', 'prepare', 'incremental'],
+ ['Handler_read_first', 'read_first', 'incremental'],
+ ['Handler_read_key', 'read_key', 'incremental'],
+ ['Handler_read_next', 'read_next', 'incremental'],
+ ['Handler_read_prev', 'read_prev', 'incremental'],
+ ['Handler_read_rnd', 'read_rnd', 'incremental'],
+ ['Handler_read_rnd_next', 'read_rnd_next', 'incremental'],
+ ['Handler_rollback', 'rollback', 'incremental'],
+ ['Handler_savepoint', 'savepoint', 'incremental'],
+ ['Handler_savepoint_rollback', 'savepoint_rollback', 'incremental'],
+ ['Handler_update', 'update', 'incremental'],
+ ['Handler_write', 'write', 'incremental']
+ ]
+ },
+ 'table_locks': {
+ 'options': [None, 'mysql Tables Locks', 'locks/s', 'locks', 'mysql.table_locks', 'line'],
+ 'lines': [
+ ['Table_locks_immediate', 'immediate', 'incremental'],
+ ['Table_locks_waited', 'waited', 'incremental', -1, 1]
+ ]
+ },
+ 'join_issues': {
+ 'options': [None, 'mysql Select Join Issues', 'joins/s', 'issues', 'mysql.join_issues', 'line'],
+ 'lines': [
+ ['Select_full_join', 'full_join', 'incremental'],
+ ['Select_full_range_join', 'full_range_join', 'incremental'],
+ ['Select_range', 'range', 'incremental'],
+ ['Select_range_check', 'range_check', 'incremental'],
+ ['Select_scan', 'scan', 'incremental']
+ ]
+ },
+ 'sort_issues': {
+ 'options': [None, 'mysql Sort Issues', 'issues/s', 'issues', 'mysql.sort_issues', 'line'],
+ 'lines': [
+ ['Sort_merge_passes', 'merge_passes', 'incremental'],
+ ['Sort_range', 'range', 'incremental'],
+ ['Sort_scan', 'scan', 'incremental']
+ ]
+ },
+ 'tmp': {
+ 'options': [None, 'mysql Tmp Operations', 'counter', 'temporaries', 'mysql.tmp', 'line'],
+ 'lines': [
+ ['Created_tmp_disk_tables', 'disk_tables', 'incremental'],
+ ['Created_tmp_files', 'files', 'incremental'],
+ ['Created_tmp_tables', 'tables', 'incremental']
+ ]
+ },
+ 'connections': {
+ 'options': [None, 'mysql Connections', 'connections/s', 'connections', 'mysql.connections', 'line'],
+ 'lines': [
+ ['Connections', 'all', 'incremental'],
+ ['Aborted_connects', 'aborted', 'incremental']
+ ]
+ },
+ 'connections_active': {
+ 'options': [None, 'mysql Connections Active', 'connections', 'connections', 'mysql.connections_active', 'line'],
+ 'lines': [
+ ['Threads_connected', 'active', 'absolute'],
+ ['max_connections', 'limit', 'absolute'],
+ ['Max_used_connections', 'max_active', 'absolute']
+ ]
+ },
+ 'binlog_cache': {
+ 'options': [None, 'mysql Binlog Cache', 'transactions/s', 'binlog', 'mysql.binlog_cache', 'line'],
+ 'lines': [
+ ['Binlog_cache_disk_use', 'disk', 'incremental'],
+ ['Binlog_cache_use', 'all', 'incremental']
+ ]
+ },
+ 'threads': {
+ 'options': [None, 'mysql Threads', 'threads', 'threads', 'mysql.threads', 'line'],
+ 'lines': [
+ ['Threads_connected', 'connected', 'absolute'],
+ ['Threads_created', 'created', 'incremental'],
+ ['Threads_cached', 'cached', 'absolute', -1, 1],
+ ['Threads_running', 'running', 'absolute'],
+ ]
+ },
+ 'thread_cache_misses': {
+ 'options': [None, 'mysql Threads Cache Misses', 'misses', 'threads', 'mysql.thread_cache_misses', 'area'],
+ 'lines': [
+ ['Thread_cache_misses', 'misses', 'absolute', 1, 100]
+ ]
+ },
+ 'innodb_io': {
+ 'options': [None, 'mysql InnoDB I/O Bandwidth', 'kilobytes/s', 'innodb', 'mysql.innodb_io', 'area'],
+ 'lines': [
+ ['Innodb_data_read', 'read', 'incremental', 1, 1024],
+ ['Innodb_data_written', 'write', 'incremental', -1, 1024]
+ ]
+ },
+ 'innodb_io_ops': {
+ 'options': [None, 'mysql InnoDB I/O Operations', 'operations/s', 'innodb', 'mysql.innodb_io_ops', 'line'],
+ 'lines': [
+ ['Innodb_data_reads', 'reads', 'incremental'],
+ ['Innodb_data_writes', 'writes', 'incremental', -1, 1],
+ ['Innodb_data_fsyncs', 'fsyncs', 'incremental']
+ ]
+ },
+ 'innodb_io_pending_ops': {
+ 'options': [None, 'mysql InnoDB Pending I/O Operations', 'operations', 'innodb',
+ 'mysql.innodb_io_pending_ops', 'line'],
+ 'lines': [
+ ['Innodb_data_pending_reads', 'reads', 'absolute'],
+ ['Innodb_data_pending_writes', 'writes', 'absolute', -1, 1],
+ ['Innodb_data_pending_fsyncs', 'fsyncs', 'absolute']
+ ]
+ },
+ 'innodb_log': {
+ 'options': [None, 'mysql InnoDB Log Operations', 'operations/s', 'innodb', 'mysql.innodb_log', 'line'],
+ 'lines': [
+ ['Innodb_log_waits', 'waits', 'incremental'],
+ ['Innodb_log_write_requests', 'write_requests', 'incremental', -1, 1],
+ ['Innodb_log_writes', 'writes', 'incremental', -1, 1],
+ ]
+ },
+ 'innodb_os_log': {
+ 'options': [None, 'mysql InnoDB OS Log Operations', 'operations', 'innodb', 'mysql.innodb_os_log', 'line'],
+ 'lines': [
+ ['Innodb_os_log_fsyncs', 'fsyncs', 'incremental'],
+ ['Innodb_os_log_pending_fsyncs', 'pending_fsyncs', 'absolute'],
+ ['Innodb_os_log_pending_writes', 'pending_writes', 'absolute', -1, 1],
+ ]
+ },
+ 'innodb_os_log_io': {
+ 'options': [None, 'mysql InnoDB OS Log Bandwidth', 'kilobytes/s', 'innodb', 'mysql.innodb_os_log_io', 'area'],
+ 'lines': [
+ ['Innodb_os_log_written', 'write', 'incremental', -1, 1024],
+ ]
+ },
+ 'innodb_cur_row_lock': {
+ 'options': [None, 'mysql InnoDB Current Row Locks', 'operations', 'innodb',
+ 'mysql.innodb_cur_row_lock', 'area'],
+ 'lines': [
+ ['Innodb_row_lock_current_waits', 'current_waits', 'absolute']
+ ]
+ },
+ 'innodb_rows': {
+ 'options': [None, 'mysql InnoDB Row Operations', 'operations/s', 'innodb', 'mysql.innodb_rows', 'area'],
+ 'lines': [
+ ['Innodb_rows_inserted', 'inserted', 'incremental'],
+ ['Innodb_rows_read', 'read', 'incremental', 1, 1],
+ ['Innodb_rows_updated', 'updated', 'incremental', 1, 1],
+ ['Innodb_rows_deleted', 'deleted', 'incremental', -1, 1],
+ ]
+ },
+ 'innodb_buffer_pool_pages': {
+ 'options': [None, 'mysql InnoDB Buffer Pool Pages', 'pages', 'innodb',
+ 'mysql.innodb_buffer_pool_pages', 'line'],
+ 'lines': [
+ ['Innodb_buffer_pool_pages_data', 'data', 'absolute'],
+ ['Innodb_buffer_pool_pages_dirty', 'dirty', 'absolute', -1, 1],
+ ['Innodb_buffer_pool_pages_free', 'free', 'absolute'],
+ ['Innodb_buffer_pool_pages_flushed', 'flushed', 'incremental', -1, 1],
+ ['Innodb_buffer_pool_pages_misc', 'misc', 'absolute', -1, 1],
+ ['Innodb_buffer_pool_pages_total', 'total', 'absolute']
+ ]
+ },
+ 'innodb_buffer_pool_bytes': {
+ 'options': [None, 'mysql InnoDB Buffer Pool Bytes', 'MB', 'innodb', 'mysql.innodb_buffer_pool_bytes', 'area'],
+ 'lines': [
+ ['Innodb_buffer_pool_bytes_data', 'data', 'absolute', 1, 1024 * 1024],
+ ['Innodb_buffer_pool_bytes_dirty', 'dirty', 'absolute', -1, 1024 * 1024]
+ ]
+ },
+ 'innodb_buffer_pool_read_ahead': {
+ 'options': [None, 'mysql InnoDB Buffer Pool Read Ahead', 'operations/s', 'innodb',
+ 'mysql.innodb_buffer_pool_read_ahead', 'area'],
+ 'lines': [
+ ['Innodb_buffer_pool_read_ahead', 'all', 'incremental'],
+ ['Innodb_buffer_pool_read_ahead_evicted', 'evicted', 'incremental', -1, 1],
+ ['Innodb_buffer_pool_read_ahead_rnd', 'random', 'incremental']
+ ]
+ },
+ 'innodb_buffer_pool_reqs': {
+ 'options': [None, 'mysql InnoDB Buffer Pool Requests', 'requests/s', 'innodb',
+ 'mysql.innodb_buffer_pool_reqs', 'area'],
+ 'lines': [
+ ['Innodb_buffer_pool_read_requests', 'reads', 'incremental'],
+ ['Innodb_buffer_pool_write_requests', 'writes', 'incremental', -1, 1]
+ ]
+ },
+ 'innodb_buffer_pool_ops': {
+ 'options': [None, 'mysql InnoDB Buffer Pool Operations', 'operations/s', 'innodb',
+ 'mysql.innodb_buffer_pool_ops', 'area'],
+ 'lines': [
+ ['Innodb_buffer_pool_reads', 'disk reads', 'incremental'],
+ ['Innodb_buffer_pool_wait_free', 'wait free', 'incremental', -1, 1]
+ ]
+ },
+ 'qcache_ops': {
+ 'options': [None, 'mysql QCache Operations', 'queries/s', 'qcache', 'mysql.qcache_ops', 'line'],
+ 'lines': [
+ ['Qcache_hits', 'hits', 'incremental'],
+ ['Qcache_lowmem_prunes', 'lowmem prunes', 'incremental', -1, 1],
+ ['Qcache_inserts', 'inserts', 'incremental'],
+ ['Qcache_not_cached', 'not cached', 'incremental', -1, 1]
+ ]
+ },
+ 'qcache': {
+ 'options': [None, 'mysql QCache Queries in Cache', 'queries', 'qcache', 'mysql.qcache', 'line'],
+ 'lines': [
+ ['Qcache_queries_in_cache', 'queries', 'absolute']
+ ]
+ },
+ 'qcache_freemem': {
+ 'options': [None, 'mysql QCache Free Memory', 'MB', 'qcache', 'mysql.qcache_freemem', 'area'],
+ 'lines': [
+ ['Qcache_free_memory', 'free', 'absolute', 1, 1024 * 1024]
+ ]
+ },
+ 'qcache_memblocks': {
+ 'options': [None, 'mysql QCache Memory Blocks', 'blocks', 'qcache', 'mysql.qcache_memblocks', 'line'],
+ 'lines': [
+ ['Qcache_free_blocks', 'free', 'absolute'],
+ ['Qcache_total_blocks', 'total', 'absolute']
+ ]
+ },
+ 'key_blocks': {
+ 'options': [None, 'mysql MyISAM Key Cache Blocks', 'blocks', 'myisam', 'mysql.key_blocks', 'line'],
+ 'lines': [
+ ['Key_blocks_unused', 'unused', 'absolute'],
+ ['Key_blocks_used', 'used', 'absolute', -1, 1],
+ ['Key_blocks_not_flushed', 'not flushed', 'absolute']
+ ]
+ },
+ 'key_requests': {
+ 'options': [None, 'mysql MyISAM Key Cache Requests', 'requests/s', 'myisam', 'mysql.key_requests', 'area'],
+ 'lines': [
+ ['Key_read_requests', 'reads', 'incremental'],
+ ['Key_write_requests', 'writes', 'incremental', -1, 1]
+ ]
+ },
+ 'key_disk_ops': {
+ 'options': [None, 'mysql MyISAM Key Cache Disk Operations', 'operations/s',
+ 'myisam', 'mysql.key_disk_ops', 'area'],
+ 'lines': [
+ ['Key_reads', 'reads', 'incremental'],
+ ['Key_writes', 'writes', 'incremental', -1, 1]
+ ]
+ },
+ 'files': {
+ 'options': [None, 'mysql Open Files', 'files', 'files', 'mysql.files', 'line'],
+ 'lines': [
+ ['Open_files', 'files', 'absolute']
+ ]
+ },
+ 'files_rate': {
+ 'options': [None, 'mysql Opened Files Rate', 'files/s', 'files', 'mysql.files_rate', 'line'],
+ 'lines': [
+ ['Opened_files', 'files', 'incremental']
+ ]
+ },
+ 'binlog_stmt_cache': {
+ 'options': [None, 'mysql Binlog Statement Cache', 'statements/s', 'binlog',
+ 'mysql.binlog_stmt_cache', 'line'],
+ 'lines': [
+ ['Binlog_stmt_cache_disk_use', 'disk', 'incremental'],
+ ['Binlog_stmt_cache_use', 'all', 'incremental']
+ ]
+ },
+ 'connection_errors': {
+ 'options': [None, 'mysql Connection Errors', 'connections/s', 'connections',
+ 'mysql.connection_errors', 'line'],
+ 'lines': [
+ ['Connection_errors_accept', 'accept', 'incremental'],
+ ['Connection_errors_internal', 'internal', 'incremental'],
+ ['Connection_errors_max_connections', 'max', 'incremental'],
+ ['Connection_errors_peer_address', 'peer_addr', 'incremental'],
+ ['Connection_errors_select', 'select', 'incremental'],
+ ['Connection_errors_tcpwrap', 'tcpwrap', 'incremental']
+ ]
+ },
+ 'slave_behind': {
+ 'options': [None, 'Slave Behind Seconds', 'seconds', 'slave', 'mysql.slave_behind', 'line'],
+ 'lines': [
+ ['Seconds_Behind_Master', 'seconds', 'absolute']
+ ]
+ },
+ 'slave_status': {
+ 'options': [None, 'Slave Status', 'status', 'slave', 'mysql.slave_status', 'line'],
+ 'lines': [
+ ['Slave_SQL_Running', 'sql_running', 'absolute'],
+ ['Slave_IO_Running', 'io_running', 'absolute']
+ ]
+ },
+ 'galera_writesets': {
+ 'options': [None, 'Replicated writesets', 'writesets/s', 'galera', 'mysql.galera_writesets', 'line'],
+ 'lines': [
+ ['wsrep_received', 'rx', 'incremental'],
+ ['wsrep_replicated', 'tx', 'incremental', -1, 1],
+ ]
+ },
+ 'galera_bytes': {
+ 'options': [None, 'Replicated bytes', 'KB/s', 'galera', 'mysql.galera_bytes', 'area'],
+ 'lines': [
+ ['wsrep_received_bytes', 'rx', 'incremental', 1, 1024],
+ ['wsrep_replicated_bytes', 'tx', 'incremental', -1, 1024],
+ ]
+ },
+ 'galera_queue': {
+ 'options': [None, 'Galera queue', 'writesets', 'galera', 'mysql.galera_queue', 'line'],
+ 'lines': [
+ ['wsrep_local_recv_queue', 'rx', 'absolute'],
+ ['wsrep_local_send_queue', 'tx', 'absolute', -1, 1],
+ ]
+ },
+ 'galera_conflicts': {
+ 'options': [None, 'Replication conflicts', 'transactions', 'galera', 'mysql.galera_conflicts', 'area'],
+ 'lines': [
+ ['wsrep_local_bf_aborts', 'bf_aborts', 'incremental'],
+ ['wsrep_local_cert_failures', 'cert_fails', 'incremental', -1, 1],
+ ]
+ },
+ 'galera_flow_control': {
+ 'options': [None, 'Flow control', 'millisec', 'galera', 'mysql.galera_flow_control', 'area'],
+ 'lines': [
+ ['wsrep_flow_control_paused_ns', 'paused', 'incremental', 1, 1000000],
+ ]
+ }
+}
+
+
+class Service(MySQLService):
+ def __init__(self, configuration=None, name=None):
+ MySQLService.__init__(self, configuration=configuration, name=name)
+ self.order = ORDER
+ self.definitions = CHARTS
+ self.queries = dict(global_status=QUERY_GLOBAL, slave_status=QUERY_SLAVE, variables=QUERY_VARIABLES)
+
+ def _get_data(self):
+
+ raw_data = self._get_raw_data(description=True)
+
+ if not raw_data:
+ return None
+
+ to_netdata = dict()
+
+ if 'global_status' in raw_data:
+ global_status = dict(raw_data['global_status'][0])
+ for key in GLOBAL_STATS:
+ if key in global_status:
+ to_netdata[key] = global_status[key]
+ if 'Threads_created' in to_netdata and 'Connections' in to_netdata:
+ to_netdata['Thread_cache_misses'] = round(int(to_netdata['Threads_created'])
+ / float(to_netdata['Connections']) * 10000)
+
+ if 'slave_status' in raw_data:
+ if raw_data['slave_status'][0]:
+ slave_raw_data = dict(zip([e[0] for e in raw_data['slave_status'][1]], raw_data['slave_status'][0][0]))
+ for key, func in SLAVE_STATS:
+ if key in slave_raw_data:
+ to_netdata[key] = func(slave_raw_data[key])
+ else:
+ self.queries.pop('slave_status')
+
+ if 'variables' in raw_data:
+ variables = dict(raw_data['variables'][0])
+ for key in VARIABLES:
+ if key in variables:
+ to_netdata[key] = variables[key]
+
+ return to_netdata or None
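The `Thread_cache_misses` value returned above is a derived metric: the module stores `Threads_created / Connections * 10000` as an integer, and the chart line `['Thread_cache_misses', 'misses', 'absolute', 1, 100]` divides by 100, so the dashboard shows a percentage with two decimal places. A minimal sketch of that scaling (the counter values are hypothetical):

```python
# Hedged sketch: how the derived thread-cache-miss percentage is scaled.
# The sample SHOW GLOBAL STATUS counters below are made up for illustration.
threads_created = 153      # Threads_created
connections = 10492        # Connections

# Stored value, as computed in _get_data(): an integer scaled by 10000.
thread_cache_misses = round(threads_created / float(connections) * 10000)

# The chart divisor of 100 turns it back into a percentage with 2 decimals.
print(thread_cache_misses / 100.0)  # -> 1.46 (%)
```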
diff --git a/collectors/python.d.plugin/mysql/mysql.conf b/collectors/python.d.plugin/mysql/mysql.conf
new file mode 100644
index 000000000..b5956a2c6
--- /dev/null
+++ b/collectors/python.d.plugin/mysql/mysql.conf
@@ -0,0 +1,286 @@
+# netdata python.d.plugin configuration for mysql
+#
+# This file is in YaML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 1
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# retries sets the number of retries to be made in case of failures.
+# If unset, the default for python.d.plugin is used.
+# Attempts to restore the service are made once every update_every
+# and only if the module has collected values in the past.
+# retries: 60
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# retries: 60 # the JOB's number of restoration attempts
+# autodetection_retry: 0 # the JOB's re-check interval in seconds
+#
+# In addition to the above, mysql also supports the following:
+#
+# socket: 'path/to/mysql.sock'
+#
+# or
+# host: 'IP or HOSTNAME' # the host to connect to
+# port: PORT # the port to connect to
+#
+# in all cases, the following can also be set:
+#
+# user: 'username' # the mysql username to use
+# pass: 'password' # the mysql password to use
+#
+
+# ----------------------------------------------------------------------
+# mySQL CONFIGURATION
+#
+# netdata does not need any privilege - only the ability to connect
+# to the mysql server (netdata will not be able to see any data).
+#
+# Execute these commands to give the local user 'netdata' the ability
+# to connect to the mysql server on localhost, without a password:
+#
+# > create user 'netdata'@'localhost';
+# > grant usage on *.* to 'netdata'@'localhost';
+# > flush privileges;
+#
+# with the above statements, netdata will be able to gather mysql
+# statistics, without the ability to see or alter any data or affect
+# mysql operation in any way. No change is required below.
+#
+# If you need to monitor mysql replication too, use this instead:
+#
+# > create user 'netdata'@'localhost';
+# > grant replication client on *.* to 'netdata'@'localhost';
+# > flush privileges;
+#
+
+# ----------------------------------------------------------------------
+# AUTO-DETECTION JOBS
+# only one of them will run (they have the same name)
+
+mycnf1:
+ name : 'local'
+ 'my.cnf' : '/etc/my.cnf'
+
+mycnf2:
+ name : 'local'
+ 'my.cnf' : '/etc/mysql/my.cnf'
+
+debiancnf:
+ name : 'local'
+ 'my.cnf' : '/etc/mysql/debian.cnf'
+
+socket1:
+ name : 'local'
+ # user : ''
+ # pass : ''
+ socket : '/var/run/mysqld/mysqld.sock'
+
+socket2:
+ name : 'local'
+ # user : ''
+ # pass : ''
+ socket : '/var/run/mysqld/mysql.sock'
+
+socket3:
+ name : 'local'
+ # user : ''
+ # pass : ''
+ socket : '/var/lib/mysql/mysql.sock'
+
+socket4:
+ name : 'local'
+ # user : ''
+ # pass : ''
+ socket : '/tmp/mysql.sock'
+
+tcp:
+ name : 'local'
+ # user : ''
+ # pass : ''
+ host : 'localhost'
+ port : '3306'
+ # keep in mind port might be ignored by mysql, if host = 'localhost'
+ # http://serverfault.com/questions/337818/how-to-force-mysql-to-connect-by-tcp-instead-of-a-unix-socket/337844#337844
+
+tcpipv4:
+ name : 'local'
+ # user : ''
+ # pass : ''
+ host : '127.0.0.1'
+ port : '3306'
+
+tcpipv6:
+ name : 'local'
+ # user : ''
+ # pass : ''
+ host : '::1'
+ port : '3306'
+
+
+# Now we try the same as above with user: root
+# A few systems configure mysql to accept passwordless
+# root access.
+
+mycnf1_root:
+ name : 'local'
+ user : 'root'
+ 'my.cnf' : '/etc/my.cnf'
+
+mycnf2_root:
+ name : 'local'
+ user : 'root'
+ 'my.cnf' : '/etc/mysql/my.cnf'
+
+socket1_root:
+ name : 'local'
+ user : 'root'
+ # pass : ''
+ socket : '/var/run/mysqld/mysqld.sock'
+
+socket2_root:
+ name : 'local'
+ user : 'root'
+ # pass : ''
+ socket : '/var/run/mysqld/mysql.sock'
+
+socket3_root:
+ name : 'local'
+ user : 'root'
+ # pass : ''
+ socket : '/var/lib/mysql/mysql.sock'
+
+socket4_root:
+ name : 'local'
+ user : 'root'
+ # pass : ''
+ socket : '/tmp/mysql.sock'
+
+tcp_root:
+ name : 'local'
+ user : 'root'
+ # pass : ''
+ host : 'localhost'
+ port : '3306'
+ # keep in mind port might be ignored by mysql, if host = 'localhost'
+ # http://serverfault.com/questions/337818/how-to-force-mysql-to-connect-by-tcp-instead-of-a-unix-socket/337844#337844
+
+tcpipv4_root:
+ name : 'local'
+ user : 'root'
+ # pass : ''
+ host : '127.0.0.1'
+ port : '3306'
+
+tcpipv6_root:
+ name : 'local'
+ user : 'root'
+ # pass : ''
+ host : '::1'
+ port : '3306'
+
+
+# Now we try the same as above with user: netdata
+
+mycnf1_netdata:
+ name : 'local'
+ user : 'netdata'
+ 'my.cnf' : '/etc/my.cnf'
+
+mycnf2_netdata:
+ name : 'local'
+ user : 'netdata'
+ 'my.cnf' : '/etc/mysql/my.cnf'
+
+socket1_netdata:
+ name : 'local'
+ user : 'netdata'
+ # pass : ''
+ socket : '/var/run/mysqld/mysqld.sock'
+
+socket2_netdata:
+ name : 'local'
+ user : 'netdata'
+ # pass : ''
+ socket : '/var/run/mysqld/mysql.sock'
+
+socket3_netdata:
+ name : 'local'
+ user : 'netdata'
+ # pass : ''
+ socket : '/var/lib/mysql/mysql.sock'
+
+socket4_netdata:
+ name : 'local'
+ user : 'netdata'
+ # pass : ''
+ socket : '/tmp/mysql.sock'
+
+tcp_netdata:
+ name : 'local'
+ user : 'netdata'
+ # pass : ''
+ host : 'localhost'
+ port : '3306'
+ # keep in mind port might be ignored by mysql, if host = 'localhost'
+ # http://serverfault.com/questions/337818/how-to-force-mysql-to-connect-by-tcp-instead-of-a-unix-socket/337844#337844
+
+tcpipv4_netdata:
+ name : 'local'
+ user : 'netdata'
+ # pass : ''
+ host : '127.0.0.1'
+ port : '3306'
+
+tcpipv6_netdata:
+ name : 'local'
+ user : 'netdata'
+ # pass : ''
+ host : '::1'
+ port : '3306'
+
diff --git a/collectors/python.d.plugin/nginx/Makefile.inc b/collectors/python.d.plugin/nginx/Makefile.inc
new file mode 100644
index 000000000..4636aa830
--- /dev/null
+++ b/collectors/python.d.plugin/nginx/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += nginx/nginx.chart.py
+dist_pythonconfig_DATA += nginx/nginx.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += nginx/README.md nginx/Makefile.inc
+
diff --git a/collectors/python.d.plugin/nginx/README.md b/collectors/python.d.plugin/nginx/README.md
new file mode 100644
index 000000000..007f45c7c
--- /dev/null
+++ b/collectors/python.d.plugin/nginx/README.md
@@ -0,0 +1,45 @@
+# nginx
+
+This module will monitor one or more nginx servers depending on configuration. Servers can be either local or remote.
+
+**Requirements:**
+ * nginx with 'ngx_http_stub_status_module' configured
+ * a 'location /stub_status' block
+
+An example nginx configuration can be found in 'python.d/nginx.conf'.
+
+It produces the following charts:
+
+1. **Active Connections**
+ * active
+
+2. **Requests** in requests/s
+ * requests
+
+3. **Active Connections by Status**
+ * reading
+ * writing
+ * waiting
+
+4. **Connections Rate** in connections/s
+ * accepts
+ * handled
+
+### configuration
+
+It needs only the `url` of the server's `stub_status` page.
+
+Here is an example for a local server:
+
+```yaml
+update_every : 10
+priority : 90100
+
+local:
+ url : 'http://localhost/stub_status'
+ retries : 10
+```
+
+Without configuration, the module attempts to connect to `http://localhost/stub_status`.
+
+---
diff --git a/collectors/python.d.plugin/nginx/nginx.chart.py b/collectors/python.d.plugin/nginx/nginx.chart.py
new file mode 100644
index 000000000..09c6bbd37
--- /dev/null
+++ b/collectors/python.d.plugin/nginx/nginx.chart.py
@@ -0,0 +1,80 @@
+# -*- coding: utf-8 -*-
+# Description: nginx netdata python.d module
+# Author: Pawel Krupa (paulfantom)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from bases.FrameworkServices.UrlService import UrlService
+
+# default module values (can be overridden per job in `config`)
+# update_every = 2
+priority = 60000
+retries = 60
+
+# default job configuration (overridden by python.d.plugin)
+# config = {'local': {
+# 'update_every': update_every,
+# 'retries': retries,
+# 'priority': priority,
+# 'url': 'http://localhost/stub_status'
+# }}
+
+# charts order (can be overridden if you want fewer charts, or different order)
+ORDER = ['connections', 'requests', 'connection_status', 'connect_rate']
+
+CHARTS = {
+ 'connections': {
+ 'options': [None, 'nginx Active Connections', 'connections', 'active connections',
+ 'nginx.connections', 'line'],
+ 'lines': [
+ ['active']
+ ]
+ },
+ 'requests': {
+ 'options': [None, 'nginx Requests', 'requests/s', 'requests', 'nginx.requests', 'line'],
+ 'lines': [
+ ['requests', None, 'incremental']
+ ]
+ },
+ 'connection_status': {
+ 'options': [None, 'nginx Active Connections by Status', 'connections', 'status',
+ 'nginx.connection_status', 'line'],
+ 'lines': [
+ ['reading'],
+ ['writing'],
+ ['waiting', 'idle']
+ ]
+ },
+ 'connect_rate': {
+ 'options': [None, 'nginx Connections Rate', 'connections/s', 'connections rate',
+ 'nginx.connect_rate', 'line'],
+ 'lines': [
+ ['accepts', 'accepted', 'incremental'],
+ ['handled', None, 'incremental']
+ ]
+ }
+}
+
+
+class Service(UrlService):
+ def __init__(self, configuration=None, name=None):
+ UrlService.__init__(self, configuration=configuration, name=name)
+ self.url = self.configuration.get('url', 'http://localhost/stub_status')
+ self.order = ORDER
+ self.definitions = CHARTS
+
+ def _get_data(self):
+ """
+ Format data received from http request
+ :return: dict
+ """
+ try:
+ raw = self._get_raw_data().split(" ")
+ return {'active': int(raw[2]),
+ 'requests': int(raw[9]),
+ 'reading': int(raw[11]),
+ 'writing': int(raw[13]),
+ 'waiting': int(raw[15]),
+ 'accepts': int(raw[7]),
+ 'handled': int(raw[8])}
+ except (ValueError, AttributeError):
+ return None
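The index-based parsing in `_get_data()` assumes the standard three-line `stub_status` payload. As a rough illustration (the counter values are hypothetical), splitting that payload on single spaces yields exactly the token positions the module reads:

```python
# Hedged sketch: why _get_data() picks raw[2], raw[7], raw[8], raw[9],
# raw[11], raw[13] and raw[15]. The payload is a hypothetical example of
# the standard ngx_http_stub_status_module output.
payload = (
    "Active connections: 291 \n"
    "server accepts handled requests\n"
    " 16630948 16630946 31070465 \n"
    "Reading: 6 Writing: 179 Waiting: 106 \n"
)
raw = payload.split(" ")
data = {
    'active': int(raw[2]),
    'accepts': int(raw[7]),
    'handled': int(raw[8]),
    'requests': int(raw[9]),
    'reading': int(raw[11]),
    'writing': int(raw[13]),
    'waiting': int(raw[15]),
}
print(data)
```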
diff --git a/collectors/python.d.plugin/nginx/nginx.conf b/collectors/python.d.plugin/nginx/nginx.conf
new file mode 100644
index 000000000..71c521066
--- /dev/null
+++ b/collectors/python.d.plugin/nginx/nginx.conf
@@ -0,0 +1,109 @@
+# netdata python.d.plugin configuration for nginx
+#
+# You must have ngx_http_stub_status_module configured on your nginx server for this
+# plugin to work. The following is an example config.
+# It must be located inside a server { } block.
+#
+# location /stub_status {
+# stub_status;
+# # Security: Only allow access from the IP below.
+# allow 192.168.1.200;
+# # Deny anyone else
+# deny all;
+# }
+#
+# This file is in YaML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 1
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# retries sets the number of retries to be made in case of failures.
+# If unset, the default for python.d.plugin is used.
+# Attempts to restore the service are made once every update_every
+# and only if the module has collected values in the past.
+# retries: 60
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# retries: 60 # the JOB's number of restoration attempts
+# autodetection_retry: 0 # the JOB's re-check interval in seconds
+#
+# In addition to the above, this plugin also supports the following:
+#
+# url: 'URL' # the URL to fetch nginx's status stats
+#
+# if the URL is password protected, the following are supported:
+#
+# user: 'username'
+# pass: 'password'
+#
+# Example
+#
+# RemoteNginx:
+# name : 'Reverse_Proxy'
+# url : 'http://yourdomain.com/stub_status'
+#
+# "RemoteNginx" will show up in Netdata logs. "Reverse Proxy" will show up in the menu
+# in the nginx section.
+
+# ----------------------------------------------------------------------
+# AUTO-DETECTION JOBS
+# only one of them will run (they have the same name)
+
+localhost:
+ name : 'local'
+ url : 'http://localhost/stub_status'
+
+localipv4:
+ name : 'local'
+ url : 'http://127.0.0.1/stub_status'
+
+localipv6:
+ name : 'local'
+ url : 'http://[::1]/stub_status'
+
diff --git a/collectors/python.d.plugin/nginx_plus/Makefile.inc b/collectors/python.d.plugin/nginx_plus/Makefile.inc
new file mode 100644
index 000000000..d3fdeaf2b
--- /dev/null
+++ b/collectors/python.d.plugin/nginx_plus/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += nginx_plus/nginx_plus.chart.py
+dist_pythonconfig_DATA += nginx_plus/nginx_plus.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += nginx_plus/README.md nginx_plus/Makefile.inc
+
diff --git a/collectors/python.d.plugin/nginx_plus/README.md b/collectors/python.d.plugin/nginx_plus/README.md
new file mode 100644
index 000000000..43ec867a3
--- /dev/null
+++ b/collectors/python.d.plugin/nginx_plus/README.md
@@ -0,0 +1,125 @@
+# nginx_plus
+
+This module will monitor one or more nginx_plus servers depending on configuration.
+Servers can be either local or remote.
+
+An example nginx_plus configuration can be found in 'python.d/nginx_plus.conf'.
+
+It produces the following charts:
+
+1. **Requests total** in requests/s
+ * total
+
+2. **Requests current** in requests
+ * current
+
+3. **Connection Statistics** in connections/s
+ * accepted
+ * dropped
+
+4. **Workers Statistics** in workers
+ * idle
+ * active
+
+5. **SSL Handshakes** in handshakes/s
+ * successful
+ * failed
+
+6. **SSL Session Reuses** in sessions/s
+ * reused
+
+7. **SSL Memory Usage** in percent
+ * usage
+
+8. **Processes** in processes
+ * respawned
+
+For every server zone:
+
+1. **Processing** in requests
+ * processing
+
+2. **Requests** in requests/s
+ * requests
+
+3. **Responses** in requests/s
+ * 1xx
+ * 2xx
+ * 3xx
+ * 4xx
+ * 5xx
+
+4. **Traffic** in kilobits/s
+ * received
+ * sent
+
+For every upstream:
+
+1. **Peers Requests** in requests/s
+ * peer name (dimension per peer)
+
+2. **All Peers Responses** in responses/s
+ * 1xx
+ * 2xx
+ * 3xx
+ * 4xx
+ * 5xx
+
+3. **Peer Responses** in requests/s (for every peer)
+ * 1xx
+ * 2xx
+ * 3xx
+ * 4xx
+ * 5xx
+
+4. **Peers Connections** in active
+ * peer name (dimension per peer)
+
+5. **Peers Connections Usage** in percent
+ * peer name (dimension per peer)
+
+6. **All Peers Traffic** in KB
+ * received
+ * sent
+
+7. **Peer Traffic** in KB/s (for every peer)
+ * received
+ * sent
+
+8. **Peer Timings** in ms (for every peer)
+ * header
+ * response
+
+9. **Memory Usage** in percent
+ * usage
+
+10. **Peers Status** in state
+ * peer name (dimension per peer)
+
+11. **Peers Total Downtime** in seconds
+ * peer name (dimension per peer)
+
+For every cache:
+
+1. **Traffic** in KB
+ * served
+ * written
+ * bypass
+
+2. **Memory Usage** in percent
+ * usage
+
+### configuration
+
+It needs only the `url` of the server's `status` page.
+
+Here is an example for a local server:
+
+```yaml
+local:
+ url : 'http://localhost/status'
+```
+
+Without configuration, the module fails to start.
+
+---
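As with the nginx module, collection is an HTTP fetch plus JSON decoding; the chart module in the next file then walks the decoded document. A rough, self-contained sketch of that first step, assuming the status endpoint is served at `http://localhost/status` (adjust to your own `status` location):

```python
# Hedged sketch: fetching and decoding the NGINX Plus status JSON,
# roughly what the module does through UrlService. The URL below is an
# assumption - use whatever your configured 'status' location is.
import json
from urllib.request import urlopen

with urlopen('http://localhost/status') as resp:
    status = json.loads(resp.read().decode('utf-8'))

# Top-level sections the module looks for (server zones, upstreams, caches):
for key in ('server_zones', 'upstreams', 'caches'):
    print(key, '->', list(status.get(key, {})))
```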
diff --git a/collectors/python.d.plugin/nginx_plus/nginx_plus.chart.py b/collectors/python.d.plugin/nginx_plus/nginx_plus.chart.py
new file mode 100644
index 000000000..1392f5a56
--- /dev/null
+++ b/collectors/python.d.plugin/nginx_plus/nginx_plus.chart.py
@@ -0,0 +1,492 @@
+# -*- coding: utf-8 -*-
+# Description: nginx_plus netdata python.d module
+# Author: Ilya Mashchenko (l2isbad)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+import re
+
+from collections import defaultdict
+from copy import deepcopy
+from json import loads
+
+try:
+ from collections import OrderedDict
+except ImportError:
+ from third_party.ordereddict import OrderedDict
+
+from bases.FrameworkServices.UrlService import UrlService
+
+# default module values (can be overridden per job in `config`)
+update_every = 1
+priority = 60000
+retries = 60
+
+# charts order (can be overridden if you want fewer charts, or different order)
+ORDER = [
+ 'requests_total',
+ 'requests_current',
+ 'connections_statistics',
+ 'connections_workers',
+ 'ssl_handshakes',
+ 'ssl_session_reuses',
+ 'ssl_memory_usage',
+ 'processes'
+]
+
+CHARTS = {
+ 'requests_total': {
+ 'options': [None, 'Requests Total', 'requests/s', 'requests', 'nginx_plus.requests_total', 'line'],
+ 'lines': [
+ ['requests_total', 'total', 'incremental']
+ ]
+ },
+ 'requests_current': {
+ 'options': [None, 'Requests Current', 'requests', 'requests', 'nginx_plus.requests_current', 'line'],
+ 'lines': [
+ ['requests_current', 'current']
+ ]
+ },
+ 'connections_statistics': {
+ 'options': [None, 'Connections Statistics', 'connections/s',
+ 'connections', 'nginx_plus.connections_statistics', 'stacked'],
+ 'lines': [
+ ['connections_accepted', 'accepted', 'incremental'],
+ ['connections_dropped', 'dropped', 'incremental']
+ ]
+ },
+ 'connections_workers': {
+ 'options': [None, 'Workers Statistics', 'workers',
+ 'connections', 'nginx_plus.connections_workers', 'stacked'],
+ 'lines': [
+ ['connections_idle', 'idle'],
+ ['connections_active', 'active']
+ ]
+ },
+ 'ssl_handshakes': {
+ 'options': [None, 'SSL Handshakes', 'handshakes/s', 'ssl', 'nginx_plus.ssl_handshakes', 'stacked'],
+ 'lines': [
+ ['ssl_handshakes', 'successful', 'incremental'],
+ ['ssl_handshakes_failed', 'failed', 'incremental']
+ ]
+ },
+ 'ssl_session_reuses': {
+ 'options': [None, 'Session Reuses', 'sessions/s', 'ssl', 'nginx_plus.ssl_session_reuses', 'line'],
+ 'lines': [
+ ['ssl_session_reuses', 'reused', 'incremental']
+ ]
+ },
+ 'ssl_memory_usage': {
+ 'options': [None, 'Memory Usage', '%', 'ssl', 'nginx_plus.ssl_memory_usage', 'area'],
+ 'lines': [
+ ['ssl_memory_usage', 'usage', 'absolute', 1, 100]
+ ]
+ },
+ 'processes': {
+ 'options': [None, 'Processes', 'processes', 'processes', 'nginx_plus.processes', 'line'],
+ 'lines': [
+ ['processes_respawned', 'respawned']
+ ]
+ }
+}
+
+
+def cache_charts(cache):
+ family = 'cache {0}'.format(cache.real_name)
+ charts = OrderedDict()
+
+ charts['{0}_traffic'.format(cache.name)] = {
+ 'options': [None, 'Traffic', 'KB', family, 'nginx_plus.cache_traffic', 'stacked'],
+ 'lines': [
+ ['_'.join([cache.name, 'hit_bytes']), 'served', 'absolute', 1, 1024],
+ ['_'.join([cache.name, 'miss_bytes_written']), 'written', 'absolute', 1, 1024],
+ ['_'.join([cache.name, 'miss_bytes']), 'bypass', 'absolute', 1, 1024]
+ ]
+ }
+ charts['{0}_memory_usage'.format(cache.name)] = {
+ 'options': [None, 'Memory Usage', '%', family, 'nginx_plus.cache_memory_usage', 'area'],
+ 'lines': [
+ ['_'.join([cache.name, 'memory_usage']), 'usage', 'absolute', 1, 100],
+ ]
+ }
+ return charts
+
+
+def web_zone_charts(wz):
+ charts = OrderedDict()
+ family = 'web zone {name}'.format(name=wz.real_name)
+
+ # Processing
+ charts['zone_{name}_processing'.format(name=wz.name)] = {
+ 'options': [None, 'Zone "{name}" Processing'.format(name=wz.name), 'requests', family,
+ 'nginx_plus.web_zone_processing', 'line'],
+ 'lines': [
+ ['_'.join([wz.name, 'processing']), 'processing']
+ ]
+ }
+ # Requests
+ charts['zone_{name}_requests'.format(name=wz.name)] = {
+ 'options': [None, 'Zone "{name}" Requests'.format(name=wz.name), 'requests/s', family,
+ 'nginx_plus.web_zone_requests', 'line'],
+ 'lines': [
+ ['_'.join([wz.name, 'requests']), 'requests', 'incremental']
+ ]
+ }
+ # Response Codes
+ charts['zone_{name}_responses'.format(name=wz.name)] = {
+ 'options': [None, 'Zone "{name}" Responses'.format(name=wz.name), 'requests/s', family,
+ 'nginx_plus.web_zone_responses', 'stacked'],
+ 'lines': [
+ ['_'.join([wz.name, 'responses_2xx']), '2xx', 'incremental'],
+ ['_'.join([wz.name, 'responses_5xx']), '5xx', 'incremental'],
+ ['_'.join([wz.name, 'responses_3xx']), '3xx', 'incremental'],
+ ['_'.join([wz.name, 'responses_4xx']), '4xx', 'incremental'],
+ ['_'.join([wz.name, 'responses_1xx']), '1xx', 'incremental']
+ ]
+ }
+ # Traffic
+ charts['zone_{name}_net'.format(name=wz.name)] = {
+ 'options': [None, 'Zone "{name}" Traffic'.format(name=wz.name), 'kilobits/s', family,
+ 'nginx_plus.zone_net', 'area'],
+ 'lines': [
+ ['_'.join([wz.name, 'received']), 'received', 'incremental', 1, 1000],
+ ['_'.join([wz.name, 'sent']), 'sent', 'incremental', -1, 1000]
+ ]
+ }
+ return charts
+
+
+def web_upstream_charts(wu):
+ def dimensions(value, a='absolute', m=1, d=1):
+ dims = list()
+ for p in wu:
+ dims.append(['_'.join([wu.name, p.server, value]), p.real_server, a, m, d])
+ return dims
+
+ charts = OrderedDict()
+ family = 'web upstream {name}'.format(name=wu.real_name)
+
+ # Requests
+ charts['web_upstream_{name}_requests'.format(name=wu.name)] = {
+ 'options': [None, 'Peers Requests', 'requests/s', family, 'nginx_plus.web_upstream_requests', 'line'],
+ 'lines': dimensions('requests', 'incremental')
+ }
+ # Responses Codes
+ charts['web_upstream_{name}_all_responses'.format(name=wu.name)] = {
+ 'options': [None, 'All Peers Responses', 'responses/s', family,
+ 'nginx_plus.web_upstream_all_responses', 'stacked'],
+ 'lines': [
+ ['_'.join([wu.name, 'responses_2xx']), '2xx', 'incremental'],
+ ['_'.join([wu.name, 'responses_5xx']), '5xx', 'incremental'],
+ ['_'.join([wu.name, 'responses_3xx']), '3xx', 'incremental'],
+ ['_'.join([wu.name, 'responses_4xx']), '4xx', 'incremental'],
+ ['_'.join([wu.name, 'responses_1xx']), '1xx', 'incremental'],
+ ]
+ }
+ for peer in wu:
+ charts['web_upstream_{0}_{1}_responses'.format(wu.name, peer.server)] = {
+ 'options': [None, 'Peer "{0}" Responses'.format(peer.real_server), 'responses/s', family,
+ 'nginx_plus.web_upstream_peer_responses', 'stacked'],
+ 'lines': [
+ ['_'.join([wu.name, peer.server, 'responses_2xx']), '2xx', 'incremental'],
+ ['_'.join([wu.name, peer.server, 'responses_5xx']), '5xx', 'incremental'],
+ ['_'.join([wu.name, peer.server, 'responses_3xx']), '3xx', 'incremental'],
+ ['_'.join([wu.name, peer.server, 'responses_4xx']), '4xx', 'incremental'],
+ ['_'.join([wu.name, peer.server, 'responses_1xx']), '1xx', 'incremental']
+ ]
+ }
+ # Connections
+ charts['web_upstream_{name}_connections'.format(name=wu.name)] = {
+ 'options': [None, 'Peers Connections', 'active', family, 'nginx_plus.web_upstream_connections', 'line'],
+ 'lines': dimensions('active')
+ }
+ charts['web_upstream_{name}_connections_usage'.format(name=wu.name)] = {
+ 'options': [None, 'Peers Connections Usage', '%', family, 'nginx_plus.web_upstream_connections_usage', 'line'],
+ 'lines': dimensions('connections_usage', d=100)
+ }
+ # Traffic
+ charts['web_upstream_{0}_all_net'.format(wu.name)] = {
+ 'options': [None, 'All Peers Traffic', 'kilobits/s', family, 'nginx_plus.web_upstream_all_net', 'area'],
+ 'lines': [
+ ['{0}_received'.format(wu.name), 'received', 'incremental', 1, 1000],
+ ['{0}_sent'.format(wu.name), 'sent', 'incremental', -1, 1000]
+ ]
+ }
+ for peer in wu:
+ charts['web_upstream_{0}_{1}_net'.format(wu.name, peer.server)] = {
+ 'options': [None, 'Peer "{0}" Traffic'.format(peer.real_server), 'kilobits/s', family,
+ 'nginx_plus.web_upstream_peer_traffic', 'area'],
+ 'lines': [
+ ['{0}_{1}_received'.format(wu.name, peer.server), 'received', 'incremental', 1, 1000],
+ ['{0}_{1}_sent'.format(wu.name, peer.server), 'sent', 'incremental', -1, 1000]
+ ]
+ }
+ # Response Time
+ for peer in wu:
+ charts['web_upstream_{0}_{1}_timings'.format(wu.name, peer.server)] = {
+ 'options': [None, 'Peer "{0}" Timings'.format(peer.real_server), 'ms', family,
+ 'nginx_plus.web_upstream_peer_timings', 'line'],
+ 'lines': [
+ ['_'.join([wu.name, peer.server, 'header_time']), 'header'],
+ ['_'.join([wu.name, peer.server, 'response_time']), 'response']
+ ]
+ }
+ # Memory Usage
+ charts['web_upstream_{name}_memory_usage'.format(name=wu.name)] = {
+ 'options': [None, 'Memory Usage', '%', family, 'nginx_plus.web_upstream_memory_usage', 'area'],
+ 'lines': [
+ ['_'.join([wu.name, 'memory_usage']), 'usage', 'absolute', 1, 100]
+ ]
+ }
+ # State
+ charts['web_upstream_{name}_status'.format(name=wu.name)] = {
+ 'options': [None, 'Peers Status', 'state', family, 'nginx_plus.web_upstream_status', 'line'],
+ 'lines': dimensions('state')
+ }
+ # Downtime
+ charts['web_upstream_{name}_downtime'.format(name=wu.name)] = {
+ 'options': [None, 'Peers Downtime', 'seconds', family, 'nginx_plus.web_upstream_peer_downtime', 'line'],
+ 'lines': dimensions('downtime', d=1000)
+ }
+
+ return charts
+
+
+METRICS = {
+ 'SERVER': [
+ 'processes.respawned',
+ 'connections.accepted',
+ 'connections.dropped',
+ 'connections.active',
+ 'connections.idle',
+ 'ssl.handshakes',
+ 'ssl.handshakes_failed',
+ 'ssl.session_reuses',
+ 'requests.total',
+ 'requests.current',
+ 'slabs.SSL.pages.free',
+ 'slabs.SSL.pages.used'
+ ],
+ 'WEB_ZONE': [
+ 'processing',
+ 'requests',
+ 'responses.1xx',
+ 'responses.2xx',
+ 'responses.3xx',
+ 'responses.4xx',
+ 'responses.5xx',
+ 'discarded',
+ 'received',
+ 'sent'
+ ],
+ 'WEB_UPSTREAM_PEER': [
+ 'id',
+ 'server',
+ 'name',
+ 'state',
+ 'active',
+ 'max_conns',
+ 'requests',
+ 'header_time', # alive only
+ 'response_time', # alive only
+ 'responses.1xx',
+ 'responses.2xx',
+ 'responses.3xx',
+ 'responses.4xx',
+ 'responses.5xx',
+ 'sent',
+ 'received',
+ 'downtime'
+ ],
+ 'WEB_UPSTREAM_SUMMARY': [
+ 'responses.1xx',
+ 'responses.2xx',
+ 'responses.3xx',
+ 'responses.4xx',
+ 'responses.5xx',
+ 'sent',
+ 'received'
+ ],
+ 'CACHE': [
+ 'hit.bytes', # served
+ 'miss.bytes_written', # written
+ 'miss.bytes' # bypass
+
+ ]
+}
+
+BAD_SYMBOLS = re.compile(r'[:/.-]+')
+
+
+class Cache:
+ key = 'caches'
+ charts = cache_charts
+
+ def __init__(self, **kw):
+ self.real_name = kw['name']
+ self.name = BAD_SYMBOLS.sub('_', self.real_name)
+
+ def memory_usage(self, data):
+ used = data['slabs'][self.real_name]['pages']['used']
+ free = data['slabs'][self.real_name]['pages']['free']
+ return used / float(free + used) * 1e4
+
+ def get_data(self, raw_data):
+ zone_data = raw_data['caches'][self.real_name]
+ data = parse_json(zone_data, METRICS['CACHE'])
+ data['memory_usage'] = self.memory_usage(raw_data)
+ return dict(('_'.join([self.name, k]), v) for k, v in data.items())
+
+
+class WebZone:
+ key = 'server_zones'
+ charts = web_zone_charts
+
+ def __init__(self, **kw):
+ self.real_name = kw['name']
+ self.name = BAD_SYMBOLS.sub('_', self.real_name)
+
+ def get_data(self, raw_data):
+ zone_data = raw_data['server_zones'][self.real_name]
+ data = parse_json(zone_data, METRICS['WEB_ZONE'])
+ return dict(('_'.join([self.name, k]), v) for k, v in data.items())
+
+
+class WebUpstream:
+ key = 'upstreams'
+ charts = web_upstream_charts
+
+ def __init__(self, **kw):
+ self.real_name = kw['name']
+ self.name = BAD_SYMBOLS.sub('_', self.real_name)
+ self.peers = OrderedDict()
+
+ peers = kw['response']['upstreams'][self.real_name]['peers']
+ for peer in peers:
+ self.add_peer(peer['id'], peer['server'])
+
+ def __iter__(self):
+ return iter(self.peers.values())
+
+ def add_peer(self, idx, server):
+ peer = WebUpstreamPeer(idx, server)
+ self.peers[peer.real_server] = peer
+ return peer
+
+ def peers_stats(self, peers):
+ peers = {int(peer['id']): peer for peer in peers}
+ data = dict()
+ for peer in self.peers.values():
+ if not peer.active:
+ continue
+ try:
+ data.update(peer.get_data(peers[peer.id]))
+ except KeyError:
+ peer.active = False
+ return data
+
+ def memory_usage(self, data):
+ used = data['slabs'][self.real_name]['pages']['used']
+ free = data['slabs'][self.real_name]['pages']['free']
+ return used / float(free + used) * 1e4
+
+ def summary_stats(self, data):
+ rv = defaultdict(int)
+ for metric in METRICS['WEB_UPSTREAM_SUMMARY']:
+ for peer in self.peers.values():
+ if peer.active:
+ metric = '_'.join(metric.split('.'))
+ rv[metric] += data['_'.join([peer.server, metric])]
+ return rv
+
+ def get_data(self, raw_data):
+ data = dict()
+ peers = raw_data['upstreams'][self.real_name]['peers']
+ data.update(self.peers_stats(peers))
+ data.update(self.summary_stats(data))
+ data['memory_usage'] = self.memory_usage(raw_data)
+ return dict(('_'.join([self.name, k]), v) for k, v in data.items())
+
+
+class WebUpstreamPeer:
+ def __init__(self, idx, server):
+ self.id = idx
+ self.real_server = server
+ self.server = BAD_SYMBOLS.sub('_', self.real_server)
+ self.active = True
+
+ def get_data(self, raw):
+ data = dict(header_time=0, response_time=0, max_conns=0)
+ data.update(parse_json(raw, METRICS['WEB_UPSTREAM_PEER']))
+ data['connections_usage'] = 0 if not data['max_conns'] else data['active'] / float(data['max_conns']) * 1e4
+ data['state'] = int(data['state'] == 'up')
+ return dict(('_'.join([self.server, k]), v) for k, v in data.items())
+
+
+class Service(UrlService):
+ def __init__(self, configuration=None, name=None):
+ UrlService.__init__(self, configuration=configuration, name=name)
+ self.order = list(ORDER)
+ self.definitions = deepcopy(CHARTS)
+ self.objects = dict()
+
+ def check(self):
+ if not self.url:
+ self.error('URL is not defined')
+ return None
+
+ self._manager = self._build_manager()
+ if not self._manager:
+ return None
+
+ raw_data = self._get_raw_data()
+ if not raw_data:
+ return None
+
+ try:
+ response = loads(raw_data)
+ except ValueError:
+ return None
+
+ for obj_cls in [WebZone, WebUpstream, Cache]:
+ for obj_name in response.get(obj_cls.key, list()):
+ obj = obj_cls(name=obj_name, response=response)
+ self.objects[obj.real_name] = obj
+ charts = obj_cls.charts(obj)
+ for chart in charts:
+ self.order.append(chart)
+ self.definitions[chart] = charts[chart]
+
+ return bool(self.objects)
+
+ def _get_data(self):
+ """
+ Format data received from http request
+ :return: dict
+ """
+ raw_data = self._get_raw_data()
+ if not raw_data:
+ return None
+ response = loads(raw_data)
+
+ data = parse_json(response, METRICS['SERVER'])
+ data['ssl_memory_usage'] = data['slabs_SSL_pages_used'] / float(data['slabs_SSL_pages_free']) * 1e4
+
+ for obj in self.objects.values():
+ if obj.real_name in response[obj.key]:
+ data.update(obj.get_data(response))
+
+ return data
+
+
+def parse_json(raw_data, metrics):
+ data = dict()
+ for metric in metrics:
+ value = raw_data
+ metrics_list = metric.split('.')
+ try:
+ for m in metrics_list:
+ value = value[m]
+ except KeyError:
+ continue
+ data['_'.join(metrics_list)] = value
+ return data
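The `parse_json()` helper above flattens dotted metric paths into underscore-joined keys and silently skips paths that are missing from the response. A tiny usage illustration with a made-up response fragment:

```python
# Hedged sketch: how parse_json() flattens dotted metric paths.
# The response fragment is hypothetical.
def parse_json(raw_data, metrics):
    data = dict()
    for metric in metrics:
        value = raw_data
        metrics_list = metric.split('.')
        try:
            for m in metrics_list:
                value = value[m]
        except KeyError:
            continue  # missing paths are simply skipped
        data['_'.join(metrics_list)] = value
    return data

response = {'ssl': {'handshakes': 10, 'handshakes_failed': 1},
            'slabs': {'SSL': {'pages': {'used': 3, 'free': 97}}}}
print(parse_json(response, ['ssl.handshakes', 'slabs.SSL.pages.used', 'requests.total']))
# -> {'ssl_handshakes': 10, 'slabs_SSL_pages_used': 3}
#    ('requests.total' is absent in this fragment, so it is skipped)
```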
diff --git a/collectors/python.d.plugin/nginx_plus/nginx_plus.conf b/collectors/python.d.plugin/nginx_plus/nginx_plus.conf
new file mode 100644
index 000000000..7b5c8f43f
--- /dev/null
+++ b/collectors/python.d.plugin/nginx_plus/nginx_plus.conf
@@ -0,0 +1,87 @@
+# netdata python.d.plugin configuration for nginx_plus
+#
+# This file is in YaML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 1
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# retries sets the number of retries to be made in case of failures.
+# If unset, the default for python.d.plugin is used.
+# Attempts to restore the service are made once every update_every
+# and only if the module has collected values in the past.
+# retries: 60
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# retries: 60 # the JOB's number of restoration attempts
+# autodetection_retry: 0 # the JOB's re-check interval in seconds
+#
+# In addition to the above, nginx_plus also supports the following:
+#
+# url: 'URL' # the URL to fetch nginx_plus's stats
+#
+# if the URL is password protected, the following are supported:
+#
+# user: 'username'
+# pass: 'password'
+
+# ----------------------------------------------------------------------
+# AUTO-DETECTION JOBS
+# only one of them will run (they have the same name)
+
+localhost:
+ name : 'local'
+ url : 'http://localhost/status'
+
+localipv4:
+ name : 'local'
+ url : 'http://127.0.0.1/status'
+
+localipv6:
+ name : 'local'
+ url : 'http://[::1]/status'
diff --git a/collectors/python.d.plugin/nsd/Makefile.inc b/collectors/python.d.plugin/nsd/Makefile.inc
new file mode 100644
index 000000000..58e9fd67d
--- /dev/null
+++ b/collectors/python.d.plugin/nsd/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += nsd/nsd.chart.py
+dist_pythonconfig_DATA += nsd/nsd.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += nsd/README.md nsd/Makefile.inc
+
diff --git a/collectors/python.d.plugin/nsd/README.md b/collectors/python.d.plugin/nsd/README.md
new file mode 100644
index 000000000..02c302f41
--- /dev/null
+++ b/collectors/python.d.plugin/nsd/README.md
@@ -0,0 +1,54 @@
+# nsd
+
+This module uses the `nsd-control stats_noreset` command to provide `nsd` statistics.
+
+**Requirements:**
+ * Version of `nsd` must be 4.0+
+ * Netdata must have permissions to run `nsd-control stats_noreset`
+
+It produces:
+
+1. **Queries**
+ * queries
+
+2. **Zones**
+ * master
+ * slave
+
+3. **Protocol**
+ * udp
+ * udp6
+ * tcp
+ * tcp6
+
+4. **Query Type**
+ * A
+ * NS
+ * CNAME
+ * SOA
+ * PTR
+ * HINFO
+ * MX
+ * NAPTR
+ * TXT
+ * AAAA
+ * SRV
+ * ANY
+
+5. **Transfer**
+ * NOTIFY
+ * AXFR
+
+6. **Return Code**
+ * NOERROR
+ * FORMERR
+ * SERVFAIL
+ * NXDOMAIN
+ * NOTIMP
+ * REFUSED
+ * YXDOMAIN
+
+
+Configuration is not needed.
+
+---
diff --git a/collectors/python.d.plugin/nsd/nsd.chart.py b/collectors/python.d.plugin/nsd/nsd.chart.py
new file mode 100644
index 000000000..d713f46bd
--- /dev/null
+++ b/collectors/python.d.plugin/nsd/nsd.chart.py
@@ -0,0 +1,100 @@
+# -*- coding: utf-8 -*-
+# Description: NSD `nsd-control stats_noreset` netdata python.d module
+# Author: <383c57 at gmail.com>
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+import re
+
+from bases.FrameworkServices.ExecutableService import ExecutableService
+
+# default module values (can be overridden per job in `config`)
+priority = 60000
+retries = 5
+update_every = 30
+
+# charts order (can be overridden if you want fewer charts, or different order)
+ORDER = ['queries', 'zones', 'protocol', 'type', 'transfer', 'rcode']
+
+CHARTS = {
+ 'queries': {
+ 'options': [None, 'queries', 'queries/s', 'queries', 'nsd.queries', 'line'],
+ 'lines': [
+ ['num_queries', 'queries', 'incremental']
+ ]
+ },
+ 'zones': {
+ 'options': [None, 'zones', 'zones', 'zones', 'nsd.zones', 'stacked'],
+ 'lines': [
+ ['zone_master', 'master', 'absolute'],
+ ['zone_slave', 'slave', 'absolute']
+ ]
+ },
+ 'protocol': {
+ 'options': [None, 'protocol', 'queries/s', 'protocol', 'nsd.protocols', 'stacked'],
+ 'lines': [
+ ['num_udp', 'udp', 'incremental'],
+ ['num_udp6', 'udp6', 'incremental'],
+ ['num_tcp', 'tcp', 'incremental'],
+ ['num_tcp6', 'tcp6', 'incremental']
+ ]
+ },
+ 'type': {
+ 'options': [None, 'query type', 'queries/s', 'query type', 'nsd.type', 'stacked'],
+ 'lines': [
+ ['num_type_A', 'A', 'incremental'],
+ ['num_type_NS', 'NS', 'incremental'],
+ ['num_type_CNAME', 'CNAME', 'incremental'],
+ ['num_type_SOA', 'SOA', 'incremental'],
+ ['num_type_PTR', 'PTR', 'incremental'],
+ ['num_type_HINFO', 'HINFO', 'incremental'],
+ ['num_type_MX', 'MX', 'incremental'],
+ ['num_type_NAPTR', 'NAPTR', 'incremental'],
+ ['num_type_TXT', 'TXT', 'incremental'],
+ ['num_type_AAAA', 'AAAA', 'incremental'],
+ ['num_type_SRV', 'SRV', 'incremental'],
+ ['num_type_TYPE255', 'ANY', 'incremental']
+ ]
+ },
+ 'transfer': {
+ 'options': [None, 'transfer', 'queries/s', 'transfer', 'nsd.transfer', 'stacked'],
+ 'lines': [
+ ['num_opcode_NOTIFY', 'NOTIFY', 'incremental'],
+ ['num_type_TYPE252', 'AXFR', 'incremental']
+ ]
+ },
+ 'rcode': {
+ 'options': [None, 'return code', 'queries/s', 'return code', 'nsd.rcode', 'stacked'],
+ 'lines': [
+ ['num_rcode_NOERROR', 'NOERROR', 'incremental'],
+ ['num_rcode_FORMERR', 'FORMERR', 'incremental'],
+ ['num_rcode_SERVFAIL', 'SERVFAIL', 'incremental'],
+ ['num_rcode_NXDOMAIN', 'NXDOMAIN', 'incremental'],
+ ['num_rcode_NOTIMP', 'NOTIMP', 'incremental'],
+ ['num_rcode_REFUSED', 'REFUSED', 'incremental'],
+ ['num_rcode_YXDOMAIN', 'YXDOMAIN', 'incremental']
+ ]
+ }
+}
+
+
+class Service(ExecutableService):
+ def __init__(self, configuration=None, name=None):
+ ExecutableService.__init__(
+ self, configuration=configuration, name=name)
+ self.command = 'nsd-control stats_noreset'
+ self.order = ORDER
+ self.definitions = CHARTS
+ self.regex = re.compile(r'([A-Za-z0-9.]+)=(\d+)')
+
+ def _get_data(self):
+ lines = self._get_raw_data()
+ if not lines:
+ return None
+
+ r = self.regex
+ stats = dict((k.replace('.', '_'), int(v))
+ for k, v in r.findall(''.join(lines)))
+ stats.setdefault('num_opcode_NOTIFY', 0)
+ stats.setdefault('num_type_TYPE252', 0)
+ stats.setdefault('num_type_TYPE255', 0)
+ return stats
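The regex above matches `key=value` pairs from `nsd-control stats_noreset`, and dots in the keys are mapped to underscores so they line up with the dimension ids in `CHARTS`. A quick sketch with a few hypothetical output lines in that format:

```python
# Hedged sketch: how nsd-control output is turned into chart keys.
# The sample lines are hypothetical but follow the key=value format
# the module's regex expects.
import re

regex = re.compile(r'([A-Za-z0-9.]+)=(\d+)')
sample = "\n".join([
    "num.queries=123456",
    "num.type.A=100000",
    "num.rcode.NOERROR=120000",
    "zone.master=3",
    "zone.slave=1",
])
stats = dict((k.replace('.', '_'), int(v)) for k, v in regex.findall(sample))
print(stats['num_queries'], stats['num_type_A'], stats['zone_master'])  # -> 123456 100000 3
```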
diff --git a/collectors/python.d.plugin/nsd/nsd.conf b/collectors/python.d.plugin/nsd/nsd.conf
new file mode 100644
index 000000000..078e97216
--- /dev/null
+++ b/collectors/python.d.plugin/nsd/nsd.conf
@@ -0,0 +1,93 @@
+# netdata python.d.plugin configuration for nsd
+#
+# This file is in YaML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# nsd-control is slow, so once every 30 seconds
+# update_every: 30
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# retries sets the number of retries to be made in case of failures.
+# If unset, the default for python.d.plugin is used.
+# Attempts to restore the service are made once every update_every
+# and only if the module has collected values in the past.
+# retries: 60
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# retries: 60 # the JOB's number of restoration attempts
+# autodetection_retry: 0 # the JOB's re-check interval in seconds
+#
+# In addition to the above, nsd also supports the following:
+#
+# command: 'nsd-control stats_noreset' # the command to run
+#
+
+# ----------------------------------------------------------------------
+# IMPORTANT Information
+#
+# Netdata must have permissions to run `nsd-control stats_noreset` command
+#
+# - Example-1 (use "sudo")
+# 1. sudoers (e.g. visudo -f /etc/sudoers.d/netdata)
+# Defaults:netdata !requiretty
+# netdata ALL=(ALL) NOPASSWD: /usr/sbin/nsd-control stats_noreset
+# 2. etc/netdata/python.d/nsd.conf
+# local:
+# update_every: 30
+# command: 'sudo /usr/sbin/nsd-control stats_noreset'
+#
+# - Example-2 (add "netdata" user to "nsd" group)
+# usermod -aG nsd netdata
+#
+
+# ----------------------------------------------------------------------
+# AUTO-DETECTION JOBS
+
+local:
+ update_every: 30
+ command: 'nsd-control stats_noreset'
diff --git a/collectors/python.d.plugin/ntpd/Makefile.inc b/collectors/python.d.plugin/ntpd/Makefile.inc
new file mode 100644
index 000000000..81210ebab
--- /dev/null
+++ b/collectors/python.d.plugin/ntpd/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += ntpd/ntpd.chart.py
+dist_pythonconfig_DATA += ntpd/ntpd.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += ntpd/README.md ntpd/Makefile.inc
+
diff --git a/collectors/python.d.plugin/ntpd/README.md b/collectors/python.d.plugin/ntpd/README.md
new file mode 100644
index 000000000..b0fa17fde
--- /dev/null
+++ b/collectors/python.d.plugin/ntpd/README.md
@@ -0,0 +1,71 @@
+# ntpd
+
+This module monitors the system variables of the local `ntpd` daemon (optionally including variables of the polled peers) using the NTP Control Message Protocol via a UDP socket, similar to `ntpq`, the [standard NTP query program](http://doc.ntp.org/current-stable/ntpq.html).
+
+**Requirements:**
+ * Version: `NTPv4`
+ * Local interrogation allowed in `/etc/ntp.conf` (default):
+
+```
+# Local users may interrogate the ntp server more closely.
+restrict 127.0.0.1
+restrict ::1
+```
+
+It produces:
+
+1. system
+ * offset
+ * jitter
+ * frequency
+ * delay
+ * dispersion
+ * stratum
+ * tc
+ * precision
+
+2. peers
+ * offset
+ * delay
+ * dispersion
+ * jitter
+ * rootdelay
+ * rootdispersion
+ * stratum
+ * hmode
+ * pmode
+ * hpoll
+ * ppoll
+ * precision
+
+### configuration
+
+Sample:
+
+```yaml
+update_every: 10
+
+host: 'localhost'
+port: '123'
+show_peers: yes
+# hide peers with source address in ranges 127.0.0.0/8 and 192.168.0.0/16
+peer_filter: '(127\..*)|(192\.168\..*)'
+# check for new/changed peers every 60 updates
+peer_rescan: 60
+```
+
+Sample (multiple jobs):
+
+Note: `ntp.conf` on the host `otherhost` must be configured to allow queries from our local host by including a line like `restrict <IP> nomodify notrap nopeer`.
+
+```yaml
+local:
+ host: 'localhost'
+
+otherhost:
+ host: 'otherhost'
+```
+
+If no configuration is given, the module will attempt to connect to `ntpd` on `::1:123` or `127.0.0.1:123` and show charts for the system variables. Use `show_peers: yes` to also show the charts for configured peers. Local peers in the range `127.0.0.0/8` are hidden by default; use `peer_filter: ''` to show all peers.
+
+---
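Under the hood, the module in the next file speaks the NTP mode-6 control protocol directly over UDP rather than shelling out to `ntpq`. A minimal hedged sketch of one `readvar` request for the system variables, using the same header layout the module packs (version 2, mode 6, opcode 2); the host, port, and response handling are simplified assumptions:

```python
# Hedged sketch: one NTP mode-6 'readvar' round trip, mirroring the
# 12-byte header layout packed by the ntpd module. Host/port and the
# response decoding below are simplified assumptions.
import socket
import struct

HEADER_FORMAT = '!BBHHHHH'
version, mode, opcode, sequence = 2, 6, 2, 1
request = struct.pack(HEADER_FORMAT, (version << 3 | mode), opcode,
                      sequence, 0, 0, 0, 0)  # status, assoc id, offset, count

sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.settimeout(5)
sock.sendto(request, ('127.0.0.1', 123))
response, _ = sock.recvfrom(1024)

# Everything after the 12-byte header is the 'key=value, ...' variable list.
print(response[12:].decode('ascii', errors='replace'))
```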
diff --git a/collectors/python.d.plugin/ntpd/ntpd.chart.py b/collectors/python.d.plugin/ntpd/ntpd.chart.py
new file mode 100644
index 000000000..79d557c80
--- /dev/null
+++ b/collectors/python.d.plugin/ntpd/ntpd.chart.py
@@ -0,0 +1,390 @@
+# -*- coding: utf-8 -*-
+# Description: ntpd netdata python.d module
+# Author: Sven Mäder (rda0)
+# Author: Ilya Mashchenko (l2isbad)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+import struct
+import re
+
+from bases.FrameworkServices.SocketService import SocketService
+
+# default module values
+update_every = 1
+priority = 60000
+retries = 60
+
+# NTP Control Message Protocol constants
+MODE = 6
+HEADER_FORMAT = '!BBHHHHH'
+HEADER_LEN = 12
+OPCODES = {
+ 'readstat': 1,
+ 'readvar': 2
+}
+
+# Maximal dimension precision
+PRECISION = 1000000
+
+# Static charts
+ORDER = [
+ 'sys_offset',
+ 'sys_jitter',
+ 'sys_frequency',
+ 'sys_wander',
+ 'sys_rootdelay',
+ 'sys_rootdisp',
+ 'sys_stratum',
+ 'sys_tc',
+ 'sys_precision',
+ 'peer_offset',
+ 'peer_delay',
+ 'peer_dispersion',
+ 'peer_jitter',
+ 'peer_xleave',
+ 'peer_rootdelay',
+ 'peer_rootdisp',
+ 'peer_stratum',
+ 'peer_hmode',
+ 'peer_pmode',
+ 'peer_hpoll',
+ 'peer_ppoll',
+ 'peer_precision'
+]
+
+CHARTS = {
+ 'sys_offset': {
+ 'options': [None, 'Combined offset of server relative to this host', 'ms', 'system', 'ntpd.sys_offset', 'area'],
+ 'lines': [
+ ['offset', 'offset', 'absolute', 1, PRECISION]
+ ]
+ },
+ 'sys_jitter': {
+ 'options': [None, 'Combined system jitter and clock jitter', 'ms', 'system', 'ntpd.sys_jitter', 'line'],
+ 'lines': [
+ ['sys_jitter', 'system', 'absolute', 1, PRECISION],
+ ['clk_jitter', 'clock', 'absolute', 1, PRECISION]
+ ]
+ },
+ 'sys_frequency': {
+ 'options': [None, 'Frequency offset relative to hardware clock', 'ppm', 'system', 'ntpd.sys_frequency', 'area'],
+ 'lines': [
+ ['frequency', 'frequency', 'absolute', 1, PRECISION]
+ ]
+ },
+ 'sys_wander': {
+ 'options': [None, 'Clock frequency wander', 'ppm', 'system', 'ntpd.sys_wander', 'area'],
+ 'lines': [
+ ['clk_wander', 'clock', 'absolute', 1, PRECISION]
+ ]
+ },
+ 'sys_rootdelay': {
+ 'options': [None, 'Total roundtrip delay to the primary reference clock', 'ms', 'system',
+ 'ntpd.sys_rootdelay', 'area'],
+ 'lines': [
+ ['rootdelay', 'delay', 'absolute', 1, PRECISION]
+ ]
+ },
+ 'sys_rootdisp': {
+ 'options': [None, 'Total root dispersion to the primary reference clock', 'ms', 'system',
+ 'ntpd.sys_rootdisp', 'area'],
+ 'lines': [
+ ['rootdisp', 'dispersion', 'absolute', 1, PRECISION]
+ ]
+ },
+ 'sys_stratum': {
+ 'options': [None, 'Stratum (1-15)', 'stratum', 'system', 'ntpd.sys_stratum', 'line'],
+ 'lines': [
+ ['stratum', 'stratum', 'absolute', 1, PRECISION]
+ ]
+ },
+ 'sys_tc': {
+ 'options': [None, 'Time constant and poll exponent (3-17)', 'log2 s', 'system', 'ntpd.sys_tc', 'line'],
+ 'lines': [
+ ['tc', 'current', 'absolute', 1, PRECISION],
+ ['mintc', 'minimum', 'absolute', 1, PRECISION]
+ ]
+ },
+ 'sys_precision': {
+ 'options': [None, 'Precision', 'log2 s', 'system', 'ntpd.sys_precision', 'line'],
+ 'lines': [
+ ['precision', 'precision', 'absolute', 1, PRECISION]
+ ]
+ }
+}
+
+PEER_CHARTS = {
+ 'peer_offset': {
+ 'options': [None, 'Filter offset', 'ms', 'peers', 'ntpd.peer_offset', 'line'],
+ 'lines': []
+ },
+ 'peer_delay': {
+ 'options': [None, 'Filter delay', 'ms', 'peers', 'ntpd.peer_delay', 'line'],
+ 'lines': []
+ },
+ 'peer_dispersion': {
+ 'options': [None, 'Filter dispersion', 'ms', 'peers', 'ntpd.peer_dispersion', 'line'],
+ 'lines': []
+ },
+ 'peer_jitter': {
+ 'options': [None, 'Filter jitter', 'ms', 'peers', 'ntpd.peer_jitter', 'line'],
+ 'lines': []
+ },
+ 'peer_xleave': {
+ 'options': [None, 'Interleave delay', 'ms', 'peers', 'ntpd.peer_xleave', 'line'],
+ 'lines': []
+ },
+ 'peer_rootdelay': {
+ 'options': [None, 'Total roundtrip delay to the primary reference clock', 'ms', 'peers',
+ 'ntpd.peer_rootdelay', 'line'],
+ 'lines': []
+ },
+ 'peer_rootdisp': {
+ 'options': [None, 'Total root dispersion to the primary reference clock', 'ms', 'peers',
+ 'ntpd.peer_rootdisp', 'line'],
+ 'lines': []
+ },
+ 'peer_stratum': {
+ 'options': [None, 'Stratum (1-15)', 'stratum', 'peers', 'ntpd.peer_stratum', 'line'],
+ 'lines': []
+ },
+ 'peer_hmode': {
+ 'options': [None, 'Host mode (1-6)', 'hmode', 'peers', 'ntpd.peer_hmode', 'line'],
+ 'lines': []
+ },
+ 'peer_pmode': {
+ 'options': [None, 'Peer mode (1-5)', 'pmode', 'peers', 'ntpd.peer_pmode', 'line'],
+ 'lines': []
+ },
+ 'peer_hpoll': {
+ 'options': [None, 'Host poll exponent', 'log2 s', 'peers', 'ntpd.peer_hpoll', 'line'],
+ 'lines': []
+ },
+ 'peer_ppoll': {
+ 'options': [None, 'Peer poll exponent', 'log2 s', 'peers', 'ntpd.peer_ppoll', 'line'],
+ 'lines': []
+ },
+ 'peer_precision': {
+ 'options': [None, 'Precision', 'log2 s', 'peers', 'ntpd.peer_precision', 'line'],
+ 'lines': []
+ }
+}
+
+
+class Base:
+ regex = re.compile(r'([a-z_]+)=((?:-)?[0-9]+(?:\.[0-9]+)?)')
+
+ @staticmethod
+ def get_header(associd=0, operation='readvar'):
+ """
+ Constructs the NTP Control Message header:
+ 0 1 2 3
+ 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ |LI | VN |Mode |R|E|M| OpCode | Sequence Number |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | Status | Association ID |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | Offset | Count |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ """
+ version = 2
+ sequence = 1
+ status = 0
+ offset = 0
+ count = 0
+ header = struct.pack(HEADER_FORMAT, (version << 3 | MODE), OPCODES[operation],
+ sequence, status, associd, offset, count)
+ return header
+
+
+class System(Base):
+ def __init__(self):
+ self.request = self.get_header()
+
+ def get_data(self, raw):
+ """
+ Extracts key=value pairs with float/integer from ntp response packet data.
+ """
+ data = dict()
+ for key, value in self.regex.findall(raw):
+ data[key] = float(value) * PRECISION
+ return data
+
+
+class Peer(Base):
+ def __init__(self, idx, name):
+ self.id = idx
+ self.real_name = name
+ self.name = name.replace('.', '_')
+ self.request = self.get_header(self.id)
+
+ def get_data(self, raw):
+ """
+ Extracts key=value pairs with float/integer from ntp response packet data.
+ """
+ data = dict()
+ for key, value in self.regex.findall(raw):
+ dimension = '_'.join([self.name, key])
+ data[dimension] = float(value) * PRECISION
+ return data
+
+
+class Service(SocketService):
+ def __init__(self, configuration=None, name=None):
+ SocketService.__init__(self, configuration=configuration, name=name)
+ self.order = list(ORDER)
+ self.definitions = dict(CHARTS)
+
+ self.port = 'ntp'
+ self.dgram_socket = True
+ self.system = System()
+ self.peers = dict()
+ self.request = str()
+ self.retries = 0
+ self.show_peers = self.configuration.get('show_peers', False)
+ self.peer_rescan = self.configuration.get('peer_rescan', 60)
+
+ if self.show_peers:
+ self.definitions.update(PEER_CHARTS)
+
+ def check(self):
+ """
+ Checks if we can get valid systemvars.
+ If not, returns None to disable module.
+ """
+ self._parse_config()
+
+ peer_filter = self.configuration.get('peer_filter', r'127\..*')
+ try:
+ self.peer_filter = re.compile(r'^((0\.0\.0\.0)|({0}))$'.format(peer_filter))
+ except re.error as error:
+ self.error('Compile pattern error (peer_filter) : {0}'.format(error))
+ return None
+
+ self.request = self.system.request
+ raw_systemvars = self._get_raw_data()
+
+ if not self.system.get_data(raw_systemvars):
+ return None
+
+ return True
+
+ def get_data(self):
+ """
+ Gets systemvars data on each update.
+ Gets peervars data for all peers on each update.
+ """
+ data = dict()
+
+ self.request = self.system.request
+ raw = self._get_raw_data()
+ if not raw:
+ return None
+
+ data.update(self.system.get_data(raw))
+
+ if not self.show_peers:
+ return data
+
+ if not self.peers or self.runs_counter % self.peer_rescan == 0 or self.retries > 8:
+ self.find_new_peers()
+
+ for peer in self.peers.values():
+ self.request = peer.request
+ peer_data = peer.get_data(self._get_raw_data())
+ if peer_data:
+ data.update(peer_data)
+ else:
+ self.retries += 1
+
+ return data
+
+ def find_new_peers(self):
+ new_peers = dict((p.real_name, p) for p in self.get_peers())
+ if new_peers:
+
+ peers_to_remove = set(self.peers) - set(new_peers)
+ peers_to_add = set(new_peers) - set(self.peers)
+
+ for peer_name in peers_to_remove:
+ self.hide_old_peer_from_charts(self.peers[peer_name])
+ del self.peers[peer_name]
+
+ for peer_name in peers_to_add:
+ self.add_new_peer_to_charts(new_peers[peer_name])
+
+ self.peers.update(new_peers)
+ self.retries = 0
+
+ def add_new_peer_to_charts(self, peer):
+ for chart_id in set(self.charts.charts) & set(PEER_CHARTS):
+ dim_id = peer.name + chart_id[4:]
+ if dim_id not in self.charts[chart_id]:
+ self.charts[chart_id].add_dimension([dim_id, peer.real_name, 'absolute', 1, PRECISION])
+ else:
+ self.charts[chart_id].hide_dimension(dim_id, reverse=True)
+
+ def hide_old_peer_from_charts(self, peer):
+ for chart_id in set(self.charts.charts) & set(PEER_CHARTS):
+ dim_id = peer.name + chart_id[4:]
+ self.charts[chart_id].hide_dimension(dim_id)
+
+ def get_peers(self):
+ self.request = Base.get_header(operation='readstat')
+
+ raw_data = self._get_raw_data(raw=True)
+ if not raw_data:
+ return list()
+
+ peer_ids = self.get_peer_ids(raw_data)
+ if not peer_ids:
+ return list()
+
+ new_peers = list()
+ for peer_id in peer_ids:
+ self.request = Base.get_header(peer_id)
+ raw_peer_data = self._get_raw_data()
+ if not raw_peer_data:
+ continue
+ srcadr = re.search(r'(srcadr)=([^,]+)', raw_peer_data)
+ if not srcadr:
+ continue
+ srcadr = srcadr.group(2)
+ if self.peer_filter.search(srcadr):
+ continue
+ stratum = re.search(r'(stratum)=([^,]+)', raw_peer_data)
+ if not stratum:
+ continue
+ if int(stratum.group(2)) > 15:
+ continue
+
+ new_peer = Peer(idx=peer_id, name=srcadr)
+ new_peers.append(new_peer)
+ return new_peers
+
+ def get_peer_ids(self, res):
+ """
+ Unpack the NTP Control Message header
+ Get data length from header
+ Get list of association ids returned in the readstat response
+ """
+
+ try:
+ count = struct.unpack(HEADER_FORMAT, res[:HEADER_LEN])[6]
+ except struct.error as error:
+ self.error('error unpacking header: {0}'.format(error))
+ return None
+ if not count:
+ self.error('empty data field in NTP control packet')
+ return None
+
+ data_end = HEADER_LEN + count
+ data = res[HEADER_LEN:data_end]
+ data_format = ''.join(['!', 'H' * int(count / 2)])
+ try:
+ peer_ids = list(struct.unpack(data_format, data))[::2]
+ except struct.error as error:
+ self.error('error unpacking data: {0}'.format(error))
+ return None
+ return peer_ids
diff --git a/collectors/python.d.plugin/ntpd/ntpd.conf b/collectors/python.d.plugin/ntpd/ntpd.conf
new file mode 100644
index 000000000..7adc4074b
--- /dev/null
+++ b/collectors/python.d.plugin/ntpd/ntpd.conf
@@ -0,0 +1,91 @@
+# netdata python.d.plugin configuration for ntpd
+#
+# This file is in YaML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 1
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# retries sets the number of retries to be made in case of failures.
+# If unset, the default for python.d.plugin is used.
+# Attempts to restore the service are made once every update_every
+# and only if the module has collected values in the past.
+# retries: 60
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# retries: 60 # the JOB's number of restoration attempts
+#
+# Additionally to the above, ntp also supports the following:
+#
+# host: 'localhost' # the host to query
+# port: '123' # the UDP port where `ntpd` listens
+# show_peers: no # use `yes` to show peer charts. enabling this
+# # option is recommended only for debugging, as
+# # it may leak memory if the peers change frequently.
+# peer_filter: '127\..*' # regex to exclude peers
+# # by default local peers are hidden
+# # use `''` to show all peers.
+# peer_rescan: 60 # interval (>0) to check for new/changed peers
+# # use `1` to check on every update
+#
+# ----------------------------------------------------------------------
+# AUTO-DETECTION JOBS
+# only one of them will run (they have the same name)
+
+localhost:
+ name: 'local'
+ host: 'localhost'
+ port: '123'
+ show_peers: no
+
+localhost_ipv4:
+ name: 'local'
+ host: '127.0.0.1'
+ port: '123'
+ show_peers: no
+
+localhost_ipv6:
+ name: 'local'
+ host: '::1'
+ port: '123'
+ show_peers: no
diff --git a/collectors/python.d.plugin/ovpn_status_log/Makefile.inc b/collectors/python.d.plugin/ovpn_status_log/Makefile.inc
new file mode 100644
index 000000000..1fbc506d6
--- /dev/null
+++ b/collectors/python.d.plugin/ovpn_status_log/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += ovpn_status_log/ovpn_status_log.chart.py
+dist_pythonconfig_DATA += ovpn_status_log/ovpn_status_log.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += ovpn_status_log/README.md ovpn_status_log/Makefile.inc
+
diff --git a/collectors/python.d.plugin/ovpn_status_log/README.md b/collectors/python.d.plugin/ovpn_status_log/README.md
new file mode 100644
index 000000000..be1ea279e
--- /dev/null
+++ b/collectors/python.d.plugin/ovpn_status_log/README.md
@@ -0,0 +1,32 @@
+# ovpn_status_log
+
+This module monitors the OpenVPN status log file.
+
+**Requirements:**
+
+ * If you are running multiple OpenVPN instances out of the same directory, MAKE SURE TO EDIT THE DIRECTIVES which create output files
+   so that the instances do not overwrite each other's output files.
+
+ * Make sure the NETDATA USER CAN READ the openvpn-status.log file.
+
+ * The `update_every` interval MUST MATCH the interval on which OpenVPN writes the operational status to the log file.
+
+It produces:
+
+1. **Users** OpenVPN active users
+ * users
+
+2. **Traffic** OpenVPN overall bandwidth usage in KB/s
+ * in
+ * out
+
+### configuration
+
+Sample:
+
+```yaml
+default:
+ log_path : '/var/log/openvpn-status.log'
+```
+
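+In TLS mode the module extracts each client's real address and byte counters from the client list section of the status log. A rough illustration using the same pattern the module uses, applied to a made-up log line:
+
+```python
+import re
+
+TLS_REGEX = re.compile(r'(?:[0-9a-f:]+|(?:\d{1,3}(?:\.\d{1,3}){3}(?::\d+)?)) (?P<bytes_in>\d+) (?P<bytes_out>\d+)')
+
+# hypothetical client line from /var/log/openvpn-status.log
+line = 'client1,203.0.113.7:51234,356483,2319127,Fri Nov  2 21:22:48 2018'
+match = TLS_REGEX.search(' '.join(line.split(',')))
+print(match.group('bytes_in'), match.group('bytes_out'))  # 356483 2319127
+```
+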
+---
diff --git a/collectors/python.d.plugin/ovpn_status_log/ovpn_status_log.chart.py b/collectors/python.d.plugin/ovpn_status_log/ovpn_status_log.chart.py
new file mode 100644
index 000000000..64d7062d9
--- /dev/null
+++ b/collectors/python.d.plugin/ovpn_status_log/ovpn_status_log.chart.py
@@ -0,0 +1,129 @@
+# -*- coding: utf-8 -*-
+# Description: openvpn status log netdata python.d module
+# Author: l2isbad
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from re import compile as r_compile
+
+from bases.FrameworkServices.SimpleService import SimpleService
+
+priority = 60000
+retries = 60
+update_every = 10
+
+ORDER = ['users', 'traffic']
+CHARTS = {
+ 'users': {
+ 'options': [None, 'OpenVPN Active Users', 'active users', 'users', 'openvpn_status.users', 'line'],
+ 'lines': [
+ ['users', None, 'absolute'],
+ ]
+ },
+ 'traffic': {
+ 'options': [None, 'OpenVPN Traffic', 'KB/s', 'traffic', 'openvpn_status.traffic', 'area'],
+ 'lines': [
+ ['bytes_in', 'in', 'incremental', 1, 1 << 10], ['bytes_out', 'out', 'incremental', 1, -1 << 10]
+ ]
+ }
+}
+
+TLS_REGEX = r_compile(r'(?:[0-9a-f:]+|(?:\d{1,3}(?:\.\d{1,3}){3}(?::\d+)?)) (?P<bytes_in>\d+) (?P<bytes_out>\d+)')
+STATIC_KEY_REGEX = r_compile(r'TCP/[A-Z]+ (?P<direction>(?:read|write)) bytes,(?P<bytes>\d+)')
+
+
+class Service(SimpleService):
+ def __init__(self, configuration=None, name=None):
+ SimpleService.__init__(self, configuration=configuration, name=name)
+ self.order = ORDER
+ self.definitions = CHARTS
+ self.log_path = self.configuration.get('log_path')
+ self.regex = {
+ 'tls': TLS_REGEX,
+ 'static_key': STATIC_KEY_REGEX
+ }
+
+ def check(self):
+ if not (self.log_path and isinstance(self.log_path, str)):
+ self.error("'log_path' is not defined")
+ return False
+
+ data = self._get_raw_data()
+ if not data:
+ self.error('Make sure that the openvpn status log file exists and netdata has permission to read it')
+ return None
+
+ found = None
+ for row in data:
+ if 'ROUTING' in row:
+ self.get_data = self.get_data_tls
+ found = True
+ break
+ elif 'STATISTICS' in row:
+ self.get_data = self.get_data_static_key
+ found = True
+ break
+ if found:
+ return True
+ self.error('Failed to parse openvpn log file')
+ return False
+
+ def _get_raw_data(self):
+ """
+ Open log file
+ :return: str
+ """
+
+ try:
+ with open(self.log_path) as log:
+ raw_data = log.readlines() or None
+ except OSError:
+ return None
+ else:
+ return raw_data
+
+ def get_data_static_key(self):
+ """
+ Parse openvpn-status log file.
+ """
+
+ raw_data = self._get_raw_data()
+ if not raw_data:
+ return None
+
+ data = dict(bytes_in=0, bytes_out=0)
+
+ for row in raw_data:
+ match = self.regex['static_key'].search(row)
+ if match:
+ match = match.groupdict()
+ if match['direction'] == 'read':
+ data['bytes_in'] += int(match['bytes'])
+ else:
+ data['bytes_out'] += int(match['bytes'])
+
+ return data or None
+
+ def get_data_tls(self):
+ """
+ Parse openvpn-status log file.
+ """
+
+ raw_data = self._get_raw_data()
+ if not raw_data:
+ return None
+
+ data = dict(users=0, bytes_in=0, bytes_out=0)
+ for row in raw_data:
+ columns = row.split(',') if ',' in row else row.split()
+ if 'UNDEF' in columns:
+ # see https://openvpn.net/archive/openvpn-users/2004-08/msg00116.html
+ continue
+
+ match = self.regex['tls'].search(' '.join(columns))
+ if match:
+ match = match.groupdict()
+ data['users'] += 1
+ data['bytes_in'] += int(match['bytes_in'])
+ data['bytes_out'] += int(match['bytes_out'])
+
+ return data or None
diff --git a/collectors/python.d.plugin/ovpn_status_log/ovpn_status_log.conf b/collectors/python.d.plugin/ovpn_status_log/ovpn_status_log.conf
new file mode 100644
index 000000000..6fb35a530
--- /dev/null
+++ b/collectors/python.d.plugin/ovpn_status_log/ovpn_status_log.conf
@@ -0,0 +1,99 @@
+# netdata python.d.plugin configuration for openvpn status log
+#
+# This file is in YaML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 1
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# retries sets the number of retries to be made in case of failures.
+# If unset, the default for python.d.plugin is used.
+# Attempts to restore the service are made once every update_every
+# and only if the module has collected values in the past.
+# retries: 60
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# retries: 60 # the JOB's number of restoration attempts
+# autodetection_retry: 0 # the JOB's re-check interval in seconds
+#
+# Additionally to the above, openvpn status log also supports the following:
+#
+# log_path: 'PATH' # the path to openvpn status log file
+#
+# ----------------------------------------------------------------------
+# AUTO-DETECTION JOBS
+# only one of them will run (they have the same name)
+#
+# IMPORTANT information
+#
+# 1. If you are running multiple OpenVPN instances out of the same directory, MAKE SURE TO EDIT THE DIRECTIVES which create output files
+#    so that the instances do not overwrite each other's output files.
+# 2. Make sure the NETDATA USER CAN READ openvpn-status.log
+#
+# * cd into directory with openvpn-status.log and run the following commands as root
+# * #chown :netdata openvpn-status.log && chmod 640 openvpn-status.log
+# * To check permission and group membership run
+# * #ls -l openvpn-status.log
+# -rw-r----- 1 root netdata 359 dec 21 21:22 openvpn-status.log
+#
+# 3. The update_every interval MUST MATCH the interval on which OpenVPN writes the operational status to the log file.
+#    If it does not, the traffic chart WILL DISPLAY WRONG values.
+#
+#    The default OpenVPN update interval is 10 seconds on Debian 8
+# # ps -C openvpn -o command=
+# /usr/sbin/openvpn --daemon ovpn-server --status /run/openvpn/server.status 10 --cd /etc/openvpn --config /etc/openvpn/server.conf
+#
+# 4. Confirm status is configured in your OpenVPN configuration.
+# * Open OpenVPN config in an editor (e.g. sudo nano /etc/openvpn/default.conf)
+# * Confirm status is enabled with below:
+# status /var/log/openvpn-status.log
+#
+#default:
+# log_path: '/var/log/openvpn-status.log'
+#
+# ----------------------------------------------------------------------
diff --git a/collectors/python.d.plugin/phpfpm/Makefile.inc b/collectors/python.d.plugin/phpfpm/Makefile.inc
new file mode 100644
index 000000000..ff312fe18
--- /dev/null
+++ b/collectors/python.d.plugin/phpfpm/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += phpfpm/phpfpm.chart.py
+dist_pythonconfig_DATA += phpfpm/phpfpm.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += phpfpm/README.md phpfpm/Makefile.inc
+
diff --git a/collectors/python.d.plugin/phpfpm/README.md b/collectors/python.d.plugin/phpfpm/README.md
new file mode 100644
index 000000000..66930463f
--- /dev/null
+++ b/collectors/python.d.plugin/phpfpm/README.md
@@ -0,0 +1,40 @@
+# phpfpm
+
+This module will monitor one or more php-fpm instances, depending on configuration.
+
+**Requirements:**
+ * php-fpm with the `status` page enabled
+ * access to the `status` page via the web server
+
+It produces the following charts:
+
+1. **Active Connections**
+ * active
+ * maxActive
+ * idle
+
+2. **Requests** in requests/s
+ * requests
+
+3. **Performance**
+ * reached
+ * slow
+
+### configuration
+
+Needs only the `url` of the server's `status` page.
+
+Here is an example for a local instance:
+
+```yaml
+update_every : 3
+priority : 90100
+
+local:
+ url : 'http://localhost/status'
+ retries : 10
+```
+
+Without configuration, the module attempts to connect to `http://localhost/status?full&json`.
+
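+To check that the `status` page is reachable and returns the full JSON output the module expects, here is a minimal sketch; the URL matches the module's default and the printed keys are the pool counters the module charts, but adjust the URL to your setup:
+
+```python
+import json
+
+try:
+    from urllib.request import urlopen  # Python 3
+except ImportError:
+    from urllib2 import urlopen  # Python 2
+
+status = json.loads(urlopen('http://localhost/status?full&json').read().decode('utf-8'))
+# the same pool counters the module charts
+print(status['active processes'], status['idle processes'], status['accepted conn'])
+```
+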
+---
diff --git a/collectors/python.d.plugin/phpfpm/phpfpm.chart.py b/collectors/python.d.plugin/phpfpm/phpfpm.chart.py
new file mode 100644
index 000000000..a3f0963fc
--- /dev/null
+++ b/collectors/python.d.plugin/phpfpm/phpfpm.chart.py
@@ -0,0 +1,177 @@
+# -*- coding: utf-8 -*-
+# Description: PHP-FPM netdata python.d module
+# Author: Pawel Krupa (paulfantom)
+# Author: Ilya Mashchenko (l2isbad)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+import json
+import re
+
+from bases.FrameworkServices.UrlService import UrlService
+
+# default module values (can be overridden per job in `config`)
+# update_every = 2
+priority = 60000
+retries = 60
+
+# default job configuration (overridden by python.d.plugin)
+# config = {'local': {
+# 'update_every': update_every,
+# 'retries': retries,
+# 'priority': priority,
+# 'url': 'http://localhost/status?full&json'
+# }}
+
+# charts order (can be overridden if you want less charts, or different order)
+
+POOL_INFO = [
+ ('active processes', 'active'),
+ ('max active processes', 'maxActive'),
+ ('idle processes', 'idle'),
+ ('accepted conn', 'requests'),
+ ('max children reached', 'reached'),
+ ('slow requests', 'slow')
+]
+
+PER_PROCESS_INFO = [
+ ('request duration', 'ReqDur'),
+ ('last request cpu', 'ReqCpu'),
+ ('last request memory', 'ReqMem')
+]
+
+
+def average(collection):
+ return sum(collection, 0.0) / max(len(collection), 1)
+
+
+CALC = [
+ ('min', min),
+ ('max', max),
+ ('avg', average)
+]
+
+ORDER = ['connections', 'requests', 'performance', 'request_duration', 'request_cpu', 'request_mem']
+
+CHARTS = {
+ 'connections': {
+ 'options': [None, 'PHP-FPM Active Connections', 'connections', 'active connections', 'phpfpm.connections',
+ 'line'],
+ 'lines': [
+ ['active'],
+ ['maxActive', 'max active'],
+ ['idle']
+ ]
+ },
+ 'requests': {
+ 'options': [None, 'PHP-FPM Requests', 'requests/s', 'requests', 'phpfpm.requests', 'line'],
+ 'lines': [
+ ['requests', None, 'incremental']
+ ]
+ },
+ 'performance': {
+ 'options': [None, 'PHP-FPM Performance', 'status', 'performance', 'phpfpm.performance', 'line'],
+ 'lines': [
+ ['reached', 'max children reached'],
+ ['slow', 'slow requests']
+ ]
+ },
+ 'request_duration': {
+ 'options': [None, 'PHP-FPM Request Duration', 'milliseconds', 'request duration', 'phpfpm.request_duration',
+ 'line'],
+ 'lines': [
+ ['minReqDur', 'min', 'absolute', 1, 1000],
+ ['maxReqDur', 'max', 'absolute', 1, 1000],
+ ['avgReqDur', 'avg', 'absolute', 1, 1000]
+ ]
+ },
+ 'request_cpu': {
+ 'options': [None, 'PHP-FPM Request CPU', 'percent', 'request CPU', 'phpfpm.request_cpu', 'line'],
+ 'lines': [
+ ['minReqCpu', 'min'],
+ ['maxReqCpu', 'max'],
+ ['avgReqCpu', 'avg']
+ ]
+ },
+ 'request_mem': {
+ 'options': [None, 'PHP-FPM Request Memory', 'kilobytes', 'request memory', 'phpfpm.request_mem', 'line'],
+ 'lines': [
+ ['minReqMem', 'min', 'absolute', 1, 1024],
+ ['maxReqMem', 'max', 'absolute', 1, 1024],
+ ['avgReqMem', 'avg', 'absolute', 1, 1024]
+ ]
+ }
+}
+
+
+class Service(UrlService):
+ def __init__(self, configuration=None, name=None):
+ UrlService.__init__(self, configuration=configuration, name=name)
+ self.url = self.configuration.get('url', 'http://localhost/status?full&json')
+ self.order = ORDER
+ self.definitions = CHARTS
+ self.regex = re.compile(r'([a-z][a-z ]+): ([\d.]+)')
+ self.json = '&json' in self.url or '?json' in self.url
+ self.json_full = self.url.endswith(('?full&json', '?json&full'))
+ self.if_all_processes_running = dict([(c_name + p_name, 0) for c_name, func in CALC
+ for metric, p_name in PER_PROCESS_INFO])
+
+ def _get_data(self):
+ """
+ Format data received from http request
+ :return: dict
+ """
+ raw = self._get_raw_data()
+ if not raw:
+ return None
+
+ raw_json = parse_raw_data_(is_json=self.json, regex=self.regex, raw_data=raw)
+
+ # Per Pool info: active connections, requests and performance charts
+ to_netdata = fetch_data_(raw_data=raw_json, metrics_list=POOL_INFO)
+
+ # Per Process Info: duration, cpu and memory charts (min, max, avg)
+ if self.json_full:
+ p_info = dict()
+ to_netdata.update(self.if_all_processes_running) # If all processes are in running state
+ # Metrics are always 0 if the process is not in Idle state because calculation is done
+ # when the request processing has terminated
+ for process in [p for p in raw_json['processes'] if p['state'] == 'Idle']:
+ p_info.update(fetch_data_(raw_data=process, metrics_list=PER_PROCESS_INFO, pid=str(process['pid'])))
+
+ if p_info:
+ for new_name in PER_PROCESS_INFO:
+ for name, func in CALC:
+ to_netdata[name + new_name[1]] = func([p_info[k] for k in p_info if new_name[1] in k])
+
+ return to_netdata or None
+
+
+def fetch_data_(raw_data, metrics_list, pid=''):
+ """
+ :param raw_data: dict
+ :param metrics_list: list
+ :param pid: str
+ :return: dict
+ """
+ result = dict()
+ for metric, new_name in metrics_list:
+ if metric in raw_data:
+ result[new_name + pid] = float(raw_data[metric])
+ return result
+
+
+def parse_raw_data_(is_json, regex, raw_data):
+ """
+ :param is_json: bool
+ :param regex: compiled regular expr
+ :param raw_data: dict
+ :return: dict
+ """
+ if is_json:
+ try:
+ return json.loads(raw_data)
+ except ValueError:
+ return dict()
+ else:
+ raw_data = ' '.join(raw_data.split())
+ return dict(regex.findall(raw_data))
diff --git a/collectors/python.d.plugin/phpfpm/phpfpm.conf b/collectors/python.d.plugin/phpfpm/phpfpm.conf
new file mode 100644
index 000000000..571eb9156
--- /dev/null
+++ b/collectors/python.d.plugin/phpfpm/phpfpm.conf
@@ -0,0 +1,90 @@
+# netdata python.d.plugin configuration for PHP-FPM
+#
+# This file is in YaML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 1
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# retries sets the number of retries to be made in case of failures.
+# If unset, the default for python.d.plugin is used.
+# Attempts to restore the service are made once every update_every
+# and only if the module has collected values in the past.
+# retries: 60
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# retries: 60 # the JOB's number of restoration attempts
+# autodetection_retry: 0 # the JOB's re-check interval in seconds
+#
+# Additionally to the above, PHP-FPM also supports the following:
+#
+# url: 'URL' # the URL to fetch PHP-FPM's status stats
+# # be sure to include ?full&json at the end of the url
+#
+# if the URL is password protected, the following are supported:
+#
+# user: 'username'
+# pass: 'password'
+#
+
+# ----------------------------------------------------------------------
+# AUTO-DETECTION JOBS
+# only one of them will run (they have the same name)
+
+localhost:
+ name : 'local'
+ url : "http://localhost/status?full&json"
+
+localipv4:
+ name : 'local'
+ url : "http://127.0.0.1/status?full&json"
+
+localipv6:
+ name : 'local'
+ url : "http://[::1]/status?full&json"
+
diff --git a/collectors/python.d.plugin/portcheck/Makefile.inc b/collectors/python.d.plugin/portcheck/Makefile.inc
new file mode 100644
index 000000000..76763f02f
--- /dev/null
+++ b/collectors/python.d.plugin/portcheck/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += portcheck/portcheck.chart.py
+dist_pythonconfig_DATA += portcheck/portcheck.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += portcheck/README.md portcheck/Makefile.inc
+
diff --git a/collectors/python.d.plugin/portcheck/README.md b/collectors/python.d.plugin/portcheck/README.md
new file mode 100644
index 000000000..f1338d576
--- /dev/null
+++ b/collectors/python.d.plugin/portcheck/README.md
@@ -0,0 +1,35 @@
+# portcheck
+
+This module monitors a remote TCP service.
+
+The following charts are drawn per host:
+
+1. **Latency** ms
+ * Time required to connect to a TCP port.
+ Displays latency in 0.1 ms resolution. If the connection failed, the value is missing.
+
+2. **Status** boolean
+ * Connection successful
+ * Could not create socket: possible DNS problems
+ * Connection refused: port not listening or blocked
+ * Connection timed out: host or port unreachable
+
+
+### configuration
+
+```yaml
+server:
+ host: 'dns or ip' # required
+ port: 22 # required
+ timeout: 1 # optional
+ update_every: 1 # optional
+```
+
+### notes
+
+ * The error chart is intended for alarms, badges or for access via API.
+ * A system/service/firewall might block netdata's access if a portscan or
+ similar is detected.
+ * Currently, the accuracy of the latency measurement is low, so it should be used as a reference only (see the sketch below).
+
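+The latency value is essentially the wall-clock time of a single blocking `connect()` call, which is also why its accuracy is limited. A minimal sketch of the same measurement, with a placeholder host and port:
+
+```python
+import socket
+
+try:
+    from time import monotonic as time
+except ImportError:
+    from time import time
+
+sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+sock.settimeout(1)
+start = time()
+try:
+    sock.connect(('192.0.2.10', 22))  # placeholder host/port, replace with your target
+    print('connect latency: {0:.1f} ms'.format((time() - start) * 1000))
+except (socket.timeout, socket.error) as error:
+    print('connection failed: {0}'.format(error))
+finally:
+    sock.close()
+```
+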
+---
diff --git a/collectors/python.d.plugin/portcheck/portcheck.chart.py b/collectors/python.d.plugin/portcheck/portcheck.chart.py
new file mode 100644
index 000000000..e86f82544
--- /dev/null
+++ b/collectors/python.d.plugin/portcheck/portcheck.chart.py
@@ -0,0 +1,161 @@
+# -*- coding: utf-8 -*-
+# Description: simple port check netdata python.d module
+# Original Author: ccremer (github.com/ccremer)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+import socket
+
+try:
+ from time import monotonic as time
+except ImportError:
+ from time import time
+
+from bases.FrameworkServices.SimpleService import SimpleService
+
+# default module values (can be overridden per job in `config`)
+priority = 60000
+retries = 60
+
+PORT_LATENCY = 'connect'
+
+PORT_SUCCESS = 'success'
+PORT_TIMEOUT = 'timeout'
+PORT_FAILED = 'no_connection'
+
+ORDER = ['latency', 'status']
+
+CHARTS = {
+ 'latency': {
+ 'options': [None, 'TCP connect latency', 'ms', 'latency', 'portcheck.latency', 'line'],
+ 'lines': [
+ [PORT_LATENCY, 'connect', 'absolute', 100, 1000]
+ ]
+ },
+ 'status': {
+ 'options': [None, 'Portcheck status', 'boolean', 'status', 'portcheck.status', 'line'],
+ 'lines': [
+ [PORT_SUCCESS, 'success', 'absolute'],
+ [PORT_TIMEOUT, 'timeout', 'absolute'],
+ [PORT_FAILED, 'no connection', 'absolute']
+ ]
+ }
+}
+
+
+# Not deriving from SocketService, too much is different
+class Service(SimpleService):
+ def __init__(self, configuration=None, name=None):
+ SimpleService.__init__(self, configuration=configuration, name=name)
+ self.order = ORDER
+ self.definitions = CHARTS
+ self.host = self.configuration.get('host')
+ self.port = self.configuration.get('port')
+ self.timeout = self.configuration.get('timeout', 1)
+
+ def check(self):
+ """
+ Parse configuration, check if configuration is available, and dynamically create chart lines data
+ :return: boolean
+ """
+ if self.host is None or self.port is None:
+ self.error('Host or port missing')
+ return False
+ if not isinstance(self.port, int):
+ self.error('"port" is not an integer. Specify a numerical value, not service name.')
+ return False
+
+ self.debug('Enabled portcheck: {host}:{port}, update every {update}s, timeout: {timeout}s'.format(
+ host=self.host, port=self.port, update=self.update_every, timeout=self.timeout
+ ))
+ # We will accept any (valid-ish) configuration, even if initial connection fails (a service might be down from
+ # the beginning)
+ return True
+
+ def _get_data(self):
+ """
+ Get data from socket
+ :return: dict
+ """
+ data = dict()
+ data[PORT_SUCCESS] = 0
+ data[PORT_TIMEOUT] = 0
+ data[PORT_FAILED] = 0
+
+ success = False
+ try:
+ for socket_config in socket.getaddrinfo(self.host, self.port, socket.AF_UNSPEC, socket.SOCK_STREAM):
+ # use first working socket
+ sock = self._create_socket(socket_config)
+ if sock is not None:
+ self._connect2socket(data, socket_config, sock)
+ self._disconnect(sock)
+ success = True
+ break
+ except socket.gaierror as error:
+ self.debug('Failed to connect to "{host}:{port}", error: {error}'.format(
+ host=self.host, port=self.port, error=error
+ ))
+
+ # We could not connect
+ if not success:
+ data[PORT_FAILED] = 1
+
+ return data
+
+ def _create_socket(self, socket_config):
+ af, sock_type, proto, _, sa = socket_config
+ try:
+ self.debug('Creating socket to "{address}", port {port}'.format(address=sa[0], port=sa[1]))
+ sock = socket.socket(af, sock_type, proto)
+ sock.settimeout(self.timeout)
+ return sock
+ except socket.error as error:
+ self.debug('Failed to create socket "{address}", port {port}, error: {error}'.format(
+ address=sa[0], port=sa[1], error=error
+ ))
+ return None
+
+ def _connect2socket(self, data, socket_config, sock):
+ """
+ Connect to a socket, passing the result of getaddrinfo()
+ :return: dict
+ """
+
+ af, _, proto, _, sa = socket_config
+ port = str(sa[1])
+ try:
+ self.debug('Connecting socket to "{address}", port {port}'.format(address=sa[0], port=port))
+ start = time()
+ sock.connect(sa)
+ diff = time() - start
+ self.debug('Connected to "{address}", port {port}, latency {latency}'.format(
+ address=sa[0], port=port, latency=diff
+ ))
+ # we will set it at least 0.1 ms. 0.0 would mean failed connection (handy for 3rd-party-APIs)
+ data[PORT_LATENCY] = max(round(diff * 10000), 0)
+ data[PORT_SUCCESS] = 1
+
+ except socket.timeout as error:
+ self.debug('Socket timed out on "{address}", port {port}, error: {error}'.format(
+ address=sa[0], port=port, error=error
+ ))
+ data[PORT_TIMEOUT] = 1
+
+ except socket.error as error:
+ self.debug('Failed to connect to "{address}", port {port}, error: {error}'.format(
+ address=sa[0], port=port, error=error
+ ))
+ data[PORT_FAILED] = 1
+
+ def _disconnect(self, sock):
+ """
+ Close socket connection
+ :return:
+ """
+ if sock is not None:
+ try:
+ self.debug('Closing socket')
+ sock.shutdown(2) # 0 - read, 1 - write, 2 - all
+ sock.close()
+ except socket.error:
+ pass
diff --git a/collectors/python.d.plugin/portcheck/portcheck.conf b/collectors/python.d.plugin/portcheck/portcheck.conf
new file mode 100644
index 000000000..b3dd8bd3f
--- /dev/null
+++ b/collectors/python.d.plugin/portcheck/portcheck.conf
@@ -0,0 +1,70 @@
+# netdata python.d.plugin configuration for portcheck
+#
+# This file is in YaML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 1
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# chart_cleanup sets the default chart cleanup interval in iterations.
+# A chart is marked as obsolete if it has not been updated
+# 'chart_cleanup' iterations in a row.
+# They will be hidden immediately (not offered to dashboard viewer,
+# streamed upstream and archived to backends) and deleted one hour
+# later (configurable from netdata.conf).
+# -- For this plugin, cleanup MUST be disabled, otherwise we lose latency chart
+chart_cleanup: 0
+
+# Autodetection and retries do not work for this plugin
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# -------------------------------
+# ATTENTION: Any valid configuration will be accepted, even if initial connection fails!
+# -------------------------------
+#
+# There is intentionally no default config for 'localhost'
+
+# job_name:
+# name: myname # [optional] the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # [optional] the JOB's data collection frequency
+# priority: 60000 # [optional] the JOB's order on the dashboard
+# retries: 60 # [optional] the JOB's number of restoration attempts
+# timeout: 1 # [optional] the socket timeout when connecting
+# host: 'dns or ip' # [required] the remote host address in either IPv4, IPv6 or as DNS name.
+# port: 22 # [required] the port number to check. Specify an integer, not service name.
+
+# You have been warned above about possible portscan blocking. The portcheck plugin is meant for simple use cases.
+# Currently, the accuracy of the latency measurement is low, so it should be used as a reference only.
+
diff --git a/collectors/python.d.plugin/postfix/Makefile.inc b/collectors/python.d.plugin/postfix/Makefile.inc
new file mode 100644
index 000000000..f4091b217
--- /dev/null
+++ b/collectors/python.d.plugin/postfix/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += postfix/postfix.chart.py
+dist_pythonconfig_DATA += postfix/postfix.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += postfix/README.md postfix/Makefile.inc
+
diff --git a/collectors/python.d.plugin/postfix/README.md b/collectors/python.d.plugin/postfix/README.md
new file mode 100644
index 000000000..77c95ff44
--- /dev/null
+++ b/collectors/python.d.plugin/postfix/README.md
@@ -0,0 +1,15 @@
+# postfix
+
+Simple module executing `postqueue -p` to grab the Postfix queue.
+
+It produces only two charts:
+
+1. **Postfix Queue Emails**
+ * emails
+
+2. **Postfix Queue Emails Size** in KB
+ * size
+
+Configuration is not needed.
+
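+The module only parses the summary line that `postqueue -p` prints last. A rough sketch of the same parsing, applied to a made-up summary line:
+
+```python
+# hypothetical last line of `postqueue -p` output
+summary = '-- 3 Kbytes in 5 Requests.'
+
+fields = summary.split(' ')
+if fields[0] == 'Mail' and fields[1] == 'queue':
+    emails, size = 0, 0  # "Mail queue is empty"
+else:
+    emails, size = fields[4], fields[1]  # request count, queue size in Kbytes
+print(emails, size)  # 5 3
+```
+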
+---
diff --git a/collectors/python.d.plugin/postfix/postfix.chart.py b/collectors/python.d.plugin/postfix/postfix.chart.py
new file mode 100644
index 000000000..bdbd0feea
--- /dev/null
+++ b/collectors/python.d.plugin/postfix/postfix.chart.py
@@ -0,0 +1,53 @@
+# -*- coding: utf-8 -*-
+# Description: postfix netdata python.d module
+# Author: Pawel Krupa (paulfantom)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from bases.FrameworkServices.ExecutableService import ExecutableService
+
+# default module values (can be overridden per job in `config`)
+# update_every = 2
+priority = 60000
+retries = 60
+
+# charts order (can be overridden if you want less charts, or different order)
+ORDER = ['qemails', 'qsize']
+
+CHARTS = {
+ 'qemails': {
+ 'options': [None, 'Postfix Queue Emails', 'emails', 'queue', 'postfix.qemails', 'line'],
+ 'lines': [
+ ['emails', None, 'absolute']
+ ]
+ },
+ 'qsize': {
+ 'options': [None, 'Postfix Queue Emails Size', 'emails size in KB', 'queue', 'postfix.qsize', 'area'],
+ 'lines': [
+ ['size', None, 'absolute']
+ ]
+ }
+}
+
+
+class Service(ExecutableService):
+ def __init__(self, configuration=None, name=None):
+ ExecutableService.__init__(self, configuration=configuration, name=name)
+ self.command = 'postqueue -p'
+ self.order = ORDER
+ self.definitions = CHARTS
+
+ def _get_data(self):
+ """
+ Format data received from shell command
+ :return: dict
+ """
+ try:
+ raw = self._get_raw_data()[-1].split(' ')
+ if raw[0] == 'Mail' and raw[1] == 'queue':
+ return {'emails': 0,
+ 'size': 0}
+
+ return {'emails': raw[4],
+ 'size': raw[1]}
+ except (ValueError, AttributeError):
+ return None
diff --git a/collectors/python.d.plugin/postfix/postfix.conf b/collectors/python.d.plugin/postfix/postfix.conf
new file mode 100644
index 000000000..e0d5a5f83
--- /dev/null
+++ b/collectors/python.d.plugin/postfix/postfix.conf
@@ -0,0 +1,74 @@
+# netdata python.d.plugin configuration for postfix
+#
+# This file is in YaML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# postfix is slow, so once every 10 seconds
+update_every: 10
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# retries sets the number of retries to be made in case of failures.
+# If unset, the default for python.d.plugin is used.
+# Attempts to restore the service are made once every update_every
+# and only if the module has collected values in the past.
+# retries: 60
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# retries: 60 # the JOB's number of restoration attempts
+# autodetection_retry: 0 # the JOB's re-check interval in seconds
+#
+# Additionally to the above, postfix also supports the following:
+#
+# command: 'postqueue -p' # the command to run
+#
+
+# ----------------------------------------------------------------------
+# AUTO-DETECTION JOBS
+
+local:
+ command: 'postqueue -p'
diff --git a/collectors/python.d.plugin/postgres/Makefile.inc b/collectors/python.d.plugin/postgres/Makefile.inc
new file mode 100644
index 000000000..91a185cb9
--- /dev/null
+++ b/collectors/python.d.plugin/postgres/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += postgres/postgres.chart.py
+dist_pythonconfig_DATA += postgres/postgres.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += postgres/README.md postgres/Makefile.inc
+
diff --git a/collectors/python.d.plugin/postgres/README.md b/collectors/python.d.plugin/postgres/README.md
new file mode 100644
index 000000000..e7b108d36
--- /dev/null
+++ b/collectors/python.d.plugin/postgres/README.md
@@ -0,0 +1,68 @@
+# postgres
+
+This module monitors one or more PostgreSQL servers.
+
+**Requirements:**
+
+ * `python-psycopg2` package. You have to install it manually.
+
+The following charts are drawn:
+
+1. **Database size** MB
+ * size
+
+2. **Current Backend Processes** processes
+ * active
+ * idle
+
+3. **Write-Ahead Logging Statistics** files/s
+ * total
+ * ready
+ * done
+
+4. **Checkpoints** writes/s
+ * scheduled
+ * requested
+
+5. **Current connections to db** count
+ * connections
+
+6. **Tuples returned from db** tuples/s
+ * sequential
+ * bitmap
+
+7. **Tuple reads from db** reads/s
+ * disk
+ * cache
+
+8. **Transactions on db** transactions/s
+ * committed
+ * rolled back
+
+9. **Tuples written to db** writes/s
+ * inserted
+ * updated
+ * deleted
+ * conflicts
+
+10. **Locks on db** count per type
+ * locks
+
+### configuration
+
+```yaml
+socket:
+ name : 'socket'
+ user : 'postgres'
+ database : 'postgres'
+
+tcp:
+ name : 'tcp'
+ user : 'postgres'
+ database : 'postgres'
+ host : 'localhost'
+ port : 5432
+```
+
+When no configuration is given, the module tries to connect over a TCP/IP socket to `localhost:5432`.
+
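+To verify connectivity and privileges with the same driver the module uses, here is a minimal sketch; the connection parameters mirror the `tcp` job above, so adjust them for your setup:
+
+```python
+import psycopg2
+
+conn = psycopg2.connect(host='localhost', port=5432, user='postgres', dbname='postgres')
+cur = conn.cursor()
+cur.execute('SELECT count(*) FROM pg_stat_activity;')
+print(cur.fetchone()[0])  # number of backends, roughly what the backend processes chart shows
+conn.close()
+```
+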
+---
diff --git a/collectors/python.d.plugin/postgres/postgres.chart.py b/collectors/python.d.plugin/postgres/postgres.chart.py
new file mode 100644
index 000000000..7f43877c3
--- /dev/null
+++ b/collectors/python.d.plugin/postgres/postgres.chart.py
@@ -0,0 +1,823 @@
+# -*- coding: utf-8 -*-
+# Description: postgres netdata python.d module
+# Authors: facetoe, dangtranhoang
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from copy import deepcopy
+
+try:
+ import psycopg2
+ from psycopg2 import extensions
+ from psycopg2.extras import DictCursor
+ from psycopg2 import OperationalError
+ PSYCOPG2 = True
+except ImportError:
+ PSYCOPG2 = False
+
+from bases.FrameworkServices.SimpleService import SimpleService
+
+# default module values
+update_every = 1
+priority = 60000
+retries = 60
+
+METRICS = {
+ 'DATABASE': [
+ 'connections',
+ 'xact_commit',
+ 'xact_rollback',
+ 'blks_read',
+ 'blks_hit',
+ 'tup_returned',
+ 'tup_fetched',
+ 'tup_inserted',
+ 'tup_updated',
+ 'tup_deleted',
+ 'conflicts',
+ 'temp_files',
+ 'temp_bytes',
+ 'size'
+ ],
+ 'BACKENDS': [
+ 'backends_active',
+ 'backends_idle'
+ ],
+ 'INDEX_STATS': [
+ 'index_count',
+ 'index_size'
+ ],
+ 'TABLE_STATS': [
+ 'table_size',
+ 'table_count'
+ ],
+ 'WAL': [
+ 'written_wal',
+ 'recycled_wal',
+ 'total_wal'
+ ],
+ 'WAL_WRITES': [
+ 'wal_writes'
+ ],
+ 'ARCHIVE': [
+ 'ready_count',
+ 'done_count',
+ 'file_count'
+ ],
+ 'BGWRITER': [
+ 'checkpoint_scheduled',
+ 'checkpoint_requested',
+ 'buffers_checkpoint',
+ 'buffers_clean',
+ 'maxwritten_clean',
+ 'buffers_backend',
+ 'buffers_alloc',
+ 'buffers_backend_fsync'
+ ],
+ 'LOCKS': [
+ 'ExclusiveLock',
+ 'RowShareLock',
+ 'SIReadLock',
+ 'ShareUpdateExclusiveLock',
+ 'AccessExclusiveLock',
+ 'AccessShareLock',
+ 'ShareRowExclusiveLock',
+ 'ShareLock',
+ 'RowExclusiveLock'
+ ],
+ 'AUTOVACUUM': [
+ 'analyze',
+ 'vacuum_analyze',
+ 'vacuum',
+ 'vacuum_freeze',
+ 'brin_summarize'
+ ],
+ 'STANDBY_DELTA': [
+ 'sent_delta',
+ 'write_delta',
+ 'flush_delta',
+ 'replay_delta'
+ ],
+ 'REPSLOT_FILES': [
+ 'replslot_wal_keep',
+ 'replslot_files'
+ ]
+}
+
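+# Note: the '{0}'/'{1}' placeholders in the queries below are filled in at runtime,
+# becoming 'xlog'/'location' or 'wal'/'lsn' depending on the detected server version
+# (PostgreSQL 10 renamed the xlog/location functions to wal/lsn).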
+QUERIES = {
+ 'WAL': """
+SELECT
+ count(*) as total_wal,
+ count(*) FILTER (WHERE type = 'recycled') AS recycled_wal,
+ count(*) FILTER (WHERE type = 'written') AS written_wal
+FROM
+ (SELECT
+ wal.name,
+ pg_{0}file_name(
+ CASE pg_is_in_recovery()
+ WHEN true THEN NULL
+ ELSE pg_current_{0}_{1}()
+ END ),
+ CASE
+ WHEN wal.name > pg_{0}file_name(
+ CASE pg_is_in_recovery()
+ WHEN true THEN NULL
+ ELSE pg_current_{0}_{1}()
+ END ) THEN 'recycled'
+ ELSE 'written'
+ END AS type
+ FROM pg_catalog.pg_ls_dir('pg_{0}') AS wal(name)
+ WHERE name ~ '^[0-9A-F]{{24}}$'
+ ORDER BY
+ (pg_stat_file('pg_{0}/'||name)).modification,
+ wal.name DESC) sub;
+""",
+ 'ARCHIVE': """
+SELECT
+ CAST(COUNT(*) AS INT) AS file_count,
+ CAST(COALESCE(SUM(CAST(archive_file ~ $r$\.ready$$r$ as INT)),0) AS INT) AS ready_count,
+ CAST(COALESCE(SUM(CAST(archive_file ~ $r$\.done$$r$ AS INT)),0) AS INT) AS done_count
+FROM
+ pg_catalog.pg_ls_dir('pg_{0}/archive_status') AS archive_files (archive_file);
+""",
+ 'BACKENDS': """
+SELECT
+ count(*) - (SELECT count(*)
+ FROM pg_stat_activity
+ WHERE state = 'idle')
+ AS backends_active,
+ (SELECT count(*)
+ FROM pg_stat_activity
+ WHERE state = 'idle')
+ AS backends_idle
+FROM pg_stat_activity;
+""",
+ 'TABLE_STATS': """
+SELECT
+ ((sum(relpages) * 8) * 1024) AS table_size,
+ count(1) AS table_count
+FROM pg_class
+WHERE relkind IN ('r', 't');
+""",
+ 'INDEX_STATS': """
+SELECT
+ ((sum(relpages) * 8) * 1024) AS index_size,
+ count(1) AS index_count
+FROM pg_class
+WHERE relkind = 'i';
+""",
+ 'DATABASE': """
+SELECT
+ datname AS database_name,
+ numbackends AS connections,
+ xact_commit AS xact_commit,
+ xact_rollback AS xact_rollback,
+ blks_read AS blks_read,
+ blks_hit AS blks_hit,
+ tup_returned AS tup_returned,
+ tup_fetched AS tup_fetched,
+ tup_inserted AS tup_inserted,
+ tup_updated AS tup_updated,
+ tup_deleted AS tup_deleted,
+ conflicts AS conflicts,
+ pg_database_size(datname) AS size,
+ temp_files AS temp_files,
+ temp_bytes AS temp_bytes
+FROM pg_stat_database
+WHERE datname IN %(databases)s ;
+""",
+ 'BGWRITER': """
+SELECT
+ checkpoints_timed AS checkpoint_scheduled,
+ checkpoints_req AS checkpoint_requested,
+ buffers_checkpoint * current_setting('block_size')::numeric buffers_checkpoint,
+ buffers_clean * current_setting('block_size')::numeric buffers_clean,
+ maxwritten_clean,
+ buffers_backend * current_setting('block_size')::numeric buffers_backend,
+ buffers_alloc * current_setting('block_size')::numeric buffers_alloc,
+ buffers_backend_fsync
+FROM pg_stat_bgwriter;
+""",
+ 'LOCKS': """
+SELECT
+ pg_database.datname as database_name,
+ mode,
+ count(mode) AS locks_count
+FROM pg_locks
+INNER JOIN pg_database
+ ON pg_database.oid = pg_locks.database
+GROUP BY datname, mode
+ORDER BY datname, mode;
+""",
+ 'FIND_DATABASES': """
+SELECT
+ datname
+FROM pg_stat_database
+WHERE
+ has_database_privilege(
+ (SELECT current_user), datname, 'connect')
+ AND NOT datname ~* '^template\d ';
+""",
+ 'FIND_STANDBY': """
+SELECT
+ application_name
+FROM pg_stat_replication
+WHERE application_name IS NOT NULL
+GROUP BY application_name;
+""",
+ 'FIND_REPLICATION_SLOT': """
+SELECT slot_name
+FROM pg_replication_slots;
+""",
+ 'STANDBY_DELTA': """
+SELECT
+ application_name,
+ pg_{0}_{1}_diff(
+ CASE pg_is_in_recovery()
+ WHEN true THEN pg_last_{0}_receive_{1}()
+ ELSE pg_current_{0}_{1}()
+ END,
+ sent_{1}) AS sent_delta,
+ pg_{0}_{1}_diff(
+ CASE pg_is_in_recovery()
+ WHEN true THEN pg_last_{0}_receive_{1}()
+ ELSE pg_current_{0}_{1}()
+ END,
+ write_{1}) AS write_delta,
+ pg_{0}_{1}_diff(
+ CASE pg_is_in_recovery()
+ WHEN true THEN pg_last_{0}_receive_{1}()
+ ELSE pg_current_{0}_{1}()
+ END,
+ flush_{1}) AS flush_delta,
+ pg_{0}_{1}_diff(
+ CASE pg_is_in_recovery()
+ WHEN true THEN pg_last_{0}_receive_{1}()
+ ELSE pg_current_{0}_{1}()
+ END,
+ replay_{1}) AS replay_delta
+FROM pg_stat_replication
+WHERE application_name IS NOT NULL;
+""",
+ 'REPSLOT_FILES': """
+WITH wal_size AS (
+ SELECT
+ current_setting('wal_block_size')::INT * setting::INT AS val
+ FROM pg_settings
+ WHERE name = 'wal_segment_size'
+ )
+SELECT
+ slot_name,
+ slot_type,
+ replslot_wal_keep,
+ count(slot_file) AS replslot_files
+FROM
+ (SELECT
+ slot.slot_name,
+ CASE
+ WHEN slot_file <> 'state' THEN 1
+ END AS slot_file ,
+ slot_type,
+ COALESCE (
+ floor(
+ (pg_wal_lsn_diff(pg_current_wal_lsn (),slot.restart_lsn)
+ - (pg_walfile_name_offset (restart_lsn)).file_offset) / (s.val)
+ ),0) AS replslot_wal_keep
+ FROM pg_replication_slots slot
+ LEFT JOIN (
+ SELECT
+ slot2.slot_name,
+ pg_ls_dir('pg_replslot/' || slot2.slot_name) AS slot_file
+ FROM pg_replication_slots slot2
+ ) files (slot_name, slot_file)
+ ON slot.slot_name = files.slot_name
+ CROSS JOIN wal_size s
+ ) AS d
+GROUP BY
+ slot_name,
+ slot_type,
+ replslot_wal_keep;
+""",
+ 'IF_SUPERUSER': """
+SELECT current_setting('is_superuser') = 'on' AS is_superuser;
+""",
+ 'DETECT_SERVER_VERSION': """
+SHOW server_version_num;
+""",
+ 'AUTOVACUUM': """
+SELECT
+ count(*) FILTER (WHERE query LIKE 'autovacuum: ANALYZE%%') AS analyze,
+ count(*) FILTER (WHERE query LIKE 'autovacuum: VACUUM ANALYZE%%') AS vacuum_analyze,
+ count(*) FILTER (WHERE query LIKE 'autovacuum: VACUUM%%'
+ AND query NOT LIKE 'autovacuum: VACUUM ANALYZE%%'
+ AND query NOT LIKE '%%to prevent wraparound%%') AS vacuum,
+ count(*) FILTER (WHERE query LIKE '%%to prevent wraparound%%') AS vacuum_freeze,
+ count(*) FILTER (WHERE query LIKE 'autovacuum: BRIN summarize%%') AS brin_summarize
+FROM pg_stat_activity
+WHERE query NOT LIKE '%%pg_stat_activity%%';
+""",
+ 'DIFF_LSN': """
+SELECT
+ pg_{0}_{1}_diff(
+ CASE pg_is_in_recovery()
+ WHEN true THEN pg_last_{0}_receive_{1}()
+ ELSE pg_current_{0}_{1}()
+ END,
+ '0/0') as wal_writes ;
+"""
+}
+
+
+QUERY_STATS = {
+ QUERIES['DATABASE']: METRICS['DATABASE'],
+ QUERIES['BACKENDS']: METRICS['BACKENDS'],
+ QUERIES['LOCKS']: METRICS['LOCKS']
+}
+
+ORDER = [
+ 'db_stat_temp_files',
+ 'db_stat_temp_bytes',
+ 'db_stat_blks',
+ 'db_stat_tuple_returned',
+ 'db_stat_tuple_write',
+ 'db_stat_transactions',
+ 'db_stat_connections',
+ 'database_size',
+ 'backend_process',
+ 'index_count',
+ 'index_size',
+ 'table_count',
+ 'table_size',
+ 'wal',
+ 'wal_writes',
+ 'archive_wal',
+ 'checkpointer',
+ 'stat_bgwriter_alloc',
+ 'stat_bgwriter_checkpoint',
+ 'stat_bgwriter_backend',
+ 'stat_bgwriter_backend_fsync',
+ 'stat_bgwriter_bgwriter',
+ 'stat_bgwriter_maxwritten',
+ 'replication_slot',
+ 'standby_delta',
+ 'autovacuum'
+]
+
+CHARTS = {
+ 'db_stat_transactions': {
+ 'options': [None, 'Transactions on db', 'transactions/s', 'db statistics', 'postgres.db_stat_transactions',
+ 'line'],
+ 'lines': [
+ ['xact_commit', 'committed', 'incremental'],
+ ['xact_rollback', 'rolled back', 'incremental']
+ ]
+ },
+ 'db_stat_connections': {
+ 'options': [None, 'Current connections to db', 'count', 'db statistics', 'postgres.db_stat_connections',
+ 'line'],
+ 'lines': [
+ ['connections', 'connections', 'absolute']
+ ]
+ },
+ 'db_stat_blks': {
+ 'options': [None, 'Disk blocks reads from db', 'reads/s', 'db statistics', 'postgres.db_stat_blks', 'line'],
+ 'lines': [
+ ['blks_read', 'disk', 'incremental'],
+ ['blks_hit', 'cache', 'incremental']
+ ]
+ },
+ 'db_stat_tuple_returned': {
+ 'options': [None, 'Tuples returned from db', 'tuples/s', 'db statistics', 'postgres.db_stat_tuple_returned',
+ 'line'],
+ 'lines': [
+ ['tup_returned', 'sequential', 'incremental'],
+ ['tup_fetched', 'bitmap', 'incremental']
+ ]
+ },
+ 'db_stat_tuple_write': {
+ 'options': [None, 'Tuples written to db', 'writes/s', 'db statistics', 'postgres.db_stat_tuple_write', 'line'],
+ 'lines': [
+ ['tup_inserted', 'inserted', 'incremental'],
+ ['tup_updated', 'updated', 'incremental'],
+ ['tup_deleted', 'deleted', 'incremental'],
+ ['conflicts', 'conflicts', 'incremental']
+ ]
+ },
+ 'db_stat_temp_bytes': {
+ 'options': [None, 'Temp files written to disk', 'KB/s', 'db statistics', 'postgres.db_stat_temp_bytes',
+ 'line'],
+ 'lines': [
+ ['temp_bytes', 'size', 'incremental', 1, 1024]
+ ]
+ },
+ 'db_stat_temp_files': {
+ 'options': [None, 'Temp files written to disk', 'files', 'db statistics', 'postgres.db_stat_temp_files',
+ 'line'],
+ 'lines': [
+ ['temp_files', 'files', 'incremental']
+ ]
+ },
+ 'database_size': {
+ 'options': [None, 'Database size', 'MB', 'database size', 'postgres.db_size', 'stacked'],
+ 'lines': [
+ ]
+ },
+ 'backend_process': {
+ 'options': [None, 'Current Backend Processes', 'processes', 'backend processes', 'postgres.backend_process',
+ 'line'],
+ 'lines': [
+ ['backends_active', 'active', 'absolute'],
+ ['backends_idle', 'idle', 'absolute']
+ ]
+ },
+ 'index_count': {
+ 'options': [None, 'Total indexes', 'index', 'indexes', 'postgres.index_count', 'line'],
+ 'lines': [
+ ['index_count', 'total', 'absolute']
+ ]
+ },
+ 'index_size': {
+ 'options': [None, 'Indexes size', 'MB', 'indexes', 'postgres.index_size', 'line'],
+ 'lines': [
+ ['index_size', 'size', 'absolute', 1, 1024 * 1024]
+ ]
+ },
+ 'table_count': {
+ 'options': [None, 'Total Tables', 'tables', 'tables', 'postgres.table_count', 'line'],
+ 'lines': [
+ ['table_count', 'total', 'absolute']
+ ]
+ },
+ 'table_size': {
+ 'options': [None, 'Tables size', 'MB', 'tables', 'postgres.table_size', 'line'],
+ 'lines': [
+ ['table_size', 'size', 'absolute', 1, 1024 * 1024]
+ ]
+ },
+ 'wal': {
+ 'options': [None, 'Write-Ahead Logs', 'files', 'wal', 'postgres.wal', 'line'],
+ 'lines': [
+ ['written_wal', 'written', 'absolute'],
+ ['recycled_wal', 'recycled', 'absolute'],
+ ['total_wal', 'total', 'absolute']
+ ]
+ },
+ 'wal_writes': {
+ 'options': [None, 'Write-Ahead Logs', 'kilobytes/s', 'wal_writes', 'postgres.wal_writes', 'line'],
+ 'lines': [
+ ['wal_writes', 'writes', 'incremental', 1, 1024]
+ ]
+ },
+ 'archive_wal': {
+ 'options': [None, 'Archive Write-Ahead Logs', 'files/s', 'archive wal', 'postgres.archive_wal', 'line'],
+ 'lines': [
+ ['file_count', 'total', 'incremental'],
+ ['ready_count', 'ready', 'incremental'],
+ ['done_count', 'done', 'incremental']
+ ]
+ },
+ 'checkpointer': {
+ 'options': [None, 'Checkpoints', 'writes', 'checkpointer', 'postgres.checkpointer', 'line'],
+ 'lines': [
+ ['checkpoint_scheduled', 'scheduled', 'incremental'],
+ ['checkpoint_requested', 'requested', 'incremental']
+ ]
+ },
+ 'stat_bgwriter_alloc': {
+ 'options': [None, 'Buffers allocated', 'kilobytes/s', 'bgwriter', 'postgres.stat_bgwriter_alloc', 'line'],
+ 'lines': [
+ ['buffers_alloc', 'alloc', 'incremental', 1, 1024]
+ ]
+ },
+ 'stat_bgwriter_checkpoint': {
+ 'options': [None, 'Buffers written during checkpoints', 'kilobytes/s', 'bgwriter',
+ 'postgres.stat_bgwriter_checkpoint', 'line'],
+ 'lines': [
+ ['buffers_checkpoint', 'checkpoint', 'incremental', 1, 1024]
+ ]
+ },
+ 'stat_bgwriter_backend': {
+ 'options': [None, 'Buffers written directly by a backend', 'kilobytes/s', 'bgwriter',
+ 'postgres.stat_bgwriter_backend', 'line'],
+ 'lines': [
+ ['buffers_backend', 'backend', 'incremental', 1, 1024]
+ ]
+ },
+ 'stat_bgwriter_backend_fsync': {
+ 'options': [None, 'Fsync by backend', 'times', 'bgwriter', 'postgres.stat_bgwriter_backend_fsync', 'line'],
+ 'lines': [
+ ['buffers_backend_fsync', 'backend fsync', 'incremental']
+ ]
+ },
+ 'stat_bgwriter_bgwriter': {
+ 'options': [None, 'Buffers written by the background writer', 'kilobytes/s', 'bgwriter',
+ 'postgres.bgwriter_bgwriter', 'line'],
+ 'lines': [
+ ['buffers_clean', 'clean', 'incremental', 1, 1024]
+ ]
+ },
+ 'stat_bgwriter_maxwritten': {
+ 'options': [None, 'Too many buffers written', 'times', 'bgwriter', 'postgres.stat_bgwriter_maxwritten',
+ 'line'],
+ 'lines': [
+ ['maxwritten_clean', 'maxwritten', 'incremental']
+ ]
+ },
+ 'autovacuum': {
+ 'options': [None, 'Autovacuum workers', 'workers', 'autovacuum', 'postgres.autovacuum', 'line'],
+ 'lines': [
+ ['analyze', 'analyze', 'absolute'],
+ ['vacuum', 'vacuum', 'absolute'],
+ ['vacuum_analyze', 'vacuum analyze', 'absolute'],
+ ['vacuum_freeze', 'vacuum freeze', 'absolute'],
+ ['brin_summarize', 'brin summarize', 'absolute']
+ ]
+ },
+ 'standby_delta': {
+ 'options': [None, 'Standby delta', 'kilobytes', 'replication delta', 'postgres.standby_delta', 'line'],
+ 'lines': [
+ ['sent_delta', 'sent delta', 'absolute', 1, 1024],
+ ['write_delta', 'write delta', 'absolute', 1, 1024],
+ ['flush_delta', 'flush delta', 'absolute', 1, 1024],
+ ['replay_delta', 'replay delta', 'absolute', 1, 1024]
+ ]
+ },
+ 'replication_slot': {
+ 'options': [None, 'Replication slot files', 'files', 'replication slot', 'postgres.replication_slot', 'line'],
+ 'lines': [
+ ['replslot_wal_keep', 'wal kept', 'absolute'],
+ ['replslot_files', 'pg_replslot files', 'absolute']
+ ]
+ }
+}
+
+
+class Service(SimpleService):
+ def __init__(self, configuration=None, name=None):
+ SimpleService.__init__(self, configuration=configuration, name=name)
+ self.order = ORDER[:]
+ self.definitions = deepcopy(CHARTS)
+ self.table_stats = configuration.pop('table_stats', False)
+ self.index_stats = configuration.pop('index_stats', False)
+ self.database_poll = configuration.pop('database_poll', None)
+ self.configuration = configuration
+ self.connection = False
+ self.server_version = None
+ self.data = dict()
+ self.locks_zeroed = dict()
+ self.databases = list()
+ self.secondaries = list()
+ self.replication_slots = list()
+ self.queries = QUERY_STATS.copy()
+
+ def _connect(self):
+ params = dict(user='postgres',
+ database=None,
+ password=None,
+ host=None,
+ port=5432)
+ params.update(self.configuration)
+
+ if not self.connection:
+ try:
+ self.connection = psycopg2.connect(**params)
+ self.connection.set_isolation_level(extensions.ISOLATION_LEVEL_AUTOCOMMIT)
+ self.connection.set_session(readonly=True)
+ except OperationalError as error:
+ return False, str(error)
+ return True, True
+
+ def check(self):
+ if not PSYCOPG2:
+ self.error('\'python-psycopg2\' module is needed to use postgres.chart.py')
+ return False
+ result, error = self._connect()
+ if not result:
+ conf = dict((k, (lambda k, v: v if k != 'password' else '*****')(k, v))
+ for k, v in self.configuration.items())
+ self.error('Failed to connect to %s. Error: %s' % (str(conf), error))
+ return False
+ try:
+ cursor = self.connection.cursor()
+ self.databases = discover_databases_(cursor, QUERIES['FIND_DATABASES'])
+ is_superuser = check_if_superuser_(cursor, QUERIES['IF_SUPERUSER'])
+ self.secondaries = discover_secondaries_(cursor, QUERIES['FIND_STANDBY'])
+ self.server_version = detect_server_version(cursor, QUERIES['DETECT_SERVER_VERSION'])
+ if self.server_version >= 94000:
+ self.replication_slots = discover_replication_slots_(cursor, QUERIES['FIND_REPLICATION_SLOT'])
+ cursor.close()
+
+ if self.database_poll and isinstance(self.database_poll, str):
+ self.databases = [dbase for dbase in self.databases if dbase in self.database_poll.split()] \
+ or self.databases
+
+ self.locks_zeroed = populate_lock_types(self.databases)
+ self.add_additional_queries_(is_superuser)
+ self.create_dynamic_charts_()
+ return True
+ except Exception as error:
+ self.error(str(error))
+ return False
+
+ def add_additional_queries_(self, is_superuser):
+
+ if self.server_version >= 100000:
+ wal = 'wal'
+ lsn = 'lsn'
+ else:
+ wal = 'xlog'
+ lsn = 'location'
+ self.queries[QUERIES['BGWRITER']] = METRICS['BGWRITER']
+ self.queries[QUERIES['DIFF_LSN'].format(wal, lsn)] = METRICS['WAL_WRITES']
+ self.queries[QUERIES['STANDBY_DELTA'].format(wal, lsn)] = METRICS['STANDBY_DELTA']
+
+ if self.index_stats:
+ self.queries[QUERIES['INDEX_STATS']] = METRICS['INDEX_STATS']
+ if self.table_stats:
+ self.queries[QUERIES['TABLE_STATS']] = METRICS['TABLE_STATS']
+ if is_superuser:
+ self.queries[QUERIES['ARCHIVE'].format(wal)] = METRICS['ARCHIVE']
+ if self.server_version >= 90400:
+ self.queries[QUERIES['WAL'].format(wal, lsn)] = METRICS['WAL']
+ if self.server_version >= 100000:
+ self.queries[QUERIES['REPSLOT_FILES']] = METRICS['REPSLOT_FILES']
+ if self.server_version >= 90400:
+ self.queries[QUERIES['AUTOVACUUM']] = METRICS['AUTOVACUUM']
+
+ def create_dynamic_charts_(self):
+
+ for database_name in self.databases[::-1]:
+ self.definitions['database_size']['lines'].append(
+ [database_name + '_size', database_name, 'absolute', 1, 1024 * 1024])
+ for chart_name in [name for name in self.order if name.startswith('db_stat')]:
+ add_database_stat_chart_(order=self.order, definitions=self.definitions,
+ name=chart_name, database_name=database_name)
+
+ add_database_lock_chart_(order=self.order, definitions=self.definitions, database_name=database_name)
+
+ for application_name in self.secondaries[::-1]:
+ add_replication_delta_chart_(
+ order=self.order,
+ definitions=self.definitions,
+ name='standby_delta',
+ application_name=application_name)
+
+ for slot_name in self.replication_slots[::-1]:
+ add_replication_slot_chart_(
+ order=self.order,
+ definitions=self.definitions,
+ name='replication_slot',
+ slot_name=slot_name)
+
+ def _get_data(self):
+ result, _ = self._connect()
+ if result:
+ cursor = self.connection.cursor(cursor_factory=DictCursor)
+ try:
+ self.data.update(self.locks_zeroed)
+ for query, metrics in self.queries.items():
+ self.query_stats_(cursor, query, metrics)
+
+ except OperationalError:
+ self.connection = False
+ cursor.close()
+ return None
+ else:
+ cursor.close()
+ return self.data
+ else:
+ return None
+
+ def query_stats_(self, cursor, query, metrics):
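+ # Map each result row to chart dimensions: when a row carries a database,
+ # application or slot name, it is used as a prefix for the dimension id,
+ # which feeds the dynamically created per-database/per-standby/per-slot charts.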
+ cursor.execute(query, dict(databases=tuple(self.databases)))
+ for row in cursor:
+ for metric in metrics:
+ if 'database_name' in row:
+ dimension_id = '_'.join([row['database_name'], metric])
+ elif 'application_name' in row:
+ dimension_id = '_'.join([row['application_name'], metric])
+ elif 'slot_name' in row:
+ dimension_id = '_'.join([row['slot_name'], metric])
+ else:
+ dimension_id = metric
+ if metric in row:
+ if row[metric] is not None:
+ self.data[dimension_id] = int(row[metric])
+ elif 'locks_count' in row:
+ self.data[dimension_id] = row['locks_count'] if metric == row['mode'] else 0
+
+
+def discover_databases_(cursor, query):
+ cursor.execute(query)
+ result = list()
+ for db in [database[0] for database in cursor]:
+ if db not in result:
+ result.append(db)
+ return result
+
+
+def discover_secondaries_(cursor, query):
+ cursor.execute(query)
+ result = list()
+ for sc in [standby[0] for standby in cursor]:
+ if sc not in result:
+ result.append(sc)
+ return result
+
+
+def discover_replication_slots_(cursor, query):
+ cursor.execute(query)
+ result = list()
+ for slot in [replication_slot[0] for replication_slot in cursor]:
+ if slot not in result:
+ result.append(slot)
+ return result
+
+
+def check_if_superuser_(cursor, query):
+ cursor.execute(query)
+ return cursor.fetchone()[0]
+
+
+def detect_server_version(cursor, query):
+ cursor.execute(query)
+ return int(cursor.fetchone()[0])
+
+
+def populate_lock_types(databases):
+ result = dict()
+ for database in databases:
+ for lock_type in METRICS['LOCKS']:
+ key = '_'.join([database, lock_type])
+ result[key] = 0
+
+ return result
+
+
+def add_database_lock_chart_(order, definitions, database_name):
+ def create_lines(database):
+ result = list()
+ for lock_type in METRICS['LOCKS']:
+ dimension_id = '_'.join([database, lock_type])
+ result.append([dimension_id, lock_type, 'absolute'])
+ return result
+
+ chart_name = database_name + '_locks'
+ order.insert(-1, chart_name)
+ definitions[chart_name] = {
+ 'options':
+ [None, 'Locks on db: ' + database_name, 'locks', 'db ' + database_name, 'postgres.db_locks', 'line'],
+ 'lines': create_lines(database_name)
+ }
+
+
+def add_database_stat_chart_(order, definitions, name, database_name):
+ def create_lines(database, lines):
+ result = list()
+ for line in lines:
+ new_line = ['_'.join([database, line[0]])] + line[1:]
+ result.append(new_line)
+ return result
+
+ chart_template = CHARTS[name]
+ chart_name = '_'.join([database_name, name])
+ order.insert(0, chart_name)
+ name, title, units, _, context, chart_type = chart_template['options']
+ definitions[chart_name] = {
+ 'options': [name, title + ': ' + database_name, units, 'db ' + database_name, context, chart_type],
+ 'lines': create_lines(database_name, chart_template['lines'])}
+
+
+def add_replication_delta_chart_(order, definitions, name, application_name):
+ def create_lines(standby, lines):
+ result = list()
+ for line in lines:
+ new_line = ['_'.join([standby, line[0]])] + line[1:]
+ result.append(new_line)
+ return result
+
+ chart_template = CHARTS[name]
+ chart_name = '_'.join([application_name, name])
+ position = order.index('database_size')
+ order.insert(position, chart_name)
+ name, title, units, family, context, chart_type = chart_template['options']
+ definitions[chart_name] = {
+ 'options': [name, title + ': ' + application_name, units, 'replication delta', context, chart_type],
+ 'lines': create_lines(application_name, chart_template['lines'])}
+
+
+def add_replication_slot_chart_(order, definitions, name, slot_name):
+ def create_lines(slot, lines):
+ result = list()
+ for line in lines:
+ new_line = ['_'.join([slot, line[0]])] + line[1:]
+ result.append(new_line)
+ return result
+
+ chart_template = CHARTS[name]
+ chart_name = '_'.join([slot_name, name])
+ position = order.index('database_size')
+ order.insert(position, chart_name)
+ name, title, units, family, context, chart_type = chart_template['options']
+ definitions[chart_name] = {
+ 'options': [name, title + ': ' + slot_name, units, 'replication slot files', context, chart_type],
+ 'lines': create_lines(slot_name, chart_template['lines'])}
diff --git a/collectors/python.d.plugin/postgres/postgres.conf b/collectors/python.d.plugin/postgres/postgres.conf
new file mode 100644
index 000000000..b69ca3717
--- /dev/null
+++ b/collectors/python.d.plugin/postgres/postgres.conf
@@ -0,0 +1,124 @@
+# netdata python.d.plugin configuration for postgresql
+#
+# This file is in YaML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 1
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# retries sets the number of retries to be made in case of failures.
+# If unset, the default for python.d.plugin is used.
+# Attempts to restore the service are made once every update_every
+# and only if the module has collected values in the past.
+# retries: 60
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# retries: 60 # the JOB's number of restoration attempts
+# autodetection_retry: 0 # the JOB's re-check interval in seconds
+#
+# A single connection is required in order to pull statistics.
+#
+# Connections can be configured with the following options:
+#
+# database : 'example_db_name'
+# user : 'example_user'
+# password : 'example_pass'
+# host : 'localhost'
+# port : 5432
+#
+# Additionally, the following options allow selective disabling of charts
+#
+# table_stats : false
+# index_stats : false
+# database_poll : 'dbase_name1 dbase_name2' # poll only the specified databases (all others will be excluded from charts)
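+#
+# For example, a job that polls only two (illustrative) databases and has the
+# per-table and per-index charts disabled could look like:
+#
+# mypostgres:
+#  name          : 'mypostgres'
+#  host          : 'localhost'
+#  port          : 5432
+#  user          : 'netdata'
+#  database      : 'postgres'
+#  table_stats   : false
+#  index_stats   : false
+#  database_poll : 'app_db analytics_db'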
+#
+# PostgreSQL permissions are configured in its pg_hba.conf file. You can
+# "trust" local clients to allow netdata to connect, or you can create
+# a postgres user for netdata and add its password below to allow
+# netdata to connect.
+#
+# Supported PostgreSQL versions are:
+# - 9.3 (without autovacuum)
+# - 9.4
+# - 9.5
+# - 9.6
+# - 10
+#
+# Superuser access is needed for these charts:
+# Write-Ahead Logs
+# Archive Write-Ahead Logs
+#
+# Autovacuum charts are available since PostgreSQL 9.4
+# ----------------------------------------------------------------------
+
+socket:
+ name : 'local'
+ user : 'postgres'
+ database : 'postgres'
+
+tcp:
+ name : 'local'
+ database : 'postgres'
+ user : 'postgres'
+ host : 'localhost'
+ port : 5432
+
+tcpipv4:
+ name : 'local'
+ database : 'postgres'
+ user : 'postgres'
+ host : '127.0.0.1'
+ port : 5432
+
+tcpipv6:
+ name : 'local'
+ database : 'postgres'
+ user : 'postgres'
+ host : '::1'
+ port : 5432
+
diff --git a/collectors/python.d.plugin/powerdns/Makefile.inc b/collectors/python.d.plugin/powerdns/Makefile.inc
new file mode 100644
index 000000000..256d32a40
--- /dev/null
+++ b/collectors/python.d.plugin/powerdns/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += powerdns/powerdns.chart.py
+dist_pythonconfig_DATA += powerdns/powerdns.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += powerdns/README.md powerdns/Makefile.inc
+
diff --git a/collectors/python.d.plugin/powerdns/README.md b/collectors/python.d.plugin/powerdns/README.md
new file mode 100644
index 000000000..3c4b145e0
--- /dev/null
+++ b/collectors/python.d.plugin/powerdns/README.md
@@ -0,0 +1,77 @@
+# powerdns
+
+This module monitors PowerDNS performance and health metrics.
+
+PowerDNS Authoritative Server charts:
+
+1. **Queries and Answers**
+ * udp-queries
+ * udp-answers
+ * tcp-queries
+ * tcp-answers
+
+2. **Cache Usage**
+ * query-cache-hit
+ * query-cache-miss
+ * packetcache-hit
+ * packetcache-miss
+
+3. **Cache Size**
+ * query-cache-size
+ * packetcache-size
+ * key-cache-size
+ * meta-cache-size
+
+4. **Latency**
+ * latency
+
+PowerDNS Recursor charts:
+
+1. **Questions In**
+ * questions
+ * ipv6-questions
+ * tcp-questions
+
+2. **Questions Out**
+ * all-outqueries
+ * ipv6-outqueries
+ * tcp-outqueries
+ * throttled-outqueries
+
+3. **Answer Times**
+ * answers-slow
+ * answers0-1
+ * answers1-10
+ * answers10-100
+ * answers100-1000
+
+4. **Timeouts**
+ * outgoing-timeouts
+ * outgoing4-timeouts
+ * outgoing6-timeouts
+
+5. **Drops**
+ * over-capacity-drops
+
+6. **Cache Usage**
+ * cache-hits
+ * cache-misses
+ * packetcache-hits
+ * packetcache-misses
+
+7. **Cache Size**
+ * cache-entries
+ * packetcache-entries
+ * negcache-entries
+
+### configuration
+
+```yaml
+local:
+ name : 'local'
+ url : 'http://127.0.0.1:8081/api/v1/servers/localhost/statistics'
+ header :
+ X-API-Key: 'change_me'
+```
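+
+For the module to reach the statistics endpoint, the PowerDNS built-in
+webserver/API has to be enabled. A minimal sketch of the relevant server
+settings (exact option names may differ between versions and between the
+Authoritative Server and the Recursor):
+
+```
+webserver=yes
+webserver-port=8081
+api=yes
+api-key=change_me
+```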
+
+---
diff --git a/collectors/python.d.plugin/powerdns/powerdns.chart.py b/collectors/python.d.plugin/powerdns/powerdns.chart.py
new file mode 100644
index 000000000..4264621b2
--- /dev/null
+++ b/collectors/python.d.plugin/powerdns/powerdns.chart.py
@@ -0,0 +1,150 @@
+# -*- coding: utf-8 -*-
+# Description: powerdns netdata python.d module
+# Author: Ilya Mashchenko (l2isbad)
+# Author: Luke Whitworth
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from json import loads
+
+from bases.FrameworkServices.UrlService import UrlService
+
+priority = 60000
+retries = 60
+# update_every = 3
+
+ORDER = ['questions', 'cache_usage', 'cache_size', 'latency']
+CHARTS = {
+ 'questions': {
+ 'options': [None, 'PowerDNS Queries and Answers', 'count', 'questions', 'powerdns.questions', 'line'],
+ 'lines': [
+ ['udp-queries', None, 'incremental'],
+ ['udp-answers', None, 'incremental'],
+ ['tcp-queries', None, 'incremental'],
+ ['tcp-answers', None, 'incremental']
+ ]
+ },
+ 'cache_usage': {
+ 'options': [None, 'PowerDNS Cache Usage', 'count', 'cache', 'powerdns.cache_usage', 'line'],
+ 'lines': [
+ ['query-cache-hit', None, 'incremental'],
+ ['query-cache-miss', None, 'incremental'],
+ ['packetcache-hit', 'packet-cache-hit', 'incremental'],
+ ['packetcache-miss', 'packet-cache-miss', 'incremental']
+ ]
+ },
+ 'cache_size': {
+ 'options': [None, 'PowerDNS Cache Size', 'count', 'cache', 'powerdns.cache_size', 'line'],
+ 'lines': [
+ ['query-cache-size', None, 'absolute'],
+ ['packetcache-size', 'packet-cache-size', 'absolute'],
+ ['key-cache-size', None, 'absolute'],
+ ['meta-cache-size', None, 'absolute']
+ ]
+ },
+ 'latency': {
+ 'options': [None, 'PowerDNS Latency', 'microseconds', 'latency', 'powerdns.latency', 'line'],
+ 'lines': [
+ ['latency', None, 'absolute']
+ ]
+ }
+}
+
+RECURSOR_ORDER = ['questions-in', 'questions-out', 'answer-times', 'timeouts', 'drops', 'cache_usage', 'cache_size']
+
+RECURSOR_CHARTS = {
+ 'questions-in': {
+ 'options': [None, 'PowerDNS Recursor Questions In', 'count', 'questions', 'powerdns_recursor.questions-in',
+ 'line'],
+ 'lines': [
+ ['questions', None, 'incremental'],
+ ['ipv6-questions', None, 'incremental'],
+ ['tcp-questions', None, 'incremental']
+ ]
+ },
+ 'questions-out': {
+ 'options': [None, 'PowerDNS Recursor Questions Out', 'count', 'questions', 'powerdns_recursor.questions-out',
+ 'line'],
+ 'lines': [
+ ['all-outqueries', None, 'incremental'],
+ ['ipv6-outqueries', None, 'incremental'],
+ ['tcp-outqueries', None, 'incremental'],
+ ['throttled-outqueries', None, 'incremental']
+ ]
+ },
+ 'answer-times': {
+ 'options': [None, 'PowerDNS Recursor Answer Times', 'count', 'performance', 'powerdns_recursor.answer-times',
+ 'line'],
+ 'lines': [
+ ['answers-slow', None, 'incremental'],
+ ['answers0-1', None, 'incremental'],
+ ['answers1-10', None, 'incremental'],
+ ['answers10-100', None, 'incremental'],
+ ['answers100-1000', None, 'incremental']
+ ]
+ },
+ 'timeouts': {
+ 'options': [None, 'PowerDNS Recursor Questions Time', 'count', 'performance', 'powerdns_recursor.timeouts',
+ 'line'],
+ 'lines': [
+ ['outgoing-timeouts', None, 'incremental'],
+ ['outgoing4-timeouts', None, 'incremental'],
+ ['outgoing6-timeouts', None, 'incremental']
+ ]
+ },
+ 'drops': {
+ 'options': [None, 'PowerDNS Recursor Drops', 'count', 'performance', 'powerdns_recursor.drops', 'line'],
+ 'lines': [
+ ['over-capacity-drops', None, 'incremental']
+ ]
+ },
+ 'cache_usage': {
+ 'options': [None, 'PowerDNS Recursor Cache Usage', 'count', 'cache', 'powerdns_recursor.cache_usage', 'line'],
+ 'lines': [
+ ['cache-hits', None, 'incremental'],
+ ['cache-misses', None, 'incremental'],
+ ['packetcache-hits', 'packet-cache-hit', 'incremental'],
+ ['packetcache-misses', 'packet-cache-miss', 'incremental']
+ ]
+ },
+ 'cache_size': {
+ 'options': [None, 'PowerDNS Recursor Cache Size', 'count', 'cache', 'powerdns_recursor.cache_size', 'line'],
+ 'lines': [
+ ['cache-entries', None, 'absolute'],
+ ['packetcache-entries', None, 'absolute'],
+ ['negcache-entries', None, 'absolute']
+ ]
+ }
+}
+
+
+class Service(UrlService):
+ def __init__(self, configuration=None, name=None):
+ UrlService.__init__(self, configuration=configuration, name=name)
+ self.order = ORDER
+ self.definitions = CHARTS
+
+ def check(self):
+ self._manager = self._build_manager()
+ if not self._manager:
+ return None
+
+ d = self._get_data()
+ if not d:
+ return False
+
+ if is_recursor(d):
+ self.order = RECURSOR_ORDER
+ self.definitions = RECURSOR_CHARTS
+ self.module_name = 'powerdns_recursor'
+
+ return True
+
+ def _get_data(self):
+ data = self._get_raw_data()
+ if not data:
+ return None
+ return dict((d['name'], d['value']) for d in loads(data))
+
+
+def is_recursor(d):
+ return 'over-capacity-drops' in d and 'tcp-questions' in d
diff --git a/collectors/python.d.plugin/powerdns/powerdns.conf b/collectors/python.d.plugin/powerdns/powerdns.conf
new file mode 100644
index 000000000..ca6200df1
--- /dev/null
+++ b/collectors/python.d.plugin/powerdns/powerdns.conf
@@ -0,0 +1,78 @@
+# netdata python.d.plugin configuration for powerdns
+#
+# This file is in YaML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 1
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# retries sets the number of retries to be made in case of failures.
+# If unset, the default for python.d.plugin is used.
+# Attempts to restore the service are made once every update_every
+# and only if the module has collected values in the past.
+# retries: 60
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# retries: 60 # the JOB's number of restoration attempts
+# autodetection_retry: 0 # the JOB's re-check interval in seconds
+#
+# In addition to the above, powerdns also supports the following:
+#
+# url: 'URL' # the URL to fetch powerdns performance statistics
+# header:
+# X-API-Key: 'Key' # API key
+#
+# ----------------------------------------------------------------------
+# AUTO-DETECTION JOBS
+# only one of them will run (they have the same name)
+
+# localhost:
+# name : 'local'
+# url : 'http://127.0.0.1:8081/api/v1/servers/localhost/statistics'
+# header:
+# X-API-Key: 'change_me'
diff --git a/collectors/python.d.plugin/proxysql/Makefile.inc b/collectors/python.d.plugin/proxysql/Makefile.inc
new file mode 100644
index 000000000..66be372ce
--- /dev/null
+++ b/collectors/python.d.plugin/proxysql/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += proxysql/proxysql.chart.py
+dist_pythonconfig_DATA += proxysql/proxysql.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += proxysql/README.md proxysql/Makefile.inc
+
diff --git a/collectors/python.d.plugin/proxysql/README.md b/collectors/python.d.plugin/proxysql/README.md
new file mode 100644
index 000000000..02388276e
--- /dev/null
+++ b/collectors/python.d.plugin/proxysql/README.md
@@ -0,0 +1,62 @@
+# proxysql
+
+This module monitors ProxySQL backend and frontend performance metrics.
+
+It produces:
+
+1. **Connections (frontend)**
+ * connected: number of frontend connections currently connected
+ * aborted: number of frontend connections aborted due to invalid credentials or max_connections reached
+ * non_idle: number of frontend connections that are not currently idle
+ * created: number of frontend connections created
+2. **Questions (frontend)**
+ * questions: total number of queries sent from frontends
+ * slow_queries: number of queries that ran for longer than the threshold in milliseconds defined in global variable `mysql-long_query_time`
+3. **Overall Bandwidth (backends)**
+ * in
+ * out
+4. **Status (backends)**
+ * Backends
+ * `1=ONLINE`: backend server is fully operational
+ * `2=SHUNNED`: backend server is temporarily taken out of use, either because of too many connection errors in too short a time, or because replication lag exceeded the allowed threshold
+ * `3=OFFLINE_SOFT`: when a server is put into OFFLINE_SOFT mode, new incoming connections aren't accepted anymore, while the existing connections are kept until they become inactive. In other words, connections are kept in use until the current transaction is completed. This allows a backend to be gracefully detached
+ * `4=OFFLINE_HARD`: when a server is put into OFFLINE_HARD mode, the existing connections are dropped, while new incoming connections aren't accepted either. This is equivalent to deleting the server from a hostgroup, or temporarily taking it out of the hostgroup for maintenance work
+ * `-1`: Unknown status
+5. **Bandwidth (backends)**
+ * Backends
+ * in
+ * out
+6. **Queries (backends)**
+ * Backends
+ * queries
+7. **Latency (backends)**
+ * Backends
+ * ping time
+8. **Pool connections (backends)**
+ * Backends
+ * Used: the number of connections currently used by ProxySQL for sending queries to the backend server.
+ * Free: the number of connections currently free.
+ * Established/OK: the number of connections that were established successfully.
+ * Error: the number of connections that failed to be established.
+9. **Commands**
+ * Commands
+ * Count
+ * Duration (Total duration for each command)
+10. **Commands Histogram**
+ * Commands
+ * 100us, 500us, ..., 10s, inf: the total number of commands of the given type that completed within the specified time limit (and above the previous bucket's limit).
+
+### configuration
+
+```yaml
+tcpipv4:
+ name : 'local'
+ user : 'stats'
+ pass : 'stats'
+ host : '127.0.0.1'
+ port : '6032'
+```
+
+If no configuration is given, the module will fail to run.
+
+---
diff --git a/collectors/python.d.plugin/proxysql/proxysql.chart.py b/collectors/python.d.plugin/proxysql/proxysql.chart.py
new file mode 100644
index 000000000..f7e3d49f9
--- /dev/null
+++ b/collectors/python.d.plugin/proxysql/proxysql.chart.py
@@ -0,0 +1,356 @@
+# -*- coding: utf-8 -*-
+# Description: Proxysql netdata python.d module
+# Author: Ali Borhani (alibo)
+# SPDX-License-Identifier: GPL-3.0+
+
+from bases.FrameworkServices.MySQLService import MySQLService
+
+# default module values (can be overridden per job in `config`)
+# update_every = 3
+priority = 60000
+retries = 60
+
+
+def query(table, *params):
+ return 'SELECT {params} FROM {table}'.format(table=table, params=', '.join(params))
+
+
+# https://github.com/sysown/proxysql/blob/master/doc/admin_tables.md#stats_mysql_global
+QUERY_GLOBAL = query(
+ "stats_mysql_global",
+ "Variable_Name",
+ "Variable_Value"
+)
+
+# https://github.com/sysown/proxysql/blob/master/doc/admin_tables.md#stats_mysql_connection_pool
+QUERY_CONNECTION_POOL = query(
+ "stats_mysql_connection_pool",
+ "hostgroup",
+ "srv_host",
+ "srv_port",
+ "status",
+ "ConnUsed",
+ "ConnFree",
+ "ConnOK",
+ "ConnERR",
+ "Queries",
+ "Bytes_data_sent",
+ "Bytes_data_recv",
+ "Latency_us"
+)
+
+# https://github.com/sysown/proxysql/blob/master/doc/admin_tables.md#stats_mysql_commands_counters
+QUERY_COMMANDS = query(
+ "stats_mysql_commands_counters",
+ "Command",
+ "Total_Time_us",
+ "Total_cnt",
+ "cnt_100us",
+ "cnt_500us",
+ "cnt_1ms",
+ "cnt_5ms",
+ "cnt_10ms",
+ "cnt_50ms",
+ "cnt_100ms",
+ "cnt_500ms",
+ "cnt_1s",
+ "cnt_5s",
+ "cnt_10s",
+ "cnt_INFs"
+)
+
+GLOBAL_STATS = [
+ 'client_connections_aborted',
+ 'client_connections_connected',
+ 'client_connections_created',
+ 'client_connections_non_idle',
+ 'proxysql_uptime',
+ 'questions',
+ 'slow_queries'
+]
+
+CONNECTION_POOL_STATS = [
+ 'status',
+ 'connused',
+ 'connfree',
+ 'connok',
+ 'connerr',
+ 'queries',
+ 'bytes_data_sent',
+ 'bytes_data_recv',
+ 'latency_us'
+]
+
+ORDER = [
+ 'connections',
+ 'active_transactions',
+ 'questions',
+ 'pool_overall_net',
+ 'commands_count',
+ 'commands_duration',
+ 'pool_status',
+ 'pool_net',
+ 'pool_queries',
+ 'pool_latency',
+ 'pool_connection_used',
+ 'pool_connection_free',
+ 'pool_connection_ok',
+ 'pool_connection_error'
+]
+
+HISTOGRAM_ORDER = [
+ '100us',
+ '500us',
+ '1ms',
+ '5ms',
+ '10ms',
+ '50ms',
+ '100ms',
+ '500ms',
+ '1s',
+ '5s',
+ '10s',
+ 'inf'
+]
+
+STATUS = {
+ "ONLINE": 1,
+ "SHUNNED": 2,
+ "OFFLINE_SOFT": 3,
+ "OFFLINE_HARD": 4
+}
+
+CHARTS = {
+ 'pool_status': {
+ 'options': [None, 'ProxySQL Backend Status', 'status', 'status', 'proxysql.pool_status', 'line'],
+ 'lines': []
+ },
+ 'pool_net': {
+ 'options': [None, 'ProxySQL Backend Bandwidth', 'kilobits/s', 'bandwidth', 'proxysql.pool_net', 'area'],
+ 'lines': []
+ },
+ 'pool_overall_net': {
+ 'options': [None, 'ProxySQL Backend Overall Bandwidth', 'kilobits/s', 'overall_bandwidth',
+ 'proxysql.pool_overall_net', 'area'],
+ 'lines': [
+ ['bytes_data_recv', 'in', 'incremental', 8, 1024],
+ ['bytes_data_sent', 'out', 'incremental', -8, 1024]
+ ]
+ },
+ 'questions': {
+ 'options': [None, 'ProxySQL Frontend Questions', 'questions/s', 'questions', 'proxysql.questions', 'line'],
+ 'lines': [
+ ['questions', 'questions', 'incremental'],
+ ['slow_queries', 'slow_queries', 'incremental']
+ ]
+ },
+ 'pool_queries': {
+ 'options': [None, 'ProxySQL Backend Queries', 'queries/s', 'queries', 'proxysql.queries', 'line'],
+ 'lines': []
+ },
+ 'active_transactions': {
+ 'options': [None, 'ProxySQL Frontend Active Transactions', 'transactions/s', 'active_transactions',
+ 'proxysql.active_transactions', 'line'],
+ 'lines': [
+ ['active_transactions', 'active_transactions', 'absolute']
+ ]
+ },
+ 'pool_latency': {
+ 'options': [None, 'ProxySQL Backend Latency', 'ms', 'latency', 'proxysql.latency', 'line'],
+ 'lines': []
+ },
+ 'connections': {
+ 'options': [None, 'ProxySQL Frontend Connections', 'connections/s', 'connections', 'proxysql.connections',
+ 'line'],
+ 'lines': [
+ ['client_connections_connected', 'connected', 'absolute'],
+ ['client_connections_created', 'created', 'incremental'],
+ ['client_connections_aborted', 'aborted', 'incremental'],
+ ['client_connections_non_idle', 'non_idle', 'absolute']
+ ]
+ },
+ 'pool_connection_used': {
+ 'options': [None, 'ProxySQL Used Connections', 'connections', 'pool_connections',
+ 'proxysql.pool_used_connections', 'line'],
+ 'lines': []
+ },
+ 'pool_connection_free': {
+ 'options': [None, 'ProxySQL Free Connections', 'connections', 'pool_connections',
+ 'proxysql.pool_free_connections', 'line'],
+ 'lines': []
+ },
+ 'pool_connection_ok': {
+ 'options': [None, 'ProxySQL Established Connections', 'connections', 'pool_connections',
+ 'proxysql.pool_ok_connections', 'line'],
+ 'lines': []
+ },
+ 'pool_connection_error': {
+ 'options': [None, 'ProxySQL Error Connections', 'connections', 'pool_connections',
+ 'proxysql.pool_error_connections', 'line'],
+ 'lines': []
+ },
+ 'commands_count': {
+ 'options': [None, 'ProxySQL Commands', 'commands', 'commands', 'proxysql.commands_count', 'line'],
+ 'lines': []
+ },
+ 'commands_duration': {
+ 'options': [None, 'ProxySQL Commands Duration', 'ms', 'commands', 'proxysql.commands_duration', 'line'],
+ 'lines': []
+ }
+}
+
+
+class Service(MySQLService):
+ def __init__(self, configuration=None, name=None):
+ MySQLService.__init__(self, configuration=configuration, name=name)
+ self.order = ORDER
+ self.definitions = CHARTS
+ self.queries = dict(
+ global_status=QUERY_GLOBAL,
+ connection_pool_status=QUERY_CONNECTION_POOL,
+ commands_status=QUERY_COMMANDS
+ )
+
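+ # Per-backend and per-command charts are populated dynamically: dimensions
+ # (and the per-command histogram charts) are added the first time a backend
+ # or command shows up in the collected data.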
+ def _get_data(self):
+ raw_data = self._get_raw_data(description=True)
+
+ if not raw_data:
+ return None
+
+ to_netdata = dict()
+
+ if 'global_status' in raw_data:
+ global_status = dict(raw_data['global_status'][0])
+ for key in global_status:
+ if key.lower() in GLOBAL_STATS:
+ to_netdata[key.lower()] = global_status[key]
+
+ if 'connection_pool_status' in raw_data:
+
+ to_netdata['bytes_data_recv'] = 0
+ to_netdata['bytes_data_sent'] = 0
+
+ for record in raw_data['connection_pool_status'][0]:
+ backend = self.generate_backend(record)
+ name = self.generate_backend_name(backend)
+
+ for key in backend:
+ if key in CONNECTION_POOL_STATS:
+ if key == 'status':
+ backend[key] = self.convert_status(backend[key])
+
+ if len(self.charts) > 0:
+ if (name + '_status') not in self.charts['pool_status']:
+ self.add_backend_dimensions(name)
+
+ to_netdata["{0}_{1}".format(name, key)] = backend[key]
+
+ if key == 'bytes_data_recv':
+ to_netdata['bytes_data_recv'] += int(backend[key])
+
+ if key == 'bytes_data_sent':
+ to_netdata['bytes_data_sent'] += int(backend[key])
+
+ if 'commands_status' in raw_data:
+ for record in raw_data['commands_status'][0]:
+ cmd = self.generate_command_stats(record)
+ name = cmd['name']
+
+ if len(self.charts) > 0:
+ if (name + '_count') not in self.charts['commands_count']:
+ self.add_command_dimensions(name)
+ self.add_histogram_chart(cmd)
+
+ to_netdata[name + '_count'] = cmd['count']
+ to_netdata[name + '_duration'] = cmd['duration']
+ for histogram in cmd['histogram']:
+ dimId = 'commands_histogram_{0}_{1}'.format(name, histogram)
+ to_netdata[dimId] = cmd['histogram'][histogram]
+
+ return to_netdata or None
+
+ def add_backend_dimensions(self, name):
+ self.charts['pool_status'].add_dimension([name + '_status', name, 'absolute'])
+ self.charts['pool_net'].add_dimension([name + '_bytes_data_recv', 'from_' + name, 'incremental', 8, 1024])
+ self.charts['pool_net'].add_dimension([name + '_bytes_data_sent', 'to_' + name, 'incremental', -8, 1024])
+ self.charts['pool_queries'].add_dimension([name + '_queries', name, 'incremental'])
+ self.charts['pool_latency'].add_dimension([name + '_latency_us', name, 'absolute', 1, 1000])
+ self.charts['pool_connection_used'].add_dimension([name + '_connused', name, 'absolute'])
+ self.charts['pool_connection_free'].add_dimension([name + '_connfree', name, 'absolute'])
+ self.charts['pool_connection_ok'].add_dimension([name + '_connok', name, 'incremental'])
+ self.charts['pool_connection_error'].add_dimension([name + '_connerr', name, 'incremental'])
+
+ def add_command_dimensions(self, cmd):
+ self.charts['commands_count'].add_dimension([cmd + '_count', cmd, 'incremental'])
+ self.charts['commands_duration'].add_dimension([cmd + '_duration', cmd, 'incremental', 1, 1000])
+
+ def add_histogram_chart(self, cmd):
+ chart = self.charts.add_chart(self.histogram_chart(cmd))
+
+ for histogram in HISTOGRAM_ORDER:
+ dimId = 'commands_histogram_{0}_{1}'.format(cmd['name'], histogram)
+ chart.add_dimension([dimId, histogram, 'incremental'])
+
+ @staticmethod
+ def histogram_chart(cmd):
+ return [
+ 'commands_histogram_' + cmd['name'],
+ None,
+ 'ProxySQL {0} Command Histogram'.format(cmd['name'].title()),
+ 'commands',
+ 'commands_histogram',
+ 'proxysql.commands_histogram_' + cmd['name'],
+ 'stacked'
+ ]
+
+ @staticmethod
+ def generate_backend(data):
+ return {
+ 'hostgroup': data[0],
+ 'srv_host': data[1],
+ 'srv_port': data[2],
+ 'status': data[3],
+ 'connused': data[4],
+ 'connfree': data[5],
+ 'connok': data[6],
+ 'connerr': data[7],
+ 'queries': data[8],
+ 'bytes_data_sent': data[9],
+ 'bytes_data_recv': data[10],
+ 'latency_us': data[11]
+ }
+
+ @staticmethod
+ def generate_command_stats(data):
+ return {
+ 'name': data[0].lower(),
+ 'duration': data[1],
+ 'count': data[2],
+ 'histogram': {
+ '100us': data[3],
+ '500us': data[4],
+ '1ms': data[5],
+ '5ms': data[6],
+ '10ms': data[7],
+ '50ms': data[8],
+ '100ms': data[9],
+ '500ms': data[10],
+ '1s': data[11],
+ '5s': data[12],
+ '10s': data[13],
+ 'inf': data[14]
+ }
+ }
+
+ @staticmethod
+ def generate_backend_name(backend):
+ hostgroup = backend['hostgroup'].replace(' ', '_').lower()
+ host = backend['srv_host'].replace('.', '_')
+
+ return "{0}_{1}_{2}".format(hostgroup, host, backend['srv_port'])
+
+ @staticmethod
+ def convert_status(status):
+ if status in STATUS:
+ return STATUS[status]
+ return -1
diff --git a/collectors/python.d.plugin/proxysql/proxysql.conf b/collectors/python.d.plugin/proxysql/proxysql.conf
new file mode 100644
index 000000000..d29c2e5be
--- /dev/null
+++ b/collectors/python.d.plugin/proxysql/proxysql.conf
@@ -0,0 +1,118 @@
+# netdata python.d.plugin configuration for ProxySQL
+#
+# This file is in YaML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 1
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# retries sets the number of retries to be made in case of failures.
+# If unset, the default for python.d.plugin is used.
+# Attempts to restore the service are made once every update_every
+# and only if the module has collected values in the past.
+# retries: 60
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# retries: 60 # the JOB's number of restoration attempts
+# autodetection_retry: 0 # the JOB's re-check interval in seconds
+#
+# In addition to the above, proxysql also supports the following:
+#
+# host: 'IP or HOSTNAME' # the host to connect to
+# port: PORT # the port to connect to
+#
+# in all cases, the following can also be set:
+#
+# user: 'username' # the proxysql username to use
+# pass: 'password' # the proxysql password to use
+#
+
+# AUTO-DETECTION JOBS
+# only one of them will run (they have the same name)
+
+tcp:
+ name : 'local'
+ user : 'stats'
+ pass : 'stats'
+ host : 'localhost'
+ port : '6032'
+
+tcpipv4:
+ name : 'local'
+ user : 'stats'
+ pass : 'stats'
+ host : '127.0.0.1'
+ port : '6032'
+
+tcpipv6:
+ name : 'local'
+ user : 'stats'
+ pass : 'stats'
+ host : '::1'
+ port : '6032'
+
+tcp_admin:
+ name : 'local'
+ user : 'admin'
+ pass : 'admin'
+ host : 'localhost'
+ port : '6032'
+
+tcpipv4_admin:
+ name : 'local'
+ user : 'admin'
+ pass : 'admin'
+ host : '127.0.0.1'
+ port : '6032'
+
+tcpipv6_admin:
+ name : 'local'
+ user : 'admin'
+ pass : 'admin'
+ host : '::1'
+ port : '6032'
diff --git a/collectors/python.d.plugin/puppet/Makefile.inc b/collectors/python.d.plugin/puppet/Makefile.inc
new file mode 100644
index 000000000..fe94b9254
--- /dev/null
+++ b/collectors/python.d.plugin/puppet/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += puppet/puppet.chart.py
+dist_pythonconfig_DATA += puppet/puppet.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += puppet/README.md puppet/Makefile.inc
+
diff --git a/collectors/python.d.plugin/puppet/README.md b/collectors/python.d.plugin/puppet/README.md
new file mode 100644
index 000000000..8304c831e
--- /dev/null
+++ b/collectors/python.d.plugin/puppet/README.md
@@ -0,0 +1,48 @@
+# puppet
+
+Monitors the status of Puppet Server and PuppetDB.
+
+The following charts are drawn:
+
+1. **JVM Heap**
+ * committed (allocated from OS)
+ * used (actual use)
+2. **JVM Non-Heap**
+ * committed (allocated from OS)
+ * used (actual use)
+3. **CPU Usage**
+ * execution
+ * GC (taken by garbage collection)
+4. **File Descriptors**
+ * max
+ * used
+
+
+### configuration
+
+```yaml
+puppetdb:
+ url: 'https://fqdn.example.com:8081'
+ tls_cert_file: /path/to/client.crt
+ tls_key_file: /path/to/client.key
+ autodetection_retry: 1
+ retries: 3600
+
+puppetserver:
+ url: 'https://fqdn.example.com:8140'
+ autodetection_retry: 1
+ retries: 3600
+```
+
+When no configuration is given, `https://<the node's FQDN>:8140` is
+tried without any retries.
+
+### notes
+
+* The exact fully qualified domain name of the node should be used.
+* Puppet Server/PuppetDB startup is usually very slow, so a reasonably
+ large retry count should be used.
+* A secure PuppetDB configuration may require a client certificate. This
+ does not apply to the default PuppetDB configuration though.
+
+---
diff --git a/collectors/python.d.plugin/puppet/puppet.chart.py b/collectors/python.d.plugin/puppet/puppet.chart.py
new file mode 100644
index 000000000..5c8e48bd9
--- /dev/null
+++ b/collectors/python.d.plugin/puppet/puppet.chart.py
@@ -0,0 +1,121 @@
+# -*- coding: utf-8 -*-
+# Description: puppet netdata python.d module
+# Author: Andrey Galkin <andrey@futoin.org> (andvgal)
+# SPDX-License-Identifier: GPL-3.0-or-later
+#
+# This module should work both with OpenSource and PE versions
+# of PuppetServer and PuppetDB.
+#
+# NOTE: PuppetDB may be configured to require a proper TLS
+# client certificate for security reasons. Use the tls_key_file
+# and tls_cert_file options in that case.
+#
+
+from bases.FrameworkServices.UrlService import UrlService
+from json import loads
+import socket
+
+update_every = 5
+priority = 60000
+# very long clojure-based service startup time
+retries = 180
+
+MB = 1048576
+CPU_SCALE = 1000
+ORDER = [
+ 'jvm_heap',
+ 'jvm_nonheap',
+ 'cpu',
+ 'fd_open',
+]
+CHARTS = {
+ 'jvm_heap': {
+ 'options': [None, 'JVM Heap', 'MB', 'resources', 'puppet.jvm', 'area'],
+ 'lines': [
+ ['jvm_heap_committed', 'committed', 'absolute', 1, MB],
+ ['jvm_heap_used', 'used', 'absolute', 1, MB],
+ ],
+ 'variables': [
+ ['jvm_heap_max'],
+ ['jvm_heap_init'],
+ ],
+ },
+ 'jvm_nonheap': {
+ 'options': [None, 'JVM Non-Heap', 'MB', 'resources', 'puppet.jvm', 'area'],
+ 'lines': [
+ ['jvm_nonheap_committed', 'committed', 'absolute', 1, MB],
+ ['jvm_nonheap_used', 'used', 'absolute', 1, MB],
+ ],
+ 'variables': [
+ ['jvm_nonheap_max'],
+ ['jvm_nonheap_init'],
+ ],
+ },
+ 'cpu': {
+ 'options': [None, 'CPU usage', 'percentage', 'resources', 'puppet.cpu', 'stacked'],
+ 'lines': [
+ ['cpu_time', 'execution', 'absolute', 1, CPU_SCALE],
+ ['gc_time', 'GC', 'absolute', 1, CPU_SCALE],
+ ]
+ },
+ 'fd_open': {
+ 'options': [None, 'File Descriptors', 'descriptors', 'resources', 'puppet.fdopen', 'line'],
+ 'lines': [
+ ['fd_used', 'used', 'absolute'],
+ ],
+ 'variables': [
+ ['fd_max'],
+ ],
+ },
+}
+
+
+class Service(UrlService):
+ def __init__(self, configuration=None, name=None):
+ UrlService.__init__(self, configuration=configuration, name=name)
+ self.url = 'https://{0}:8140'.format(socket.getfqdn())
+ self.order = ORDER
+ self.definitions = CHARTS
+
+ def _get_data(self):
+ # NOTE: there are several ways to retrieve data
+ # 1. Only PE versions:
+ # https://puppet.com/docs/pe/2018.1/api_status/status_api_metrics_endpoints.html
+ # 2. Individual Metrics API (JMX):
+ # https://puppet.com/docs/pe/2018.1/api_status/metrics_api.html
+ # 3. Extended status at debug level:
+ # https://puppet.com/docs/pe/2018.1/api_status/status_api_json_endpoints.html
+ #
+ # For the sake of simplicity and efficiency, the status endpoint is used.
+
+ raw_data = self._get_raw_data(self.url + '/status/v1/services?level=debug')
+
+ if raw_data is None:
+ return None
+
+ raw_data = loads(raw_data)
+ data = {}
+
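+ # Depending on the Puppet Server version, jvm-metrics may be nested under
+ # the 'experimental' key; try that location first and fall back to the
+ # top-level one.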
+ try:
+ try:
+ jvm_metrics = raw_data['status-service']['status']['experimental']['jvm-metrics']
+ except KeyError:
+ jvm_metrics = raw_data['status-service']['status']['jvm-metrics']
+
+ heap_mem = jvm_metrics['heap-memory']
+ non_heap_mem = jvm_metrics['non-heap-memory']
+
+ for k in ['max', 'committed', 'used', 'init']:
+ data['jvm_heap_'+k] = heap_mem[k]
+ data['jvm_nonheap_'+k] = non_heap_mem[k]
+
+ fd_open = jvm_metrics['file-descriptors']
+ data['fd_max'] = fd_open['max']
+ data['fd_used'] = fd_open['used']
+
+ data['cpu_time'] = int(jvm_metrics['cpu-usage'] * CPU_SCALE)
+ data['gc_time'] = int(jvm_metrics['gc-cpu-usage'] * CPU_SCALE)
+ except KeyError:
+ pass
+
+ return data or None
diff --git a/collectors/python.d.plugin/puppet/puppet.conf b/collectors/python.d.plugin/puppet/puppet.conf
new file mode 100644
index 000000000..991bfabed
--- /dev/null
+++ b/collectors/python.d.plugin/puppet/puppet.conf
@@ -0,0 +1,98 @@
+# netdata python.d.plugin configuration for Puppet Server and Puppet DB
+#
+# This file is in YaML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 1
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# retries sets the number of retries to be made in case of failures.
+# If unset, the default for python.d.plugin is used.
+# Attempts to restore the service are made once every update_every
+# and only if the module has collected values in the past.
+# retries: 60
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# retries: 60 # the JOB's number of restoration attempts
+# autodetection_retry: 0 # the JOB's re-check interval in seconds
+#
+# These configuration options come from the UrlService base:
+# url: # HTTP or HTTPS URL
+# tls_verify: False # Control HTTPS server certificate verification
+# tls_ca_file: # Optional CA (bundle) file to use
+# tls_cert_file: # Optional client certificate file
+# tls_key_file: # Optional client key file
+#
+# ----------------------------------------------------------------------
+# AUTO-DETECTION JOBS
+# only one of them will run (they have the same name)
+# puppet:
+# url: 'https://<FQDN>:8140'
+#
+
+#
+# Production configuration should look like below.
+#
+# NOTE: Puppet Server/PuppetDB startup is usually very slow, so a
+# reasonably large retry count should be used.
+#
+# NOTE: a secure PuppetDB config may require a client certificate.
+# This does not apply to the default PuppetDB configuration though.
+#
+# puppetdb:
+# url: 'https://fqdn.example.com:8081'
+# tls_cert_file: /path/to/client.crt
+# tls_key_file: /path/to/client.key
+# autodetection_retry: 1
+# retries: 3600
+#
+# puppetserver:
+# url: 'https://fqdn.example.com:8140'
+# autodetection_retry: 1
+# retries: 3600
+#
diff --git a/collectors/python.d.plugin/python.d.conf b/collectors/python.d.plugin/python.d.conf
new file mode 100644
index 000000000..97f4cb8d5
--- /dev/null
+++ b/collectors/python.d.plugin/python.d.conf
@@ -0,0 +1,97 @@
+# netdata python.d.plugin configuration
+#
+# This file is in YaML format.
+# Generally the format is:
+#
+# name: value
+#
+
+# Enable / disable the whole python.d.plugin (all its modules)
+enabled: yes
+
+# ----------------------------------------------------------------------
+# Enable / Disable python.d.plugin modules
+#default_run: yes
+#
+# If "default_run" = "yes" the default for all modules is enabled (yes).
+# Setting any of these to "no" will disable it.
+#
+# If "default_run" = "no" the default for all modules is disabled (no).
+# Setting any of these to "yes" will enable it.
+
+# Enable / Disable explicit garbage collection (full collection run). Default is enabled.
+gc_run: yes
+
+# Garbage collection interval in seconds. Default is 300.
+gc_interval: 300
+
+# apache: yes
+
+# apache_cache has been replaced by web_log
+apache_cache: no
+# beanstalk: yes
+# bind_rndc: yes
+# boinc: yes
+# ceph: yes
+chrony: no
+# couchdb: yes
+# cpufreq: yes
+# cpuidle: yes
+# dns_query_time: yes
+# dnsdist: yes
+# dovecot: yes
+# elasticsearch: yes
+
+# this is just an example
+example: no
+
+# exim: yes
+# fail2ban: yes
+# freeradius: yes
+go_expvar: no
+
+# gunicorn_log has been replaced by web_log
+gunicorn_log: no
+# haproxy: yes
+# hddtemp: yes
+# icecast: yes
+# ipfs: yes
+# isc_dhcpd: yes
+# linux_power_supply: yes
+# litespeed: yes
+logind: no
+# mdstat: yes
+# memcached: yes
+# mongodb: yes
+# monit: yes
+# mysql: yes
+# nginx: yes
+# nginx_plus: yes
+
+# nginx_log has been replaced by web_log
+nginx_log: no
+# nsd: yes
+# ntpd: yes
+# ovpn_status_log: yes
+# phpfpm: yes
+# postfix: yes
+# postgres: yes
+# powerdns: yes
+# proxysql: yes
+# puppet: yes
+# rabbitmq: yes
+# redis: yes
+# rethinkdbs: yes
+# retroshare: yes
+# samba: yes
+# sensors: yes
+# smartd_log: yes
+# spigotmc: yes
+# springboot: yes
+# squid: yes
+# tomcat: yes
+unbound: no
+# uwsgi: yes
+# varnish: yes
+# w1sensor: yes
+# web_log: yes
diff --git a/collectors/python.d.plugin/python.d.plugin b/collectors/python.d.plugin/python.d.plugin
new file mode 100644
index 000000000..264c3383d
--- /dev/null
+++ b/collectors/python.d.plugin/python.d.plugin
@@ -0,0 +1,427 @@
+#!/usr/bin/env bash
+'''':; exec "$(command -v python || command -v python3 || command -v python2 ||
+echo "ERROR python IS NOT AVAILABLE IN THIS SYSTEM")" "$0" "$@" # '''
+
+# -*- coding: utf-8 -*-
+# Description:
+# Author: Pawel Krupa (paulfantom)
+# Author: Ilya Mashchenko (l2isbad)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+import gc
+import os
+import sys
+import threading
+
+from re import sub
+from sys import version_info, argv
+from time import sleep
+
+GC_RUN = True
+GC_COLLECT_EVERY = 300
+
+PY_VERSION = version_info[:2]
+
+USER_CONFIG_DIR = os.getenv('NETDATA_USER_CONFIG_DIR', '/usr/local/etc/netdata')
+STOCK_CONFIG_DIR = os.getenv('NETDATA_STOCK_CONFIG_DIR', '/usr/local/lib/netdata/conf.d')
+
+PLUGINS_USER_CONFIG_DIR = os.path.join(USER_CONFIG_DIR, 'python.d')
+PLUGINS_STOCK_CONFIG_DIR = os.path.join(STOCK_CONFIG_DIR, 'python.d')
+
+
+PLUGINS_DIR = os.path.abspath(os.getenv(
+ 'NETDATA_PLUGINS_DIR',
+ os.path.dirname(__file__)) + '/../python.d')
+
+
+PYTHON_MODULES_DIR = os.path.join(PLUGINS_DIR, 'python_modules')
+
+sys.path.append(PYTHON_MODULES_DIR)
+
+from bases.loaders import ModuleAndConfigLoader # noqa: E402
+from bases.loggers import PythonDLogger # noqa: E402
+from bases.collection import setdefault_values, run_and_exit # noqa: E402
+
+try:
+ from collections import OrderedDict
+except ImportError:
+ from third_party.ordereddict import OrderedDict
+
+BASE_CONFIG = {'update_every': os.getenv('NETDATA_UPDATE_EVERY', 1),
+ 'retries': 60,
+ 'priority': 60000,
+ 'autodetection_retry': 0,
+ 'chart_cleanup': 10,
+ 'name': str()}
+
+
+MODULE_EXTENSION = '.chart.py'
+OBSOLETE_MODULES = ['apache_cache', 'gunicorn_log', 'nginx_log']
+
+
+def module_ok(m):
+ return m.endswith(MODULE_EXTENSION) and m[:-len(MODULE_EXTENSION)] not in OBSOLETE_MODULES
+
+
+ALL_MODULES = [m for m in sorted(os.listdir(PLUGINS_DIR)) if module_ok(m)]
+
+
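+# Command line arguments (passed by netdata or given manually), e.g.:
+#   python.d.plugin [update_every] [debug] [trace] [module ...]
+# where update_every is an integer override of the collection frequency and
+# any module names restrict the run to that subset of ALL_MODULES.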
+def parse_cmd():
+ debug = 'debug' in argv[1:]
+ trace = 'trace' in argv[1:]
+ override_update_every = next((arg for arg in argv[1:] if arg.isdigit() and int(arg) > 1), False)
+ modules = [''.join([m, MODULE_EXTENSION]) for m in argv[1:] if ''.join([m, MODULE_EXTENSION]) in ALL_MODULES]
+ return debug, trace, override_update_every, modules or ALL_MODULES
+
+
+def multi_job_check(config):
+ return next((True for key in config if isinstance(config[key], dict)), False)
+
+
+class RawModule:
+ def __init__(self, name, path, explicitly_enabled=True):
+ self.name = name
+ self.path = path
+ self.explicitly_enabled = explicitly_enabled
+
+
+class Job(object):
+ def __init__(self, initialized_job, job_id):
+ """
+ :param initialized_job: instance of <Class Service>
+ :param job_id: <str>
+ """
+ self.job = initialized_job
+ self.id = job_id # key in Modules.jobs()
+ self.module_name = self.job.__module__ # used in Plugin.delete_job()
+ self.recheck_every = self.job.configuration.pop('autodetection_retry')
+ self.checked = False # used in Plugin.check_job()
+ self.created = False # used in Plugin.create_job_charts()
+ if self.job.update_every < int(OVERRIDE_UPDATE_EVERY):
+ self.job.update_every = int(OVERRIDE_UPDATE_EVERY)
+
+ def __getattr__(self, item):
+ return getattr(self.job, item)
+
+ def __repr__(self):
+ return self.job.__repr__()
+
+ def is_dead(self):
+ return bool(self.ident) and not self.is_alive()
+
+ def not_launched(self):
+ return not bool(self.ident)
+
+ def is_autodetect(self):
+ return self.recheck_every
+
+
+class Module(object):
+ def __init__(self, service, config):
+ """
+ :param service: <Module>
+ :param config: <dict>
+ """
+ self.service = service
+ self.name = service.__name__
+ self.config = self.jobs_configurations_builder(config)
+ self.jobs = OrderedDict()
+ self.counter = 1
+
+ self.initialize_jobs()
+
+ def __repr__(self):
+ return "<Class Module '{name}'>".format(name=self.name)
+
+ def __iter__(self):
+ return iter(OrderedDict(self.jobs).values())
+
+ def __getitem__(self, item):
+ return self.jobs[item]
+
+ def __delitem__(self, key):
+ del self.jobs[key]
+
+ def __len__(self):
+ return len(self.jobs)
+
+ def __bool__(self):
+ return bool(self.jobs)
+
+ def __nonzero__(self):
+ return self.__bool__()
+
+ def jobs_configurations_builder(self, config):
+ """
+ :param config: <dict>
+ :return:
+ """
+ counter = 0
+ job_base_config = dict()
+
+ for attr in BASE_CONFIG:
+ job_base_config[attr] = config.pop(attr, getattr(self.service, attr, BASE_CONFIG[attr]))
+
+ if not config:
+ config = {str(): dict()}
+ elif not multi_job_check(config):
+ config = {str(): config}
+
+ for job_name in config:
+ if not isinstance(config[job_name], dict):
+ continue
+
+ job_config = setdefault_values(config[job_name], base_dict=job_base_config)
+ job_name = sub(r'\s+', '_', job_name)
+ config[job_name]['name'] = sub(r'\s+', '_', config[job_name]['name'])
+ counter += 1
+ job_id = 'job' + str(counter).zfill(3)
+
+ yield job_id, job_name, job_config
+
+ def initialize_jobs(self):
+ """
+ :return:
+ """
+ for job_id, job_name, job_config in self.config:
+ job_config['job_name'] = job_name
+ job_config['override_name'] = job_config.pop('name')
+
+ try:
+ initialized_job = self.service.Service(configuration=job_config)
+ except Exception as error:
+ Logger.error("job initialization: '{module_name} {job_name}' "
+ "=> ['FAILED'] ({error})".format(module_name=self.name,
+ job_name=job_name,
+ error=error))
+ continue
+ else:
+ Logger.debug("job initialization: '{module_name} {job_name}' "
+ "=> ['OK']".format(module_name=self.name,
+ job_name=job_name or self.name))
+ self.jobs[job_id] = Job(initialized_job=initialized_job,
+ job_id=job_id)
+ del self.config
+ del self.service
+
+
+class Plugin(object):
+ def __init__(self):
+ self.loader = ModuleAndConfigLoader()
+ self.modules = OrderedDict()
+ self.sleep_time = 1
+ self.runs_counter = 0
+
+ user_config = os.path.join(USER_CONFIG_DIR, 'python.d.conf')
+ stock_config = os.path.join(STOCK_CONFIG_DIR, 'python.d.conf')
+
+ Logger.debug("loading '{0}'".format(user_config))
+ self.config, error = self.loader.load_config_from_file(user_config)
+
+ if error:
+ Logger.error("cannot load '{0}': {1}. Will try stock version.".format(user_config, error))
+ Logger.debug("loading '{0}'".format(stock_config))
+ self.config, error = self.loader.load_config_from_file(stock_config)
+ if error:
+ Logger.error("cannot load '{0}': {1}".format(stock_config, error))
+
+ self.do_gc = self.config.get("gc_run", GC_RUN)
+ self.gc_interval = self.config.get("gc_interval", GC_COLLECT_EVERY)
+
+ if not self.config.get('enabled', True):
+ run_and_exit(Logger.info)('DISABLED in configuration file.')
+
+ self.load_and_initialize_modules()
+ if not self.modules:
+ run_and_exit(Logger.info)('No modules to run. Exit...')
+
+ def __iter__(self):
+ return iter(OrderedDict(self.modules).values())
+
+ @property
+ def jobs(self):
+ return (job for mod in self for job in mod)
+
+ @property
+ def dead_jobs(self):
+ return (job for job in self.jobs if job.is_dead())
+
+ @property
+ def autodetect_jobs(self):
+ return [job for job in self.jobs if job.not_launched()]
+
+ def enabled_modules(self):
+ for mod in MODULES_TO_RUN:
+ mod_name = mod[:-len(MODULE_EXTENSION)]
+ mod_path = os.path.join(PLUGINS_DIR, mod)
+ if any(
+ [
+ self.config.get('default_run', True) and self.config.get(mod_name, True),
+ (not self.config.get('default_run')) and self.config.get(mod_name),
+ ]
+ ):
+ yield RawModule(
+ name=mod_name,
+ path=mod_path,
+ explicitly_enabled=self.config.get(mod_name),
+ )
+
+ def load_and_initialize_modules(self):
+ for mod in self.enabled_modules():
+
+ # Load module from file ------------------------------------------------------------
+ loaded_module, error = self.loader.load_module_from_file(mod.name, mod.path)
+ log = Logger.error if error else Logger.debug
+ log("module load source: '{module_name}' => [{status}]".format(status='FAILED' if error else 'OK',
+ module_name=mod.name))
+ if error:
+ Logger.error("load source error : {0}".format(error))
+ continue
+
+ # Load module config from file ------------------------------------------------------
+ user_config = os.path.join(PLUGINS_USER_CONFIG_DIR, mod.name + '.conf')
+ stock_config = os.path.join(PLUGINS_STOCK_CONFIG_DIR, mod.name + '.conf')
+
+ Logger.debug("loading '{0}'".format(user_config))
+ loaded_config, error = self.loader.load_config_from_file(user_config)
+ if error:
+ Logger.error("cannot load '{0}' : {1}. Will try stock version.".format(user_config, error))
+ Logger.debug("loading '{0}'".format(stock_config))
+ loaded_config, error = self.loader.load_config_from_file(stock_config)
+
+ if error:
+ Logger.error("cannot load '{0}': {1}".format(stock_config, error))
+
+ # Skip disabled modules
+ if getattr(loaded_module, 'disabled_by_default', False) and not mod.explicitly_enabled:
+ Logger.info("module '{0}' disabled by default".format(loaded_module.__name__))
+ continue
+
+ # Module initialization ---------------------------------------------------
+
+ initialized_module = Module(service=loaded_module, config=loaded_config)
+ Logger.debug("module status: '{module_name}' => [{status}] "
+ "(jobs: {jobs_number})".format(status='OK' if initialized_module else 'FAILED',
+ module_name=initialized_module.name,
+ jobs_number=len(initialized_module)))
+ if initialized_module:
+ self.modules[initialized_module.name] = initialized_module
+
+ @staticmethod
+ def check_job(job):
+ """
+ :param job: <Job>
+ :return:
+ """
+ try:
+ check_ok = bool(job.check())
+ except Exception as error:
+ job.error('check() unhandled exception: {error}'.format(error=error))
+ return None
+ else:
+ return check_ok
+
+ @staticmethod
+ def create_job_charts(job):
+ """
+ :param job: <Job>
+ :return:
+ """
+ try:
+ create_ok = job.create()
+ except Exception as error:
+ job.error('create() unhandled exception: {error}'.format(error=error))
+ return False
+ else:
+ return create_ok
+
+ def delete_job(self, job):
+ """
+ :param job: <Job>
+ :return:
+ """
+ del self.modules[job.module_name][job.id]
+
+ def run_check(self):
+ checked = list()
+ for job in self.jobs:
+ if job.name in checked:
+ job.info('check() => [DROPPED] (already served by another job)')
+ self.delete_job(job)
+ continue
+ ok = self.check_job(job)
+ if ok:
+ job.info('check() => [OK]')
+ checked.append(job.name)
+ job.checked = True
+ continue
+ if not job.is_autodetect() or ok is None:
+ job.info('check() => [FAILED]')
+ self.delete_job(job)
+ else:
+ job.info('check() => [RECHECK] (autodetection_retry: {0})'.format(job.recheck_every))
+
+ def run_create(self):
+ for job in self.jobs:
+ if not job.checked:
+ # skip autodetection_retry jobs
+ continue
+ ok = self.create_job_charts(job)
+ if ok:
+ job.debug('create() => [OK] (charts: {0})'.format(len(job.charts)))
+ job.created = True
+ continue
+ job.error('create() => [FAILED] (charts: {0})'.format(len(job.charts)))
+ self.delete_job(job)
+
+ def start(self):
+ self.run_check()
+ self.run_create()
+ for job in self.jobs:
+ if job.created:
+ job.start()
+
+ while True:
+ if threading.active_count() <= 1 and not self.autodetect_jobs:
+ run_and_exit(Logger.info)('FINISHED')
+
+ sleep(self.sleep_time)
+ self.cleanup()
+ self.autodetect_retry()
+
+ # FIXME: https://github.com/netdata/netdata/issues/3817
+ if self.do_gc and self.runs_counter % self.gc_interval == 0:
+ v = gc.collect()
+ Logger.debug("GC full collection run result: {0}".format(v))
+
+ def cleanup(self):
+ for job in self.dead_jobs:
+ self.delete_job(job)
+ for mod in self:
+ if not mod:
+ del self.modules[mod.name]
+
+ def autodetect_retry(self):
+ self.runs_counter += self.sleep_time
+ for job in self.autodetect_jobs:
+ if self.runs_counter % job.recheck_every == 0:
+ checked = self.check_job(job)
+ if checked:
+ created = self.create_job_charts(job)
+ if not created:
+ self.delete_job(job)
+ continue
+ job.start()
+
+
+if __name__ == '__main__':
+ DEBUG, TRACE, OVERRIDE_UPDATE_EVERY, MODULES_TO_RUN = parse_cmd()
+ Logger = PythonDLogger()
+ if DEBUG:
+ Logger.logger.severity = 'DEBUG'
+ if TRACE:
+ Logger.log_traceback = True
+ Logger.info('Using python {version}'.format(version=PY_VERSION[0]))
+
+ plugin = Plugin()
+ plugin.start()
diff --git a/collectors/python.d.plugin/python.d.plugin.in b/collectors/python.d.plugin/python.d.plugin.in
new file mode 100755
index 000000000..7ac03fd99
--- /dev/null
+++ b/collectors/python.d.plugin/python.d.plugin.in
@@ -0,0 +1,427 @@
+#!/usr/bin/env bash
+'''':; exec "$(command -v python || command -v python3 || command -v python2 ||
+echo "ERROR python IS NOT AVAILABLE IN THIS SYSTEM")" "$0" "$@" # '''
+
+# -*- coding: utf-8 -*-
+# Description:
+# Author: Pawel Krupa (paulfantom)
+# Author: Ilya Mashchenko (l2isbad)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+import gc
+import os
+import sys
+import threading
+
+from re import sub
+from sys import version_info, argv
+from time import sleep
+
+GC_RUN = True
+GC_COLLECT_EVERY = 300
+
+PY_VERSION = version_info[:2]
+
+USER_CONFIG_DIR = os.getenv('NETDATA_USER_CONFIG_DIR', '@configdir_POST@')
+STOCK_CONFIG_DIR = os.getenv('NETDATA_STOCK_CONFIG_DIR', '@libconfigdir_POST@')
+
+PLUGINS_USER_CONFIG_DIR = os.path.join(USER_CONFIG_DIR, 'python.d')
+PLUGINS_STOCK_CONFIG_DIR = os.path.join(STOCK_CONFIG_DIR, 'python.d')
+
+
+PLUGINS_DIR = os.path.abspath(os.getenv(
+ 'NETDATA_PLUGINS_DIR',
+ os.path.dirname(__file__)) + '/../python.d')
+
+
+PYTHON_MODULES_DIR = os.path.join(PLUGINS_DIR, 'python_modules')
+
+sys.path.append(PYTHON_MODULES_DIR)
+
+from bases.loaders import ModuleAndConfigLoader # noqa: E402
+from bases.loggers import PythonDLogger # noqa: E402
+from bases.collection import setdefault_values, run_and_exit # noqa: E402
+
+try:
+ from collections import OrderedDict
+except ImportError:
+ from third_party.ordereddict import OrderedDict
+
+BASE_CONFIG = {'update_every': os.getenv('NETDATA_UPDATE_EVERY', 1),
+ 'retries': 60,
+ 'priority': 60000,
+ 'autodetection_retry': 0,
+ 'chart_cleanup': 10,
+ 'name': str()}
+
+
+MODULE_EXTENSION = '.chart.py'
+OBSOLETE_MODULES = ['apache_cache', 'gunicorn_log', 'nginx_log']
+
+
+def module_ok(m):
+ return m.endswith(MODULE_EXTENSION) and m[:-len(MODULE_EXTENSION)] not in OBSOLETE_MODULES
+
+
+ALL_MODULES = [m for m in sorted(os.listdir(PLUGINS_DIR)) if module_ok(m)]
+
+
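+# Command line arguments (passed by netdata or given manually), e.g.:
+#   python.d.plugin [update_every] [debug] [trace] [module ...]
+# where update_every is an integer override of the collection frequency and
+# any module names restrict the run to that subset of ALL_MODULES.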
+def parse_cmd():
+ debug = 'debug' in argv[1:]
+ trace = 'trace' in argv[1:]
+ override_update_every = next((arg for arg in argv[1:] if arg.isdigit() and int(arg) > 1), False)
+ modules = [''.join([m, MODULE_EXTENSION]) for m in argv[1:] if ''.join([m, MODULE_EXTENSION]) in ALL_MODULES]
+ return debug, trace, override_update_every, modules or ALL_MODULES
+
+
+def multi_job_check(config):
+ return next((True for key in config if isinstance(config[key], dict)), False)
+
+
+class RawModule:
+ def __init__(self, name, path, explicitly_enabled=True):
+ self.name = name
+ self.path = path
+ self.explicitly_enabled = explicitly_enabled
+
+
+class Job(object):
+ def __init__(self, initialized_job, job_id):
+ """
+ :param initialized_job: instance of <Class Service>
+ :param job_id: <str>
+ """
+ self.job = initialized_job
+ self.id = job_id # key in Modules.jobs()
+ self.module_name = self.job.__module__ # used in Plugin.delete_job()
+ self.recheck_every = self.job.configuration.pop('autodetection_retry')
+ self.checked = False # used in Plugin.check_job()
+ self.created = False # used in Plugin.create_job_charts()
+ if self.job.update_every < int(OVERRIDE_UPDATE_EVERY):
+ self.job.update_every = int(OVERRIDE_UPDATE_EVERY)
+
+ def __getattr__(self, item):
+ return getattr(self.job, item)
+
+ def __repr__(self):
+ return self.job.__repr__()
+
+ def is_dead(self):
+ return bool(self.ident) and not self.is_alive()
+
+ def not_launched(self):
+ return not bool(self.ident)
+
+ def is_autodetect(self):
+ return self.recheck_every
+
+
+class Module(object):
+ def __init__(self, service, config):
+ """
+ :param service: <Module>
+ :param config: <dict>
+ """
+ self.service = service
+ self.name = service.__name__
+ self.config = self.jobs_configurations_builder(config)
+ self.jobs = OrderedDict()
+ self.counter = 1
+
+ self.initialize_jobs()
+
+ def __repr__(self):
+ return "<Class Module '{name}'>".format(name=self.name)
+
+ def __iter__(self):
+ return iter(OrderedDict(self.jobs).values())
+
+ def __getitem__(self, item):
+ return self.jobs[item]
+
+ def __delitem__(self, key):
+ del self.jobs[key]
+
+ def __len__(self):
+ return len(self.jobs)
+
+ def __bool__(self):
+ return bool(self.jobs)
+
+ def __nonzero__(self):
+ return self.__bool__()
+
+ def jobs_configurations_builder(self, config):
+ """
+ :param config: <dict>
+ :return:
+ """
+ counter = 0
+ job_base_config = dict()
+
+ for attr in BASE_CONFIG:
+ job_base_config[attr] = config.pop(attr, getattr(self.service, attr, BASE_CONFIG[attr]))
+
+ if not config:
+ config = {str(): dict()}
+ elif not multi_job_check(config):
+ config = {str(): config}
+
+ for job_name in config:
+ if not isinstance(config[job_name], dict):
+ continue
+
+ job_config = setdefault_values(config[job_name], base_dict=job_base_config)
+ job_name = sub(r'\s+', '_', job_name)
+ config[job_name]['name'] = sub(r'\s+', '_', config[job_name]['name'])
+ counter += 1
+ job_id = 'job' + str(counter).zfill(3)
+
+ yield job_id, job_name, job_config
+
+ def initialize_jobs(self):
+ """
+ :return:
+ """
+ for job_id, job_name, job_config in self.config:
+ job_config['job_name'] = job_name
+ job_config['override_name'] = job_config.pop('name')
+
+ try:
+ initialized_job = self.service.Service(configuration=job_config)
+ except Exception as error:
+ Logger.error("job initialization: '{module_name} {job_name}' "
+ "=> ['FAILED'] ({error})".format(module_name=self.name,
+ job_name=job_name,
+ error=error))
+ continue
+ else:
+ Logger.debug("job initialization: '{module_name} {job_name}' "
+ "=> ['OK']".format(module_name=self.name,
+ job_name=job_name or self.name))
+ self.jobs[job_id] = Job(initialized_job=initialized_job,
+ job_id=job_id)
+ del self.config
+ del self.service
+
+
+class Plugin(object):
+ def __init__(self):
+ self.loader = ModuleAndConfigLoader()
+ self.modules = OrderedDict()
+ self.sleep_time = 1
+ self.runs_counter = 0
+
+ user_config = os.path.join(USER_CONFIG_DIR, 'python.d.conf')
+ stock_config = os.path.join(STOCK_CONFIG_DIR, 'python.d.conf')
+
+ Logger.debug("loading '{0}'".format(user_config))
+ self.config, error = self.loader.load_config_from_file(user_config)
+
+ if error:
+ Logger.error("cannot load '{0}': {1}. Will try stock version.".format(user_config, error))
+ Logger.debug("loading '{0}'".format(stock_config))
+ self.config, error = self.loader.load_config_from_file(stock_config)
+ if error:
+ Logger.error("cannot load '{0}': {1}".format(stock_config, error))
+
+ self.do_gc = self.config.get("gc_run", GC_RUN)
+ self.gc_interval = self.config.get("gc_interval", GC_COLLECT_EVERY)
+
+ if not self.config.get('enabled', True):
+ run_and_exit(Logger.info)('DISABLED in configuration file.')
+
+ self.load_and_initialize_modules()
+ if not self.modules:
+ run_and_exit(Logger.info)('No modules to run. Exit...')
+
+ def __iter__(self):
+ return iter(OrderedDict(self.modules).values())
+
+ @property
+ def jobs(self):
+ return (job for mod in self for job in mod)
+
+ @property
+ def dead_jobs(self):
+ return (job for job in self.jobs if job.is_dead())
+
+ @property
+ def autodetect_jobs(self):
+ return [job for job in self.jobs if job.not_launched()]
+
+ def enabled_modules(self):
+ for mod in MODULES_TO_RUN:
+ mod_name = mod[:-len(MODULE_EXTENSION)]
+ mod_path = os.path.join(PLUGINS_DIR, mod)
+ if any(
+ [
+ self.config.get('default_run', True) and self.config.get(mod_name, True),
+ (not self.config.get('default_run')) and self.config.get(mod_name),
+ ]
+ ):
+ yield RawModule(
+ name=mod_name,
+ path=mod_path,
+ explicitly_enabled=self.config.get(mod_name),
+ )
+
+ def load_and_initialize_modules(self):
+ for mod in self.enabled_modules():
+
+ # Load module from file ------------------------------------------------------------
+ loaded_module, error = self.loader.load_module_from_file(mod.name, mod.path)
+ log = Logger.error if error else Logger.debug
+ log("module load source: '{module_name}' => [{status}]".format(status='FAILED' if error else 'OK',
+ module_name=mod.name))
+ if error:
+ Logger.error("load source error : {0}".format(error))
+ continue
+
+ # Load module config from file ------------------------------------------------------
+ user_config = os.path.join(PLUGINS_USER_CONFIG_DIR, mod.name + '.conf')
+ stock_config = os.path.join(PLUGINS_STOCK_CONFIG_DIR, mod.name + '.conf')
+
+ Logger.debug("loading '{0}'".format(user_config))
+ loaded_config, error = self.loader.load_config_from_file(user_config)
+ if error:
+ Logger.error("cannot load '{0}' : {1}. Will try stock version.".format(user_config, error))
+ Logger.debug("loading '{0}'".format(stock_config))
+ loaded_config, error = self.loader.load_config_from_file(stock_config)
+
+ if error:
+ Logger.error("cannot load '{0}': {1}".format(stock_config, error))
+
+ # Skip disabled modules
+ if getattr(loaded_module, 'disabled_by_default', False) and not mod.explicitly_enabled:
+ Logger.info("module '{0}' disabled by default".format(loaded_module.__name__))
+ continue
+
+ # Module initialization ---------------------------------------------------
+
+ initialized_module = Module(service=loaded_module, config=loaded_config)
+ Logger.debug("module status: '{module_name}' => [{status}] "
+ "(jobs: {jobs_number})".format(status='OK' if initialized_module else 'FAILED',
+ module_name=initialized_module.name,
+ jobs_number=len(initialized_module)))
+ if initialized_module:
+ self.modules[initialized_module.name] = initialized_module
+
+ @staticmethod
+ def check_job(job):
+ """
+ :param job: <Job>
+ :return:
+ """
+ try:
+ check_ok = bool(job.check())
+ except Exception as error:
+ job.error('check() unhandled exception: {error}'.format(error=error))
+ return None
+ else:
+ return check_ok
+
+ @staticmethod
+ def create_job_charts(job):
+ """
+ :param job: <Job>
+ :return:
+ """
+ try:
+ create_ok = job.create()
+ except Exception as error:
+ job.error('create() unhandled exception: {error}'.format(error=error))
+ return False
+ else:
+ return create_ok
+
+ def delete_job(self, job):
+ """
+ :param job: <Job>
+ :return:
+ """
+ del self.modules[job.module_name][job.id]
+
+ def run_check(self):
+ checked = list()
+ for job in self.jobs:
+ if job.name in checked:
+ job.info('check() => [DROPPED] (already served by another job)')
+ self.delete_job(job)
+ continue
+ ok = self.check_job(job)
+ if ok:
+ job.info('check() => [OK]')
+ checked.append(job.name)
+ job.checked = True
+ continue
+ if not job.is_autodetect() or ok is None:
+ job.info('check() => [FAILED]')
+ self.delete_job(job)
+ else:
+ job.info('check() => [RECHECK] (autodetection_retry: {0})'.format(job.recheck_every))
+
+ def run_create(self):
+ for job in self.jobs:
+ if not job.checked:
+ # skip autodetection_retry jobs
+ continue
+ ok = self.create_job_charts(job)
+ if ok:
+ job.debug('create() => [OK] (charts: {0})'.format(len(job.charts)))
+ job.created = True
+ continue
+ job.error('create() => [FAILED] (charts: {0})'.format(len(job.charts)))
+ self.delete_job(job)
+
+ def start(self):
+ self.run_check()
+ self.run_create()
+ for job in self.jobs:
+ if job.created:
+ job.start()
+
+ while True:
+ if threading.active_count() <= 1 and not self.autodetect_jobs:
+ run_and_exit(Logger.info)('FINISHED')
+
+ sleep(self.sleep_time)
+ self.cleanup()
+ self.autodetect_retry()
+
+ # FIXME: https://github.com/netdata/netdata/issues/3817
+ if self.do_gc and self.runs_counter % self.gc_interval == 0:
+ v = gc.collect()
+ Logger.debug("GC full collection run result: {0}".format(v))
+
+ def cleanup(self):
+ for job in self.dead_jobs:
+ self.delete_job(job)
+ for mod in self:
+ if not mod:
+ del self.modules[mod.name]
+
+ def autodetect_retry(self):
+ self.runs_counter += self.sleep_time
+ for job in self.autodetect_jobs:
+ if self.runs_counter % job.recheck_every == 0:
+ checked = self.check_job(job)
+ if checked:
+ created = self.create_job_charts(job)
+ if not created:
+ self.delete_job(job)
+ continue
+ job.start()
+
+
+if __name__ == '__main__':
+ DEBUG, TRACE, OVERRIDE_UPDATE_EVERY, MODULES_TO_RUN = parse_cmd()
+ Logger = PythonDLogger()
+ if DEBUG:
+ Logger.logger.severity = 'DEBUG'
+ if TRACE:
+ Logger.log_traceback = True
+ Logger.info('Using python {version}'.format(version=PY_VERSION[0]))
+
+ plugin = Plugin()
+ plugin.start()
diff --git a/collectors/python.d.plugin/python_modules/__init__.py b/collectors/python.d.plugin/python_modules/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/__init__.py
diff --git a/collectors/python.d.plugin/python_modules/bases/FrameworkServices/ExecutableService.py b/collectors/python.d.plugin/python_modules/bases/FrameworkServices/ExecutableService.py
new file mode 100644
index 000000000..72f9ff714
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/bases/FrameworkServices/ExecutableService.py
@@ -0,0 +1,89 @@
+# -*- coding: utf-8 -*-
+# Description:
+# Author: Pawel Krupa (paulfantom)
+# Author: Ilya Mashchenko (l2isbad)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+import os
+
+from subprocess import Popen, PIPE
+
+from bases.FrameworkServices.SimpleService import SimpleService
+from bases.collection import find_binary
+
+
+class ExecutableService(SimpleService):
+ def __init__(self, configuration=None, name=None):
+ SimpleService.__init__(self, configuration=configuration, name=name)
+ self.command = None
+
+ def _get_raw_data(self, stderr=False, command=None):
+ """
+ Get raw data from executed command
+ :return: <list>
+ """
+ try:
+ p = Popen(command if command else self.command, stdout=PIPE, stderr=PIPE)
+ except Exception as error:
+ self.error('Executing command {command} resulted in error: {error}'.format(command=command or self.command,
+ error=error))
+ return None
+ data = list()
+ std = p.stderr if stderr else p.stdout
+ for line in std:
+ try:
+ data.append(line.decode('utf-8'))
+ except TypeError:
+ continue
+
+ return data
+
+ def check(self):
+ """
+ Parse basic configuration, check if command is whitelisted and is returning values
+ :return: <boolean>
+ """
+ # Preference: 1. "command" from configuration file 2. "command" from plugin (if specified)
+ if 'command' in self.configuration:
+ self.command = self.configuration['command']
+
+ # "command" must be: 1.not None 2. type <str>
+ if not (self.command and isinstance(self.command, str)):
+ self.error('Command is not defined or command type is not <str>')
+ return False
+
+ # Split "command" into: 1. command <str> 2. options <list>
+ command, opts = self.command.split()[0], self.command.split()[1:]
+
+ # Check for "bad" symbols in options. No pipes, redirects etc.
+ opts_list = ['&', '|', ';', '>', '<']
+ bad_opts = set(''.join(opts)) & set(opts_list)
+ if bad_opts:
+ self.error("Bad command argument(s): {opts}".format(opts=bad_opts))
+ return False
+
+ # Find absolute path ('echo' => '/bin/echo')
+ if '/' not in command:
+ command = find_binary(command)
+ if not command:
+ self.error('Can\'t locate "{command}" binary'.format(command=self.command))
+ return False
+ # Check if binary exist and executable
+ else:
+ if not os.access(command, os.X_OK):
+ self.error('"{binary}" is not executable'.format(binary=command))
+ return False
+
+ self.command = [command] + opts if opts else [command]
+
+ try:
+ data = self._get_data()
+ except Exception as error:
+ self.error('_get_data() failed. Command: {command}. Error: {error}'.format(command=self.command,
+ error=error))
+ return False
+
+ if isinstance(data, dict) and data:
+ return True
+ self.error('Command "{command}" returned no data'.format(command=self.command))
+ return False
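+
+
+# A minimal, hypothetical module built on ExecutableService could look like
+# the commented sketch below (the module name, chart layout and the parsed
+# command output are illustrative assumptions, not part of this class):
+#
+#   from bases.FrameworkServices.ExecutableService import ExecutableService
+#
+#   ORDER = ['lines']
+#   CHARTS = {
+#       'lines': {
+#           'options': [None, 'Lines in /etc/hosts', 'lines', 'example', 'example.lines', 'line'],
+#           'lines': [['count', 'count', 'absolute']],
+#       }
+#   }
+#
+#   class Service(ExecutableService):
+#       def __init__(self, configuration=None, name=None):
+#           ExecutableService.__init__(self, configuration=configuration, name=name)
+#           self.order = ORDER
+#           self.definitions = CHARTS
+#           self.command = 'wc -l /etc/hosts'  # may be overridden via 'command' in the job config
+#
+#       def _get_data(self):
+#           raw = self._get_raw_data()  # list of stdout lines, or None on failure
+#           if not raw:
+#               return None
+#           return {'count': int(raw[0].split()[0])}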
diff --git a/collectors/python.d.plugin/python_modules/bases/FrameworkServices/LogService.py b/collectors/python.d.plugin/python_modules/bases/FrameworkServices/LogService.py
new file mode 100644
index 000000000..5acfd73f8
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/bases/FrameworkServices/LogService.py
@@ -0,0 +1,80 @@
+# -*- coding: utf-8 -*-
+# Description:
+# Author: Pawel Krupa (paulfantom)
+# Author: Ilya Mashchenko (l2isbad)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from glob import glob
+import os
+
+from bases.FrameworkServices.SimpleService import SimpleService
+
+
+class LogService(SimpleService):
+ def __init__(self, configuration=None, name=None):
+ SimpleService.__init__(self, configuration=configuration, name=name)
+ self.log_path = self.configuration.get('path')
+ self.__glob_path = self.log_path
+ self._last_position = 0
+ self.__re_find = dict(current=0, run=0, maximum=60)
+
+ def _get_raw_data(self):
+ """
+ Get log lines since last poll
+ :return: list
+ """
+ lines = list()
+ try:
+ if self.__re_find['current'] == self.__re_find['run']:
+ self._find_recent_log_file()
+ size = os.path.getsize(self.log_path)
+ if size == self._last_position:
+ self.__re_find['current'] += 1
+ return list() # return empty list if nothing has changed
+ elif size < self._last_position:
+ self._last_position = 0 # read from beginning if file has shrunk
+
+ with open(self.log_path) as fp:
+ fp.seek(self._last_position)
+ for line in fp:
+ lines.append(line)
+ self._last_position = fp.tell()
+ self.__re_find['current'] = 0
+ except (OSError, IOError) as error:
+ self.__re_find['current'] += 1
+ self.error(str(error))
+
+ return lines or None
+
+ def _find_recent_log_file(self):
+ """
+ :return:
+ """
+ self.__re_find['run'] = self.__re_find['maximum']
+ self.__re_find['current'] = 0
+ self.__glob_path = self.__glob_path or self.log_path # workaround for modules w/o config files
+ path_list = glob(self.__glob_path)
+ if path_list:
+ self.log_path = max(path_list)
+ return True
+ return False
+
+ def check(self):
+ """
+ Parse basic configuration and check if log file exists
+ :return: boolean
+ """
+ if not self.log_path:
+ self.error('No path to log specified')
+ return None
+
+ if self._find_recent_log_file() and os.access(self.log_path, os.R_OK) and os.path.isfile(self.log_path):
+ return True
+ self.error('Cannot access {0}'.format(self.log_path))
+ return False
+
+ def create(self):
+ # set cursor at last byte of log file
+ self._last_position = os.path.getsize(self.log_path)
+ status = SimpleService.create(self)
+ return status
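+
+
+# A minimal, hypothetical module built on LogService could be sketched as
+# below (the chart layout and the default log path are illustrative
+# assumptions):
+#
+#   from bases.FrameworkServices.LogService import LogService
+#
+#   ORDER = ['events']
+#   CHARTS = {
+#       'events': {
+#           'options': [None, 'New log lines', 'lines/s', 'example', 'example.events', 'line'],
+#           'lines': [['lines', 'lines', 'incremental']],
+#       }
+#   }
+#
+#   class Service(LogService):
+#       def __init__(self, configuration=None, name=None):
+#           LogService.__init__(self, configuration=configuration, name=name)
+#           self.order = ORDER
+#           self.definitions = CHARTS
+#           self.log_path = self.configuration.get('path', '/var/log/syslog')
+#           self.data = {'lines': 0}
+#
+#       def _get_data(self):
+#           raw = self._get_raw_data() or []  # only the lines appended since the last poll
+#           self.data['lines'] += len(raw)
+#           return self.data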
diff --git a/collectors/python.d.plugin/python_modules/bases/FrameworkServices/MySQLService.py b/collectors/python.d.plugin/python_modules/bases/FrameworkServices/MySQLService.py
new file mode 100644
index 000000000..53807e2c4
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/bases/FrameworkServices/MySQLService.py
@@ -0,0 +1,159 @@
+# -*- coding: utf-8 -*-
+# Description:
+# Author: Ilya Mashchenko (l2isbad)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from sys import exc_info
+
+try:
+ import MySQLdb
+
+ PY_MYSQL = True
+except ImportError:
+ try:
+ import pymysql as MySQLdb
+
+ PY_MYSQL = True
+ except ImportError:
+ PY_MYSQL = False
+
+from bases.FrameworkServices.SimpleService import SimpleService
+
+
+class MySQLService(SimpleService):
+ def __init__(self, configuration=None, name=None):
+ SimpleService.__init__(self, configuration=configuration, name=name)
+ self.__connection = None
+ self.__conn_properties = dict()
+ self.extra_conn_properties = dict()
+ self.__queries = self.configuration.get('queries', dict())
+ self.queries = dict()
+
+ def __connect(self):
+ try:
+ connection = MySQLdb.connect(connect_timeout=self.update_every, **self.__conn_properties)
+ except (MySQLdb.MySQLError, TypeError, AttributeError) as error:
+ return None, str(error)
+ else:
+ return connection, None
+
+ def check(self):
+ def get_connection_properties(conf, extra_conf):
+ properties = dict()
+ if conf.get('user'):
+ properties['user'] = conf['user']
+ if conf.get('pass'):
+ properties['passwd'] = conf['pass']
+ if conf.get('socket'):
+ properties['unix_socket'] = conf['socket']
+ elif conf.get('host'):
+ properties['host'] = conf['host']
+ properties['port'] = int(conf.get('port', 3306))
+ elif conf.get('my.cnf'):
+ if MySQLdb.__name__ == 'pymysql':
+ self.error('"my.cnf" parsing is not working for pymysql')
+ else:
+ properties['read_default_file'] = conf['my.cnf']
+ if isinstance(extra_conf, dict) and extra_conf:
+ properties.update(extra_conf)
+
+ return properties or None
+
+ def is_valid_queries_dict(raw_queries, log_error):
+ """
+ :param raw_queries: dict:
+ :param log_error: function:
+ :return: dict or None
+
+            raw_queries is valid when it is a non-empty <dict> and remains
+            non-empty after filtering every query with is_valid_query()
+ """
+
+ def is_valid_query(query):
+ return all([isinstance(query, str),
+ query.startswith(('SELECT', 'select', 'SHOW', 'show'))])
+
+ if hasattr(raw_queries, 'keys') and raw_queries:
+ valid_queries = dict([(n, q) for n, q in raw_queries.items() if is_valid_query(q)])
+ bad_queries = set(raw_queries) - set(valid_queries)
+
+ if bad_queries:
+ log_error('Removed query(s): {queries}'.format(queries=bad_queries))
+ return valid_queries
+ else:
+                log_error('Unsupported "queries" format. Must be a non-empty <dict>')
+ return None
+
+ if not PY_MYSQL:
+ self.error('MySQLdb or PyMySQL module is needed to use mysql.chart.py plugin')
+ return False
+
+ # Preference: 1. "queries" from the configuration file 2. "queries" from the module
+ self.queries = self.__queries or self.queries
+ # Check if "self.queries" exist, not empty and all queries are in valid format
+ self.queries = is_valid_queries_dict(self.queries, self.error)
+ if not self.queries:
+ return None
+
+ # Get connection properties
+ self.__conn_properties = get_connection_properties(self.configuration, self.extra_conn_properties)
+ if not self.__conn_properties:
+ self.error('Connection properties are missing')
+ return False
+
+ # Create connection to the database
+ self.__connection, error = self.__connect()
+ if error:
+ self.error('Can\'t establish connection to MySQL: {error}'.format(error=error))
+ return False
+
+ try:
+ data = self._get_data()
+ except Exception as error:
+ self.error('_get_data() failed. Error: {error}'.format(error=error))
+ return False
+
+ if isinstance(data, dict) and data:
+ return True
+ self.error("_get_data() returned no data or type is not <dict>")
+ return False
+
+ def _get_raw_data(self, description=None):
+ """
+ Get raw data from MySQL server
+ :return: dict: fetchall() or (fetchall(), description)
+ """
+
+ if not self.__connection:
+ self.__connection, error = self.__connect()
+ if error:
+ return None
+
+ raw_data = dict()
+ queries = dict(self.queries)
+ try:
+ with self.__connection as cursor:
+ for name, query in queries.items():
+ try:
+ cursor.execute(query)
+ except (MySQLdb.ProgrammingError, MySQLdb.OperationalError) as error:
+ if self.__is_error_critical(err_class=exc_info()[0], err_text=str(error)):
+ raise RuntimeError
+                        self.error('Removed query: {name}[{query}]. Error: {error}'.format(
+                            name=name, query=query, error=error))
+ self.queries.pop(name)
+ continue
+ else:
+ raw_data[name] = (cursor.fetchall(), cursor.description) if description else cursor.fetchall()
+ self.__connection.commit()
+ except (MySQLdb.MySQLError, RuntimeError, TypeError, AttributeError):
+ self.__connection.close()
+ self.__connection = None
+ return None
+ else:
+ return raw_data or None
+
+ @staticmethod
+ def __is_error_critical(err_class, err_text):
+ return err_class == MySQLdb.OperationalError and all(['denied' not in err_text,
+ 'Unknown column' not in err_text])
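+
+
+# A minimal, hypothetical module built on MySQLService could be sketched as
+# below (the query, chart and dimension names are illustrative assumptions):
+#
+#   from bases.FrameworkServices.MySQLService import MySQLService
+#
+#   ORDER = ['questions']
+#   CHARTS = {
+#       'questions': {
+#           'options': [None, 'Questions', 'questions/s', 'example', 'example.questions', 'line'],
+#           'lines': [['Questions', 'questions', 'incremental']],
+#       }
+#   }
+#
+#   class Service(MySQLService):
+#       def __init__(self, configuration=None, name=None):
+#           MySQLService.__init__(self, configuration=configuration, name=name)
+#           self.order = ORDER
+#           self.definitions = CHARTS
+#           # only SELECT/SHOW statements pass is_valid_queries_dict() above
+#           self.queries = dict(global_status="SHOW GLOBAL STATUS LIKE 'Questions'")
+#
+#       def _get_data(self):
+#           raw = self._get_raw_data()  # {'global_status': ((name, value), ...)} or None
+#           if not raw:
+#               return None
+#           return dict((str(name), int(value)) for name, value in raw['global_status'])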
diff --git a/collectors/python.d.plugin/python_modules/bases/FrameworkServices/SimpleService.py b/collectors/python.d.plugin/python_modules/bases/FrameworkServices/SimpleService.py
new file mode 100644
index 000000000..dd53fbc14
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/bases/FrameworkServices/SimpleService.py
@@ -0,0 +1,261 @@
+# -*- coding: utf-8 -*-
+# Description:
+# Author: Pawel Krupa (paulfantom)
+# Author: Ilya Mashchenko (l2isbad)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from threading import Thread
+from time import sleep
+
+from third_party.monotonic import monotonic
+
+from bases.charts import Charts, ChartError, create_runtime_chart
+from bases.collection import OldVersionCompatibility, safe_print
+from bases.loggers import PythonDLimitedLogger
+
+RUNTIME_CHART_UPDATE = 'BEGIN netdata.runtime_{job_name} {since_last}\n' \
+ 'SET run_time = {elapsed}\n' \
+ 'END\n'
+
+
+class RuntimeCounters:
+ def __init__(self, configuration):
+ """
+ :param configuration: <dict>
+ """
+ self.FREQ = int(configuration.pop('update_every'))
+ self.START_RUN = 0
+ self.NEXT_RUN = 0
+ self.PREV_UPDATE = 0
+ self.SINCE_UPDATE = 0
+ self.ELAPSED = 0
+ self.RETRIES = 0
+ self.RETRIES_MAX = configuration.pop('retries')
+ self.PENALTY = 0
+ self.RUNS = 1
+
+ def is_sleep_time(self):
+ return self.START_RUN < self.NEXT_RUN
+
+
+class SimpleService(Thread, PythonDLimitedLogger, OldVersionCompatibility, object):
+ """
+ Prototype of Service class.
+ Implemented basic functionality to run jobs by `python.d.plugin`
+ """
+ def __init__(self, configuration, name=''):
+ """
+ :param configuration: <dict>
+ :param name: <str>
+ """
+ Thread.__init__(self)
+ self.daemon = True
+ PythonDLimitedLogger.__init__(self)
+ OldVersionCompatibility.__init__(self)
+ self.configuration = configuration
+ self.order = list()
+ self.definitions = dict()
+
+ self.module_name = self.__module__
+ self.job_name = configuration.pop('job_name')
+ self.override_name = configuration.pop('override_name')
+ self.fake_name = None
+
+ self._runtime_counters = RuntimeCounters(configuration=configuration)
+ self.charts = Charts(job_name=self.actual_name,
+ priority=configuration.pop('priority'),
+ cleanup=configuration.pop('chart_cleanup'),
+ get_update_every=self.get_update_every,
+ module_name=self.module_name)
+
+ def __repr__(self):
+ return '<{cls_bases}: {name}>'.format(cls_bases=', '.join(c.__name__ for c in self.__class__.__bases__),
+ name=self.name)
+
+ @property
+ def name(self):
+ if self.job_name:
+ return '_'.join([self.module_name, self.override_name or self.job_name])
+ return self.module_name
+
+ def actual_name(self):
+ return self.fake_name or self.name
+
+ @property
+ def runs_counter(self):
+ return self._runtime_counters.RUNS
+
+ @property
+ def update_every(self):
+ return self._runtime_counters.FREQ
+
+ @update_every.setter
+ def update_every(self, value):
+ """
+ :param value: <int>
+ :return:
+ """
+ self._runtime_counters.FREQ = value
+
+ def get_update_every(self):
+ return self.update_every
+
+ def check(self):
+ """
+ check() prototype
+ :return: boolean
+ """
+ self.debug("job doesn't implement check() method. Using default which simply invokes get_data().")
+ data = self.get_data()
+ if data and isinstance(data, dict):
+ return True
+ self.debug('returned value is wrong: {0}'.format(data))
+ return False
+
+ @create_runtime_chart
+ def create(self):
+ for chart_name in self.order:
+ chart_config = self.definitions.get(chart_name)
+
+ if not chart_config:
+ self.debug("create() => [NOT ADDED] chart '{chart_name}' not in definitions. "
+ "Skipping it.".format(chart_name=chart_name))
+ continue
+
+ # create chart
+ chart_params = [chart_name] + chart_config['options']
+ try:
+ self.charts.add_chart(params=chart_params)
+ except ChartError as error:
+ self.error("create() => [NOT ADDED] (chart '{chart}': {error})".format(chart=chart_name,
+ error=error))
+ continue
+
+ # add dimensions to chart
+ for dimension in chart_config['lines']:
+ try:
+ self.charts[chart_name].add_dimension(dimension)
+ except ChartError as error:
+ self.error("create() => [NOT ADDED] (dimension '{dimension}': {error})".format(dimension=dimension,
+ error=error))
+ continue
+
+ # add variables to chart
+ if 'variables' in chart_config:
+ for variable in chart_config['variables']:
+ try:
+ self.charts[chart_name].add_variable(variable)
+ except ChartError as error:
+ self.error("create() => [NOT ADDED] (variable '{var}': {error})".format(var=variable,
+ error=error))
+ continue
+
+ del self.order
+ del self.definitions
+
+ # True if job has at least 1 chart else False
+ return bool(self.charts)
+
+ def run(self):
+ """
+ Runs job in thread. Handles retries.
+ Exits when job failed or timed out.
+ :return: None
+ """
+ job = self._runtime_counters
+ self.debug('started, update frequency: {freq}, '
+ 'retries: {retries}'.format(freq=job.FREQ, retries=job.RETRIES_MAX - job.RETRIES))
+
+ while True:
+ job.START_RUN = monotonic()
+
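+            # schedule the next run on the next multiple of the update
+            # frequency, pushed back by any penalty from failed collections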
+ job.NEXT_RUN = job.START_RUN - (job.START_RUN % job.FREQ) + job.FREQ + job.PENALTY
+
+ self.sleep_until_next_run()
+
+ if job.PREV_UPDATE:
+ job.SINCE_UPDATE = int((job.START_RUN - job.PREV_UPDATE) * 1e6)
+
+ try:
+ updated = self.update(interval=job.SINCE_UPDATE)
+ except Exception as error:
+ self.error('update() unhandled exception: {error}'.format(error=error))
+ updated = False
+
+ job.RUNS += 1
+
+ if not updated:
+ if not self.manage_retries():
+ return
+ else:
+ job.ELAPSED = int((monotonic() - job.START_RUN) * 1e3)
+ job.PREV_UPDATE = job.START_RUN
+ job.RETRIES, job.PENALTY = 0, 0
+ safe_print(RUNTIME_CHART_UPDATE.format(job_name=self.name,
+ since_last=job.SINCE_UPDATE,
+ elapsed=job.ELAPSED))
+ self.debug('update => [{status}] (elapsed time: {elapsed}, '
+ 'retries left: {retries})'.format(status='OK' if updated else 'FAILED',
+ elapsed=job.ELAPSED if updated else '-',
+ retries=job.RETRIES_MAX - job.RETRIES))
+
+ def update(self, interval):
+ """
+ :return:
+ """
+ data = self.get_data()
+ if not data:
+ self.debug('get_data() returned no data')
+ return False
+ elif not isinstance(data, dict):
+ self.debug('get_data() returned incorrect type data')
+ return False
+
+ updated = False
+
+ for chart in self.charts:
+ if chart.flags.obsoleted:
+ if chart.can_be_updated(data):
+ chart.refresh()
+ else:
+ continue
+ elif self.charts.cleanup and chart.penalty >= self.charts.cleanup:
+ chart.obsolete()
+ self.error("chart '{0}' was suppressed due to non updating".format(chart.name))
+ continue
+
+ ok = chart.update(data, interval)
+ if ok:
+ updated = True
+
+ if not updated:
+ self.debug('none of the charts has been updated')
+
+ return updated
+
+ def manage_retries(self):
+ rc = self._runtime_counters
+ rc.RETRIES += 1
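+        # every 5th consecutive failure adds RETRIES * update_every / 2
+        # seconds of penalty to the next scheduled run (see run() above)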
+ if rc.RETRIES % 5 == 0:
+ rc.PENALTY = int(rc.RETRIES * self.update_every / 2)
+ if rc.RETRIES >= rc.RETRIES_MAX:
+ self.error('stopped after {0} data collection failures in a row'.format(rc.RETRIES_MAX))
+ return False
+ return True
+
+ def sleep_until_next_run(self):
+ job = self._runtime_counters
+
+        # sleep() is interruptible
+ while job.is_sleep_time():
+ sleep_time = job.NEXT_RUN - job.START_RUN
+ self.debug('sleeping for {sleep_time} to reach frequency of {freq} sec'.format(sleep_time=sleep_time,
+ freq=job.FREQ + job.PENALTY))
+ sleep(sleep_time)
+ job.START_RUN = monotonic()
+
+ def get_data(self):
+ return self._get_data()
+
+ def _get_data(self):
+ raise NotImplementedError
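+
+
+# In the simplest case a module only subclasses SimpleService, sets
+# self.order / self.definitions and implements _get_data() returning a dict
+# of {dimension_id: value}. A hypothetical sketch (chart names are
+# illustrative):
+#
+#   from random import randint
+#
+#   from bases.FrameworkServices.SimpleService import SimpleService
+#
+#   ORDER = ['random']
+#   CHARTS = {
+#       'random': {
+#           'options': [None, 'A random number', 'number', 'example', 'example.random', 'line'],
+#           'lines': [['value', 'value', 'absolute']],
+#       }
+#   }
+#
+#   class Service(SimpleService):
+#       def __init__(self, configuration=None, name=None):
+#           SimpleService.__init__(self, configuration=configuration, name=name)
+#           self.order = ORDER
+#           self.definitions = CHARTS
+#
+#       def _get_data(self):
+#           return {'value': randint(0, 100)}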
diff --git a/collectors/python.d.plugin/python_modules/bases/FrameworkServices/SocketService.py b/collectors/python.d.plugin/python_modules/bases/FrameworkServices/SocketService.py
new file mode 100644
index 000000000..e85455307
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/bases/FrameworkServices/SocketService.py
@@ -0,0 +1,309 @@
+# -*- coding: utf-8 -*-
+# Description:
+# Author: Pawel Krupa (paulfantom)
+# Author: Ilya Mashchenko (l2isbad)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+import socket
+
+try:
+ import ssl
+except ImportError:
+ _TLS_SUPPORT = False
+else:
+ _TLS_SUPPORT = True
+
+from bases.FrameworkServices.SimpleService import SimpleService
+
+
+class SocketService(SimpleService):
+ def __init__(self, configuration=None, name=None):
+ self._sock = None
+ self._keep_alive = False
+ self.host = 'localhost'
+ self.port = None
+ self.unix_socket = None
+ self.dgram_socket = False
+ self.request = ''
+ self.tls = False
+ self.cert = None
+ self.key = None
+ self.__socket_config = None
+ self.__empty_request = "".encode()
+ SimpleService.__init__(self, configuration=configuration, name=name)
+
+ def _socket_error(self, message=None):
+ if self.unix_socket is not None:
+ self.error('unix socket "{socket}": {message}'.format(socket=self.unix_socket,
+ message=message))
+ else:
+ if self.__socket_config is not None:
+ _, _, _, _, sa = self.__socket_config
+ self.error('socket to "{address}" port {port}: {message}'.format(address=sa[0],
+ port=sa[1],
+ message=message))
+ else:
+ self.error('unknown socket: {0}'.format(message))
+
+ def _connect2socket(self, res=None):
+ """
+ Connect to a socket, passing the result of getaddrinfo()
+ :return: boolean
+ """
+ if res is None:
+ res = self.__socket_config
+ if res is None:
+ self.error("Cannot create socket to 'None':")
+ return False
+
+ af, sock_type, proto, _, sa = res
+ try:
+ self.debug('Creating socket to "{address}", port {port}'.format(address=sa[0], port=sa[1]))
+ self._sock = socket.socket(af, sock_type, proto)
+ except socket.error as error:
+ self.error('Failed to create socket "{address}", port {port}, error: {error}'.format(address=sa[0],
+ port=sa[1],
+ error=error))
+ self._sock = None
+ self.__socket_config = None
+ return False
+
+ if self.tls:
+ try:
+ self.debug('Encapsulating socket with TLS')
+ self._sock = ssl.wrap_socket(self._sock,
+ keyfile=self.key,
+ certfile=self.cert,
+ server_side=False,
+ cert_reqs=ssl.CERT_NONE)
+ except (socket.error, ssl.SSLError) as error:
+                self.error('Failed to wrap socket: {0}'.format(error))
+ self._disconnect()
+ self.__socket_config = None
+ return False
+
+ try:
+ self.debug('connecting socket to "{address}", port {port}'.format(address=sa[0], port=sa[1]))
+ self._sock.connect(sa)
+ except (socket.error, ssl.SSLError) as error:
+ self.error('Failed to connect to "{address}", port {port}, error: {error}'.format(address=sa[0],
+ port=sa[1],
+ error=error))
+ self._disconnect()
+ self.__socket_config = None
+ return False
+
+ self.debug('connected to "{address}", port {port}'.format(address=sa[0], port=sa[1]))
+ self.__socket_config = res
+ return True
+
+ def _connect2unixsocket(self):
+ """
+ Connect to a unix socket, given its filename
+ :return: boolean
+ """
+ if self.unix_socket is None:
+ self.error("cannot connect to unix socket 'None'")
+ return False
+
+ try:
+ self.debug('attempting DGRAM unix socket "{0}"'.format(self.unix_socket))
+ self._sock = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM)
+ self._sock.connect(self.unix_socket)
+ self.debug('connected DGRAM unix socket "{0}"'.format(self.unix_socket))
+ return True
+ except socket.error as error:
+ self.debug('Failed to connect DGRAM unix socket "{socket}": {error}'.format(socket=self.unix_socket,
+ error=error))
+
+ try:
+ self.debug('attempting STREAM unix socket "{0}"'.format(self.unix_socket))
+ self._sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
+ self._sock.connect(self.unix_socket)
+ self.debug('connected STREAM unix socket "{0}"'.format(self.unix_socket))
+ return True
+ except socket.error as error:
+ self.debug('Failed to connect STREAM unix socket "{socket}": {error}'.format(socket=self.unix_socket,
+ error=error))
+ self._sock = None
+ return False
+
+ def _connect(self):
+ """
+ Recreate socket and connect to it since sockets cannot be reused after closing
+ Available configurations are IPv6, IPv4 or UNIX socket
+ :return:
+ """
+ try:
+ if self.unix_socket is not None:
+ self._connect2unixsocket()
+
+ else:
+ if self.__socket_config is not None:
+ self._connect2socket()
+ else:
+ if self.dgram_socket:
+ sock_type = socket.SOCK_DGRAM
+ else:
+ sock_type = socket.SOCK_STREAM
+ for res in socket.getaddrinfo(self.host, self.port, socket.AF_UNSPEC, sock_type):
+ if self._connect2socket(res):
+ break
+
+ except Exception:
+ self._sock = None
+ self.__socket_config = None
+
+ if self._sock is not None:
+ self._sock.setblocking(0)
+ self._sock.settimeout(5)
+ self.debug('set socket timeout to: {0}'.format(self._sock.gettimeout()))
+
+ def _disconnect(self):
+ """
+ Close socket connection
+ :return:
+ """
+ if self._sock is not None:
+ try:
+ self.debug('closing socket')
+ self._sock.shutdown(2) # 0 - read, 1 - write, 2 - all
+ self._sock.close()
+ except Exception:
+ pass
+ self._sock = None
+
+ def _send(self, request=None):
+ """
+ Send request.
+ :return: boolean
+ """
+ # Send request if it is needed
+ if self.request != self.__empty_request:
+ try:
+ self.debug('sending request: {0}'.format(request or self.request))
+ self._sock.send(request or self.request)
+ except Exception as error:
+ self._socket_error('error sending request: {0}'.format(error))
+ self._disconnect()
+ return False
+ return True
+
+ def _receive(self, raw=False):
+ """
+ Receive data from socket
+ :param raw: set `True` to return bytes
+ :type raw: bool
+ :return: decoded str or raw bytes
+ :rtype: str/bytes
+ """
+ data = "" if not raw else b""
+ while True:
+ self.debug('receiving response')
+ try:
+ buf = self._sock.recv(4096)
+ except Exception as error:
+ self._socket_error('failed to receive response: {0}'.format(error))
+ self._disconnect()
+ break
+
+ if buf is None or len(buf) == 0: # handle server disconnect
+ if data == "" or data == b"":
+ self._socket_error('unexpectedly disconnected')
+ else:
+ self.debug('server closed the connection')
+ self._disconnect()
+ break
+
+ self.debug('received data')
+ data += buf.decode('utf-8', 'ignore') if not raw else buf
+ if self._check_raw_data(data):
+ break
+
+ self.debug('final response: {0}'.format(data))
+ return data
+
+ def _get_raw_data(self, raw=False, request=None):
+ """
+ Get raw data with low-level "socket" module.
+ :param raw: set `True` to return bytes
+ :type raw: bool
+ :return: decoded data (str) or raw data (bytes)
+ :rtype: str/bytes
+ """
+ if self._sock is None:
+ self._connect()
+ if self._sock is None:
+ return None
+
+ # Send request if it is needed
+ if not self._send(request):
+ return None
+
+ data = self._receive(raw)
+
+ if not self._keep_alive:
+ self._disconnect()
+
+ return data
+
+ @staticmethod
+ def _check_raw_data(data):
+ """
+ Check if all data has been gathered from socket
+ :param data: str
+ :return: boolean
+ """
+ return bool(data)
+
+ def _parse_config(self):
+ """
+ Parse configuration data
+ :return: boolean
+ """
+ try:
+ self.unix_socket = str(self.configuration['socket'])
+ except (KeyError, TypeError):
+ self.debug('No unix socket specified. Trying TCP/IP socket.')
+ self.unix_socket = None
+ try:
+ self.host = str(self.configuration['host'])
+ except (KeyError, TypeError):
+ self.debug('No host specified. Using: "{0}"'.format(self.host))
+ try:
+ self.port = int(self.configuration['port'])
+ except (KeyError, TypeError):
+ self.debug('No port specified. Using: "{0}"'.format(self.port))
+
+ self.tls = bool(self.configuration.get('tls', self.tls))
+ if self.tls and not _TLS_SUPPORT:
+ self.warning('TLS requested but no TLS module found, disabling TLS support.')
+ self.tls = False
+ if _TLS_SUPPORT and not self.tls:
+ self.debug('No TLS preference specified, not using TLS.')
+
+ if self.tls and _TLS_SUPPORT:
+ self.key = self.configuration.get('tls_key_file')
+ self.cert = self.configuration.get('tls_cert_file')
+ if not self.cert:
+ # If there's not a valid certificate, clear the key too.
+ self.debug('No valid TLS client certificate configuration found.')
+ self.key = None
+ self.cert = None
+ elif not self.key:
+ # If a key isn't listed, the config may still be
+ # valid, because there may be a key attached to the
+ # certificate.
+ self.info('No TLS client key specified, assuming it\'s attached to the certificate.')
+ self.key = None
+
+ try:
+ self.request = str(self.configuration['request'])
+ except (KeyError, TypeError):
+ self.debug('No request specified. Using: "{0}"'.format(self.request))
+
+ self.request = self.request.encode()
+
+ def check(self):
+ self._parse_config()
+ return SimpleService.check(self)
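+
+
+# A minimal, hypothetical module built on SocketService could be sketched as
+# below (host, port, the request string and the reply format are illustrative
+# assumptions):
+#
+#   from bases.FrameworkServices.SocketService import SocketService
+#
+#   ORDER = ['uptime']
+#   CHARTS = {
+#       'uptime': {
+#           'options': [None, 'Service uptime', 'seconds', 'example', 'example.uptime', 'line'],
+#           'lines': [['uptime', 'uptime', 'absolute']],
+#       }
+#   }
+#
+#   class Service(SocketService):
+#       def __init__(self, configuration=None, name=None):
+#           SocketService.__init__(self, configuration=configuration, name=name)
+#           self.order = ORDER
+#           self.definitions = CHARTS
+#           self.host = '127.0.0.1'   # defaults, overridden by the job configuration
+#           self.port = 7000
+#           self.request = 'stats\n'  # encoded to bytes by _parse_config()
+#
+#       def _get_data(self):
+#           raw = self._get_raw_data()  # decoded reply, or None on failure
+#           if not raw:
+#               return None
+#           for line in raw.splitlines():
+#               if line.startswith('uptime'):  # assumes a reply line like "uptime 12345"
+#                   return {'uptime': int(line.split()[1])}
+#           return None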
diff --git a/collectors/python.d.plugin/python_modules/bases/FrameworkServices/UrlService.py b/collectors/python.d.plugin/python_modules/bases/FrameworkServices/UrlService.py
new file mode 100644
index 000000000..856f38851
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/bases/FrameworkServices/UrlService.py
@@ -0,0 +1,146 @@
+# -*- coding: utf-8 -*-
+# Description:
+# Author: Pawel Krupa (paulfantom)
+# Author: Ilya Mashchenko (l2isbad)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+import urllib3
+
+from bases.FrameworkServices.SimpleService import SimpleService
+
+try:
+ urllib3.disable_warnings()
+except AttributeError:
+ pass
+
+
+class UrlService(SimpleService):
+ def __init__(self, configuration=None, name=None):
+ SimpleService.__init__(self, configuration=configuration, name=name)
+ self.url = self.configuration.get('url')
+ self.user = self.configuration.get('user')
+ self.password = self.configuration.get('pass')
+ self.proxy_user = self.configuration.get('proxy_user')
+ self.proxy_password = self.configuration.get('proxy_pass')
+ self.proxy_url = self.configuration.get('proxy_url')
+ self.method = self.configuration.get('method', 'GET')
+ self.header = self.configuration.get('header')
+ self.request_timeout = self.configuration.get('timeout', 1)
+ self.tls_verify = self.configuration.get('tls_verify')
+ self.tls_ca_file = self.configuration.get('tls_ca_file')
+ self.tls_key_file = self.configuration.get('tls_key_file')
+ self.tls_cert_file = self.configuration.get('tls_cert_file')
+ self._manager = None
+
+ def __make_headers(self, **header_kw):
+ user = header_kw.get('user') or self.user
+ password = header_kw.get('pass') or self.password
+ proxy_user = header_kw.get('proxy_user') or self.proxy_user
+ proxy_password = header_kw.get('proxy_pass') or self.proxy_password
+ custom_header = header_kw.get('header') or self.header
+ header_params = dict(keep_alive=True)
+ proxy_header_params = dict()
+ if user and password:
+ header_params['basic_auth'] = '{user}:{password}'.format(user=user,
+ password=password)
+ if proxy_user and proxy_password:
+ proxy_header_params['proxy_basic_auth'] = '{user}:{password}'.format(user=proxy_user,
+ password=proxy_password)
+ try:
+ header, proxy_header = urllib3.make_headers(**header_params), urllib3.make_headers(**proxy_header_params)
+ except TypeError as error:
+ self.error('build_header() error: {error}'.format(error=error))
+ return None, None
+ else:
+ header.update(custom_header or dict())
+ return header, proxy_header
+
+ def _build_manager(self, **header_kw):
+ header, proxy_header = self.__make_headers(**header_kw)
+ if header is None or proxy_header is None:
+ return None
+ proxy_url = header_kw.get('proxy_url') or self.proxy_url
+ if proxy_url:
+ manager = urllib3.ProxyManager
+ params = dict(proxy_url=proxy_url, headers=header, proxy_headers=proxy_header)
+ else:
+ manager = urllib3.PoolManager
+ params = dict(headers=header)
+ tls_cert_file = self.tls_cert_file
+ if tls_cert_file:
+ params['cert_file'] = tls_cert_file
+ # NOTE: key_file is useless without cert_file, but
+ # cert_file may include the key as well.
+ tls_key_file = self.tls_key_file
+ if tls_key_file:
+ params['key_file'] = tls_key_file
+ tls_ca_file = self.tls_ca_file
+ if tls_ca_file:
+ params['ca_certs'] = tls_ca_file
+ try:
+ url = header_kw.get('url') or self.url
+ if url.startswith('https') and not self.tls_verify and not tls_ca_file:
+ params['ca_certs'] = None
+ return manager(assert_hostname=False, cert_reqs='CERT_NONE', **params)
+ return manager(**params)
+ except (urllib3.exceptions.ProxySchemeUnknown, TypeError) as error:
+ self.error('build_manager() error:', str(error))
+ return None
+
+ def _get_raw_data(self, url=None, manager=None):
+ """
+ Get raw data from http request
+ :return: str
+ """
+ try:
+ status, data = self._get_raw_data_with_status(url, manager)
+ except (urllib3.exceptions.HTTPError, TypeError, AttributeError) as error:
+ self.error('Url: {url}. Error: {error}'.format(url=url or self.url, error=error))
+ return None
+
+ if status == 200:
+ return data
+ else:
+ self.debug('Url: {url}. Http response status code: {code}'.format(url=url or self.url, code=status))
+ return None
+
+ def _get_raw_data_with_status(self, url=None, manager=None, retries=1, redirect=True):
+ """
+ Get status and response body content from http request. Does not catch exceptions
+ :return: int, str
+ """
+ url = url or self.url
+ manager = manager or self._manager
+ response = manager.request(method=self.method,
+ url=url,
+ timeout=self.request_timeout,
+ retries=retries,
+ headers=manager.headers,
+ redirect=redirect)
+ if isinstance(response.data, str):
+ return response.status, response.data
+ return response.status, response.data.decode()
+
+ def check(self):
+ """
+ Format configuration data and try to connect to server
+ :return: boolean
+ """
+ if not (self.url and isinstance(self.url, str)):
+ self.error('URL is not defined or type is not <str>')
+ return False
+
+ self._manager = self._build_manager()
+ if not self._manager:
+ return False
+
+ try:
+ data = self._get_data()
+ except Exception as error:
+ self.error('_get_data() failed. Url: {url}. Error: {error}'.format(url=self.url, error=error))
+ return False
+
+ if isinstance(data, dict) and data:
+ return True
+ self.error('_get_data() returned no data or type is not <dict>')
+ return False
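For orientation, a collector built on UrlService usually only needs a 'url' (plus optional auth and TLS keys) in its job configuration and an implementation of _get_data() that turns the body returned by _get_raw_data() into a dict of metric values; check() above then verifies that this round-trip works. A minimal sketch, with a hypothetical plain-text endpoint and hypothetical key names:

    from bases.FrameworkServices.UrlService import UrlService

    class Service(UrlService):
        def __init__(self, configuration=None, name=None):
            UrlService.__init__(self, configuration=configuration, name=name)
            # 'url' normally comes from the job configuration file; the default is illustrative.
            self.url = self.configuration.get('url', 'http://127.0.0.1:8080/status')

        def _get_data(self):
            # _get_raw_data() returns the decoded response body, or None on any error.
            raw = self._get_raw_data()
            if raw is None:
                return None
            data = {}
            for line in raw.splitlines():
                if ':' not in line:
                    continue
                key, value = line.split(':', 1)
                try:
                    data[key.strip()] = int(value.strip())
                except ValueError:
                    continue
            return data or None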
diff --git a/collectors/python.d.plugin/python_modules/bases/FrameworkServices/__init__.py b/collectors/python.d.plugin/python_modules/bases/FrameworkServices/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/bases/FrameworkServices/__init__.py
diff --git a/collectors/python.d.plugin/python_modules/bases/__init__.py b/collectors/python.d.plugin/python_modules/bases/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/bases/__init__.py
diff --git a/collectors/python.d.plugin/python_modules/bases/charts.py b/collectors/python.d.plugin/python_modules/bases/charts.py
new file mode 100644
index 000000000..2963739ec
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/bases/charts.py
@@ -0,0 +1,394 @@
+# -*- coding: utf-8 -*-
+# Description:
+# Author: Ilya Mashchenko (l2isbad)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from bases.collection import safe_print
+
+CHART_PARAMS = ['type', 'id', 'name', 'title', 'units', 'family', 'context', 'chart_type', 'hidden']
+DIMENSION_PARAMS = ['id', 'name', 'algorithm', 'multiplier', 'divisor', 'hidden']
+VARIABLE_PARAMS = ['id', 'value']
+
+CHART_TYPES = ['line', 'area', 'stacked']
+DIMENSION_ALGORITHMS = ['absolute', 'incremental', 'percentage-of-absolute-row', 'percentage-of-incremental-row']
+
+CHART_BEGIN = 'BEGIN {type}.{id} {since_last}\n'
+CHART_CREATE = "CHART {type}.{id} '{name}' '{title}' '{units}' '{family}' '{context}' " \
+ "{chart_type} {priority} {update_every} '{hidden}' 'python.d.plugin' '{module_name}'\n"
+CHART_OBSOLETE = "CHART {type}.{id} '{name}' '{title}' '{units}' '{family}' '{context}' " \
+ "{chart_type} {priority} {update_every} '{hidden} obsolete'\n"
+
+
+DIMENSION_CREATE = "DIMENSION '{id}' '{name}' {algorithm} {multiplier} {divisor} '{hidden}'\n"
+DIMENSION_SET = "SET '{id}' = {value}\n"
+
+CHART_VARIABLE_SET = "VARIABLE CHART '{id}' = {value}\n"
+
+RUNTIME_CHART_CREATE = "CHART netdata.runtime_{job_name} '' 'Execution time for {job_name}' 'ms' 'python.d' " \
+ "netdata.pythond_runtime line 145000 {update_every}\n" \
+ "DIMENSION run_time 'run time' absolute 1 1\n"
+
+
+def create_runtime_chart(func):
+ """
+ Calls the wrapped function, then prints the runtime chart to stdout.
+
+ Used as a decorator for SimpleService.create() method.
+ The whole point of making the 'create runtime chart' functionality a decorator was
+ to help users who re-implement create() in their own classes.
+
+ :param func: class method
+ :return:
+ """
+ def wrapper(*args, **kwargs):
+ self = args[0]
+ ok = func(*args, **kwargs)
+ if ok:
+ safe_print(RUNTIME_CHART_CREATE.format(job_name=self.name,
+ update_every=self._runtime_counters.FREQ))
+ return ok
+ return wrapper
+
+
+class ChartError(Exception):
+ """Base-class for all exceptions raised by this module"""
+
+
+class DuplicateItemError(ChartError):
+ """Occurs when user re-adds a chart or a dimension that has already been added"""
+
+
+class ItemTypeError(ChartError):
+ """Occurs when user passes value of wrong type to Chart, Dimension or ChartVariable class"""
+
+
+class ItemValueError(ChartError):
+ """Occurs when user passes inappropriate value to Chart, Dimension or ChartVariable class"""
+
+
+class Charts:
+ """Represent a collection of charts
+
+ All charts are stored in a dict.
+ Each chart is an instance of the Chart class.
+ Charts must be added using the Charts.add_chart() method only"""
+ def __init__(self, job_name, priority, cleanup, get_update_every, module_name):
+ """
+ :param job_name: <bound method>
+ :param priority: <int>
+ :param get_update_every: <bound method>
+ """
+ self.job_name = job_name
+ self.priority = priority
+ self.cleanup = cleanup
+ self.get_update_every = get_update_every
+ self.module_name = module_name
+ self.charts = dict()
+
+ def __len__(self):
+ return len(self.charts)
+
+ def __iter__(self):
+ return iter(self.charts.values())
+
+ def __repr__(self):
+ return 'Charts({0})'.format(self)
+
+ def __str__(self):
+ return str([chart for chart in self.charts])
+
+ def __contains__(self, item):
+ return item in self.charts
+
+ def __getitem__(self, item):
+ return self.charts[item]
+
+ def __delitem__(self, key):
+ del self.charts[key]
+
+ def __bool__(self):
+ return bool(self.charts)
+
+ def __nonzero__(self):
+ return self.__bool__()
+
+ def add_chart(self, params):
+ """
+ Create Chart instance and add it to the dict
+
+ Manually adds job name, priority and update_every to params.
+ :param params: <list>
+ :return:
+ """
+ params = [self.job_name()] + params
+ new_chart = Chart(params)
+
+ new_chart.params['update_every'] = self.get_update_every()
+ new_chart.params['priority'] = self.priority
+ new_chart.params['module_name'] = self.module_name
+
+ self.priority += 1
+ self.charts[new_chart.id] = new_chart
+
+ return new_chart
+
+ def active_charts(self):
+ return [chart.id for chart in self if not chart.flags.obsoleted]
+
+
+class Chart:
+ """Represent a chart"""
+ def __init__(self, params):
+ """
+ :param params: <list>
+ """
+ if not isinstance(params, list):
+ raise ItemTypeError("'chart' must be a list type")
+ if not len(params) >= 8:
+ raise ItemValueError("invalid value for 'chart', must be {0}".format(CHART_PARAMS))
+
+ self.params = dict(zip(CHART_PARAMS, (p or str() for p in params)))
+ self.name = '{type}.{id}'.format(type=self.params['type'],
+ id=self.params['id'])
+ if self.params.get('chart_type') not in CHART_TYPES:
+ self.params['chart_type'] = 'absolute'
+ hidden = str(self.params.get('hidden', ''))
+ self.params['hidden'] = 'hidden' if hidden == 'hidden' else ''
+
+ self.dimensions = list()
+ self.variables = set()
+ self.flags = ChartFlags()
+ self.penalty = 0
+
+ def __getattr__(self, item):
+ try:
+ return self.params[item]
+ except KeyError:
+ raise AttributeError("'{instance}' has no attribute '{attr}'".format(instance=repr(self),
+ attr=item))
+
+ def __repr__(self):
+ return 'Chart({0})'.format(self.id)
+
+ def __str__(self):
+ return self.id
+
+ def __iter__(self):
+ return iter(self.dimensions)
+
+ def __contains__(self, item):
+ return item in [dimension.id for dimension in self.dimensions]
+
+ def add_variable(self, variable):
+ """
+ :param variable: <list>
+ :return:
+ """
+ self.variables.add(ChartVariable(variable))
+
+ def add_dimension(self, dimension):
+ """
+ :param dimension: <list>
+ :return:
+ """
+ dim = Dimension(dimension)
+
+ if dim.id in self:
+ raise DuplicateItemError("'{dimension}' already in '{chart}' dimensions".format(dimension=dim.id,
+ chart=self.name))
+ self.refresh()
+ self.dimensions.append(dim)
+ return dim
+
+ def hide_dimension(self, dimension_id, reverse=False):
+ if dimension_id in self:
+ idx = self.dimensions.index(dimension_id)
+ dimension = self.dimensions[idx]
+ dimension.params['hidden'] = 'hidden' if not reverse else str()
+ self.refresh()
+
+ def create(self):
+ """
+ :return:
+ """
+ chart = CHART_CREATE.format(**self.params)
+ dimensions = ''.join([dimension.create() for dimension in self.dimensions])
+ variables = ''.join([var.set(var.value) for var in self.variables if var])
+
+ self.flags.push = False
+ self.flags.created = True
+
+ safe_print(chart + dimensions + variables)
+
+ def can_be_updated(self, data):
+ for dim in self.dimensions:
+ if dim.get_value(data) is not None:
+ return True
+ return False
+
+ def update(self, data, interval):
+ updated_dimensions, updated_variables = str(), str()
+
+ for dim in self.dimensions:
+ value = dim.get_value(data)
+ if value is not None:
+ updated_dimensions += dim.set(value)
+
+ for var in self.variables:
+ value = var.get_value(data)
+ if value is not None:
+ updated_variables += var.set(value)
+
+ if updated_dimensions:
+ since_last = interval if self.flags.updated else 0
+
+ if self.flags.push:
+ self.create()
+
+ chart_begin = CHART_BEGIN.format(type=self.type, id=self.id, since_last=since_last)
+ safe_print(chart_begin, updated_dimensions, updated_variables, 'END\n')
+
+ self.flags.updated = True
+ self.penalty = 0
+ else:
+ self.penalty += 1
+ self.flags.updated = False
+
+ return bool(updated_dimensions)
+
+ def obsolete(self):
+ self.flags.obsoleted = True
+ if self.flags.created:
+ safe_print(CHART_OBSOLETE.format(**self.params))
+
+ def refresh(self):
+ self.penalty = 0
+ self.flags.push = True
+ self.flags.obsoleted = False
+
+
+class Dimension:
+ """Represent a dimension"""
+ def __init__(self, params):
+ """
+ :param params: <list>
+ """
+ if not isinstance(params, list):
+ raise ItemTypeError("'dimension' must be a list type")
+ if not params:
+ raise ItemValueError("invalid value for 'dimension', must be {0}".format(DIMENSION_PARAMS))
+
+ self.params = dict(zip(DIMENSION_PARAMS, (p or str() for p in params)))
+ self.params['name'] = self.params.get('name') or self.params['id']
+
+ if self.params.get('algorithm') not in DIMENSION_ALGORITHMS:
+ self.params['algorithm'] = 'absolute'
+ if not isinstance(self.params.get('multiplier'), int):
+ self.params['multiplier'] = 1
+ if not isinstance(self.params.get('divisor'), int):
+ self.params['divisor'] = 1
+ self.params.setdefault('hidden', '')
+
+ def __getattr__(self, item):
+ try:
+ return self.params[item]
+ except KeyError:
+ raise AttributeError("'{instance}' has no attribute '{attr}'".format(instance=repr(self),
+ attr=item))
+
+ def __repr__(self):
+ return 'Dimension({0})'.format(self.id)
+
+ def __str__(self):
+ return self.id
+
+ def __eq__(self, other):
+ if not isinstance(other, Dimension):
+ return self.id == other
+ return self.id == other.id
+
+ def __ne__(self, other):
+ return not self == other
+
+ def __hash__(self):
+ return hash(repr(self))
+
+ def create(self):
+ return DIMENSION_CREATE.format(**self.params)
+
+ def set(self, value):
+ """
+ :param value: <str>: must be a digit
+ :return:
+ """
+ return DIMENSION_SET.format(id=self.id,
+ value=value)
+
+ def get_value(self, data):
+ try:
+ return int(data[self.id])
+ except (KeyError, TypeError):
+ return None
+
+
+class ChartVariable:
+ """Represent a chart variable"""
+ def __init__(self, params):
+ """
+ :param params: <list>
+ """
+ if not isinstance(params, list):
+ raise ItemTypeError("'variable' must be a list type")
+ if not params:
+ raise ItemValueError("invalid value for 'variable' must be: {0}".format(VARIABLE_PARAMS))
+
+ self.params = dict(zip(VARIABLE_PARAMS, params))
+ self.params.setdefault('value', None)
+
+ def __getattr__(self, item):
+ try:
+ return self.params[item]
+ except KeyError:
+ raise AttributeError("'{instance}' has no attribute '{attr}'".format(instance=repr(self),
+ attr=item))
+
+ def __bool__(self):
+ return self.value is not None
+
+ def __nonzero__(self):
+ return self.__bool__()
+
+ def __repr__(self):
+ return 'ChartVariable({0})'.format(self.id)
+
+ def __str__(self):
+ return self.id
+
+ def __eq__(self, other):
+ if isinstance(other, ChartVariable):
+ return self.id == other.id
+ return False
+
+ def __ne__(self, other):
+ return not self == other
+
+ def __hash__(self):
+ return hash(repr(self))
+
+ def set(self, value):
+ return CHART_VARIABLE_SET.format(id=self.id,
+ value=value)
+
+ def get_value(self, data):
+ try:
+ return int(data[self.id])
+ except (KeyError, TypeError):
+ return None
+
+
+class ChartFlags:
+ def __init__(self):
+ self.push = True
+ self.created = False
+ self.updated = False
+ self.obsoleted = False
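To make the parameter lists above concrete: a chart is defined by a positional list in CHART_PARAMS order and each dimension by a list in DIMENSION_PARAMS order; Charts.add_chart() normally prepends the job name and fills in priority, update_every and module_name from the job definition. A standalone sketch (all values are illustrative) that drives Chart directly:

    from bases.charts import Chart

    # CHART_PARAMS order: type, id, name, title, units, family, context, chart_type, hidden
    chart = Chart(['example', 'requests', None, 'Requests', 'requests/s',
                   'requests', 'example.requests', 'line'])
    # Charts.add_chart() would normally set these three for you.
    chart.params['priority'] = 90000
    chart.params['update_every'] = 1
    chart.params['module_name'] = 'example'

    # DIMENSION_PARAMS order: id, name, algorithm, multiplier, divisor, hidden
    chart.add_dimension(['requests', None, 'incremental'])

    chart.create()                     # emits the CHART and DIMENSION lines on stdout
    chart.update({'requests': 42}, 0)  # emits BEGIN / SET / END for one data point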
diff --git a/collectors/python.d.plugin/python_modules/bases/collection.py b/collectors/python.d.plugin/python_modules/bases/collection.py
new file mode 100644
index 000000000..479a3b610
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/bases/collection.py
@@ -0,0 +1,145 @@
+# -*- coding: utf-8 -*-
+# Description:
+# Author: Ilya Mashchenko (l2isbad)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+import os
+
+PATH = os.getenv('PATH', '/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin').split(':')
+
+CHART_BEGIN = 'BEGIN {0} {1}\n'
+CHART_CREATE = "CHART {0} '{1}' '{2}' '{3}' '{4}' '{5}' {6} {7} {8}\n"
+DIMENSION_CREATE = "DIMENSION '{0}' '{1}' {2} {3} {4} '{5}'\n"
+DIMENSION_SET = "SET '{0}' = {1}\n"
+
+
+def setdefault_values(config, base_dict):
+ for key, value in base_dict.items():
+ config.setdefault(key, value)
+ return config
+
+
+def run_and_exit(func):
+ def wrapper(*args, **kwargs):
+ func(*args, **kwargs)
+ exit(1)
+ return wrapper
+
+
+def on_try_except_finally(on_except=(None, ), on_finally=(None, )):
+ except_func = on_except[0]
+ finally_func = on_finally[0]
+
+ def decorator(func):
+ def wrapper(*args, **kwargs):
+ try:
+ func(*args, **kwargs)
+ except Exception:
+ if except_func:
+ except_func(*on_except[1:])
+ finally:
+ if finally_func:
+ finally_func(*on_finally[1:])
+ return wrapper
+ return decorator
+
+
+def static_vars(**kwargs):
+ def decorate(func):
+ for k in kwargs:
+ setattr(func, k, kwargs[k])
+ return func
+ return decorate
+
+
+@on_try_except_finally(on_except=(exit, 1))
+def safe_print(*msg):
+ """
+ :param msg:
+ :return:
+ """
+ print(''.join(msg))
+
+
+def find_binary(binary):
+ """
+ :param binary: <str>
+ :return:
+ """
+ for directory in PATH:
+ binary_name = '/'.join([directory, binary])
+ if os.path.isfile(binary_name) and os.access(binary_name, os.X_OK):
+ return binary_name
+ return None
+
+
+def read_last_line(f):
+ with open(f, 'rb') as opened:
+ opened.seek(-2, 2)
+ while opened.read(1) != b'\n':
+ opened.seek(-2, 1)
+ if opened.tell() == 0:
+ break
+ result = opened.readline()
+ return result.decode()
+
+
+class OldVersionCompatibility:
+
+ def __init__(self):
+ self._data_stream = str()
+
+ def begin(self, type_id, microseconds=0):
+ """
+ :param type_id: <str>
+ :param microseconds: <str> or <int>: must be a digit
+ :return:
+ """
+ self._data_stream += CHART_BEGIN.format(type_id, microseconds)
+
+ def set(self, dim_id, value):
+ """
+ :param dim_id: <str>
+ :param value: <int> or <str>: must be a digit
+ :return:
+ """
+ self._data_stream += DIMENSION_SET.format(dim_id, value)
+
+ def end(self):
+ self._data_stream += 'END\n'
+
+ def chart(self, type_id, name='', title='', units='', family='', category='', chart_type='line',
+ priority='', update_every=''):
+ """
+ :param type_id: <str>
+ :param name: <str>
+ :param title: <str>
+ :param units: <str>
+ :param family: <str>
+ :param category: <str>
+ :param chart_type: <str>
+ :param priority: <str> or <int>
+ :param update_every: <str> or <int>
+ :return:
+ """
+ self._data_stream += CHART_CREATE.format(type_id, name, title, units,
+ family, category, chart_type,
+ priority, update_every)
+
+ def dimension(self, dim_id, name=None, algorithm="absolute", multiplier=1, divisor=1, hidden=False):
+ """
+ :param dim_id: <str>
+ :param name: <str> or None
+ :param algorithm: <str>
+ :param multiplier: <str> or <int>: must be a digit
+ :param divisor: <str> or <int>: must be a digit
+ :param hidden: <str>: literally "hidden" or ""
+ :return:
+ """
+ self._data_stream += DIMENSION_CREATE.format(dim_id, name or dim_id, algorithm,
+ multiplier, divisor, hidden or str())
+
+ @on_try_except_finally(on_except=(exit, 1))
+ def commit(self):
+ print(self._data_stream)
+ self._data_stream = str()
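The helpers in this module are used throughout the framework; besides the chart-printing strings, the most commonly reused ones are find_binary(), which resolves an executable against $PATH, and setdefault_values(), which merges per-job configuration with module defaults. A short sketch (the binary and file names are only illustrative):

    from bases.collection import find_binary, setdefault_values

    smartctl = find_binary('smartctl')   # absolute path, or None if not found or not executable
    if smartctl:
        print('using ' + smartctl)

    defaults = {'update_every': 1, 'retries': 60}
    job = setdefault_values({'update_every': 5}, base_dict=defaults)
    # job is now {'update_every': 5, 'retries': 60}: existing keys win, defaults fill the gaps

    # read_last_line('/var/log/example.log') seeks backwards from EOF, so it stays cheap
    # even on large files, but it expects a non-empty file.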
diff --git a/collectors/python.d.plugin/python_modules/bases/loaders.py b/collectors/python.d.plugin/python_modules/bases/loaders.py
new file mode 100644
index 000000000..9eb268ce7
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/bases/loaders.py
@@ -0,0 +1,83 @@
+# -*- coding: utf-8 -*-
+# Description:
+# Author: Ilya Mashchenko (l2isbad)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+import types
+
+from sys import version_info
+
+PY_VERSION = version_info[:2]
+
+try:
+ if PY_VERSION > (3, 1):
+ from pyyaml3 import SafeLoader as YamlSafeLoader
+ else:
+ from pyyaml2 import SafeLoader as YamlSafeLoader
+except ImportError:
+ from yaml import SafeLoader as YamlSafeLoader
+
+
+if PY_VERSION > (3, 1):
+ from importlib.machinery import SourceFileLoader
+ DEFAULT_MAPPING_TAG = 'tag:yaml.org,2002:map'
+else:
+ from imp import load_source as SourceFileLoader
+ DEFAULT_MAPPING_TAG = u'tag:yaml.org,2002:map'
+
+try:
+ from collections import OrderedDict
+except ImportError:
+ from third_party.ordereddict import OrderedDict
+
+
+def dict_constructor(loader, node):
+ return OrderedDict(loader.construct_pairs(node))
+
+
+def safe_load(stream):
+ loader = YamlSafeLoader(stream)
+ try:
+ return loader.get_single_data()
+ finally:
+ loader.dispose()
+
+
+YamlSafeLoader.add_constructor(DEFAULT_MAPPING_TAG, dict_constructor)
+
+
+class YamlOrderedLoader:
+ @staticmethod
+ def load_config_from_file(file_name):
+ opened, loaded = False, False
+ try:
+ stream = open(file_name, 'r')
+ opened = True
+ loader = YamlSafeLoader(stream)
+ loaded = True
+ parsed = loader.get_single_data() or dict()
+ except Exception as error:
+ return dict(), error
+ else:
+ return parsed, None
+ finally:
+ if opened:
+ stream.close()
+ if loaded:
+ loader.dispose()
+
+
+class SourceLoader:
+ @staticmethod
+ def load_module_from_file(name, path):
+ try:
+ loaded = SourceFileLoader(name, path)
+ if isinstance(loaded, types.ModuleType):
+ return loaded, None
+ return loaded.load_module(), None
+ except Exception as error:
+ return None, error
+
+
+class ModuleAndConfigLoader(YamlOrderedLoader, SourceLoader):
+ pass
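ModuleAndConfigLoader simply combines the two static helpers: YAML job configuration is parsed with the SafeLoader patched above so mappings come back as OrderedDict and key order is preserved, and chart modules are loaded from their .chart.py path. Both helpers report failures as a returned error rather than raising. A sketch with illustrative paths:

    from bases.loaders import ModuleAndConfigLoader

    loader = ModuleAndConfigLoader()

    config, error = loader.load_config_from_file('/etc/netdata/python.d/example.conf')
    if error:
        print('config not loaded: ' + str(error))   # config is an empty dict in this case

    module, error = loader.load_module_from_file('example',
                                                 '/usr/libexec/netdata/python.d/example.chart.py')
    if error:
        print('module not loaded: ' + str(error))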
diff --git a/collectors/python.d.plugin/python_modules/bases/loggers.py b/collectors/python.d.plugin/python_modules/bases/loggers.py
new file mode 100644
index 000000000..39be77a79
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/bases/loggers.py
@@ -0,0 +1,206 @@
+# -*- coding: utf-8 -*-
+# Description:
+# Author: Ilya Mashchenko (l2isbad)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+import logging
+import traceback
+
+from sys import exc_info
+
+try:
+ from time import monotonic as time
+except ImportError:
+ from time import time
+
+from bases.collection import on_try_except_finally
+
+
+LOGGING_LEVELS = {'CRITICAL': 50,
+ 'ERROR': 40,
+ 'WARNING': 30,
+ 'INFO': 20,
+ 'DEBUG': 10,
+ 'NOTSET': 0}
+
+DEFAULT_LOG_LINE_FORMAT = '%(asctime)s: %(name)s %(levelname)s : %(message)s'
+DEFAULT_LOG_TIME_FORMAT = '%Y-%m-%d %H:%M:%S'
+
+PYTHON_D_LOG_LINE_FORMAT = '%(asctime)s: %(name)s %(levelname)s: %(module_name)s: %(job_name)s: %(message)s'
+PYTHON_D_LOG_NAME = 'python.d'
+
+
+def limiter(log_max_count=30, allowed_in_seconds=60):
+ def on_decorator(func):
+
+ def on_call(*args):
+ current_time = args[0]._runtime_counters.START_RUN
+ lc = args[0]._logger_counters
+
+ if lc.logged and lc.logged % log_max_count == 0:
+ if current_time - lc.time_to_compare <= allowed_in_seconds:
+ lc.dropped += 1
+ return
+ lc.time_to_compare = current_time
+
+ lc.logged += 1
+ func(*args)
+
+ return on_call
+ return on_decorator
+
+
+def add_traceback(func):
+ def on_call(*args):
+ self = args[0]
+
+ if not self.log_traceback:
+ func(*args)
+ else:
+ if exc_info()[0]:
+ func(*args)
+ func(self, traceback.format_exc())
+ else:
+ func(*args)
+
+ return on_call
+
+
+class LoggerCounters:
+ def __init__(self):
+ self.logged = 0
+ self.dropped = 0
+ self.time_to_compare = time()
+
+ def __repr__(self):
+ return 'LoggerCounter(logged: {logged}, dropped: {dropped})'.format(logged=self.logged,
+ dropped=self.dropped)
+
+
+class BaseLogger(object):
+ def __init__(self, logger_name, log_fmt=DEFAULT_LOG_LINE_FORMAT, date_fmt=DEFAULT_LOG_TIME_FORMAT,
+ handler=logging.StreamHandler):
+ """
+ :param logger_name: <str>
+ :param log_fmt: <str>
+ :param date_fmt: <str>
+ :param handler: <logging handler>
+ """
+ self.logger = logging.getLogger(logger_name)
+ if not self.has_handlers():
+ self.severity = 'INFO'
+ self.logger.addHandler(handler())
+ self.set_formatter(fmt=log_fmt, date_fmt=date_fmt)
+
+ def __repr__(self):
+ return '<Logger: {name}>'.format(name=self.logger.name)
+
+ def set_formatter(self, fmt, date_fmt=DEFAULT_LOG_TIME_FORMAT):
+ """
+ :param fmt: <str>
+ :param date_fmt: <str>
+ :return:
+ """
+ if self.has_handlers():
+ self.logger.handlers[0].setFormatter(logging.Formatter(fmt=fmt, datefmt=date_fmt))
+
+ def has_handlers(self):
+ return self.logger.handlers
+
+ @property
+ def severity(self):
+ return self.logger.getEffectiveLevel()
+
+ @severity.setter
+ def severity(self, level):
+ """
+ :param level: <str> or <int>
+ :return:
+ """
+ if level in LOGGING_LEVELS:
+ self.logger.setLevel(LOGGING_LEVELS[level])
+
+ def debug(self, *msg, **kwargs):
+ self.logger.debug(' '.join(map(str, msg)), **kwargs)
+
+ def info(self, *msg, **kwargs):
+ self.logger.info(' '.join(map(str, msg)), **kwargs)
+
+ def warning(self, *msg, **kwargs):
+ self.logger.warning(' '.join(map(str, msg)), **kwargs)
+
+ def error(self, *msg, **kwargs):
+ self.logger.error(' '.join(map(str, msg)), **kwargs)
+
+ def alert(self, *msg, **kwargs):
+ self.logger.critical(' '.join(map(str, msg)), **kwargs)
+
+ @on_try_except_finally(on_finally=(exit, 1))
+ def fatal(self, *msg, **kwargs):
+ self.logger.critical(' '.join(map(str, msg)), **kwargs)
+
+
+class PythonDLogger(object):
+ def __init__(self, logger_name=PYTHON_D_LOG_NAME, log_fmt=PYTHON_D_LOG_LINE_FORMAT):
+ """
+ :param logger_name: <str>
+ :param log_fmt: <str>
+ """
+ self.logger = BaseLogger(logger_name, log_fmt=log_fmt)
+ self.module_name = 'plugin'
+ self.job_name = 'main'
+ self._logger_counters = LoggerCounters()
+
+ _LOG_TRACEBACK = False
+
+ @property
+ def log_traceback(self):
+ return PythonDLogger._LOG_TRACEBACK
+
+ @log_traceback.setter
+ def log_traceback(self, value):
+ PythonDLogger._LOG_TRACEBACK = value
+
+ def debug(self, *msg):
+ self.logger.debug(*msg, extra={'module_name': self.module_name,
+ 'job_name': self.job_name or self.module_name})
+
+ def info(self, *msg):
+ self.logger.info(*msg, extra={'module_name': self.module_name,
+ 'job_name': self.job_name or self.module_name})
+
+ def warning(self, *msg):
+ self.logger.warning(*msg, extra={'module_name': self.module_name,
+ 'job_name': self.job_name or self.module_name})
+
+ @add_traceback
+ def error(self, *msg):
+ self.logger.error(*msg, extra={'module_name': self.module_name,
+ 'job_name': self.job_name or self.module_name})
+
+ @add_traceback
+ def alert(self, *msg):
+ self.logger.alert(*msg, extra={'module_name': self.module_name,
+ 'job_name': self.job_name or self.module_name})
+
+ def fatal(self, *msg):
+ self.logger.fatal(*msg, extra={'module_name': self.module_name,
+ 'job_name': self.job_name or self.module_name})
+
+
+class PythonDLimitedLogger(PythonDLogger):
+ @limiter()
+ def info(self, *msg):
+ PythonDLogger.info(self, *msg)
+
+ @limiter()
+ def warning(self, *msg):
+ PythonDLogger.warning(self, *msg)
+
+ @limiter()
+ def error(self, *msg):
+ PythonDLogger.error(self, *msg)
+
+ @limiter()
+ def alert(self, *msg):
+ PythonDLogger.alert(self, *msg)
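PythonDLogger tags every record with module_name and job_name through the logging 'extra' mechanism, which is what the %(module_name)s and %(job_name)s fields in PYTHON_D_LOG_LINE_FORMAT expect. PythonDLimitedLogger adds the @limiter() throttle on top, but it reads self._runtime_counters, which is supplied by the service classes that inherit from it, so a standalone sketch is easiest with the unthrottled logger:

    from bases.loggers import PythonDLogger

    log = PythonDLogger()
    log.module_name = 'example'    # substituted into %(module_name)s
    log.job_name = 'local'         # substituted into %(job_name)s

    log.logger.severity = 'DEBUG'  # accepts the names defined in LOGGING_LEVELS
    log.info('collector started')
    log.error('something went wrong')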
diff --git a/collectors/python.d.plugin/python_modules/pyyaml2/__init__.py b/collectors/python.d.plugin/python_modules/pyyaml2/__init__.py
new file mode 100644
index 000000000..4d560e438
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/pyyaml2/__init__.py
@@ -0,0 +1,316 @@
+# SPDX-License-Identifier: MIT
+
+from error import *
+
+from tokens import *
+from events import *
+from nodes import *
+
+from loader import *
+from dumper import *
+
+__version__ = '3.11'
+
+try:
+ from cyaml import *
+ __with_libyaml__ = True
+except ImportError:
+ __with_libyaml__ = False
+
+def scan(stream, Loader=Loader):
+ """
+ Scan a YAML stream and produce scanning tokens.
+ """
+ loader = Loader(stream)
+ try:
+ while loader.check_token():
+ yield loader.get_token()
+ finally:
+ loader.dispose()
+
+def parse(stream, Loader=Loader):
+ """
+ Parse a YAML stream and produce parsing events.
+ """
+ loader = Loader(stream)
+ try:
+ while loader.check_event():
+ yield loader.get_event()
+ finally:
+ loader.dispose()
+
+def compose(stream, Loader=Loader):
+ """
+ Parse the first YAML document in a stream
+ and produce the corresponding representation tree.
+ """
+ loader = Loader(stream)
+ try:
+ return loader.get_single_node()
+ finally:
+ loader.dispose()
+
+def compose_all(stream, Loader=Loader):
+ """
+ Parse all YAML documents in a stream
+ and produce corresponding representation trees.
+ """
+ loader = Loader(stream)
+ try:
+ while loader.check_node():
+ yield loader.get_node()
+ finally:
+ loader.dispose()
+
+def load(stream, Loader=Loader):
+ """
+ Parse the first YAML document in a stream
+ and produce the corresponding Python object.
+ """
+ loader = Loader(stream)
+ try:
+ return loader.get_single_data()
+ finally:
+ loader.dispose()
+
+def load_all(stream, Loader=Loader):
+ """
+ Parse all YAML documents in a stream
+ and produce corresponding Python objects.
+ """
+ loader = Loader(stream)
+ try:
+ while loader.check_data():
+ yield loader.get_data()
+ finally:
+ loader.dispose()
+
+def safe_load(stream):
+ """
+ Parse the first YAML document in a stream
+ and produce the corresponding Python object.
+ Resolve only basic YAML tags.
+ """
+ return load(stream, SafeLoader)
+
+def safe_load_all(stream):
+ """
+ Parse all YAML documents in a stream
+ and produce corresponding Python objects.
+ Resolve only basic YAML tags.
+ """
+ return load_all(stream, SafeLoader)
+
+def emit(events, stream=None, Dumper=Dumper,
+ canonical=None, indent=None, width=None,
+ allow_unicode=None, line_break=None):
+ """
+ Emit YAML parsing events into a stream.
+ If stream is None, return the produced string instead.
+ """
+ getvalue = None
+ if stream is None:
+ from StringIO import StringIO
+ stream = StringIO()
+ getvalue = stream.getvalue
+ dumper = Dumper(stream, canonical=canonical, indent=indent, width=width,
+ allow_unicode=allow_unicode, line_break=line_break)
+ try:
+ for event in events:
+ dumper.emit(event)
+ finally:
+ dumper.dispose()
+ if getvalue:
+ return getvalue()
+
+def serialize_all(nodes, stream=None, Dumper=Dumper,
+ canonical=None, indent=None, width=None,
+ allow_unicode=None, line_break=None,
+ encoding='utf-8', explicit_start=None, explicit_end=None,
+ version=None, tags=None):
+ """
+ Serialize a sequence of representation trees into a YAML stream.
+ If stream is None, return the produced string instead.
+ """
+ getvalue = None
+ if stream is None:
+ if encoding is None:
+ from StringIO import StringIO
+ else:
+ from cStringIO import StringIO
+ stream = StringIO()
+ getvalue = stream.getvalue
+ dumper = Dumper(stream, canonical=canonical, indent=indent, width=width,
+ allow_unicode=allow_unicode, line_break=line_break,
+ encoding=encoding, version=version, tags=tags,
+ explicit_start=explicit_start, explicit_end=explicit_end)
+ try:
+ dumper.open()
+ for node in nodes:
+ dumper.serialize(node)
+ dumper.close()
+ finally:
+ dumper.dispose()
+ if getvalue:
+ return getvalue()
+
+def serialize(node, stream=None, Dumper=Dumper, **kwds):
+ """
+ Serialize a representation tree into a YAML stream.
+ If stream is None, return the produced string instead.
+ """
+ return serialize_all([node], stream, Dumper=Dumper, **kwds)
+
+def dump_all(documents, stream=None, Dumper=Dumper,
+ default_style=None, default_flow_style=None,
+ canonical=None, indent=None, width=None,
+ allow_unicode=None, line_break=None,
+ encoding='utf-8', explicit_start=None, explicit_end=None,
+ version=None, tags=None):
+ """
+ Serialize a sequence of Python objects into a YAML stream.
+ If stream is None, return the produced string instead.
+ """
+ getvalue = None
+ if stream is None:
+ if encoding is None:
+ from StringIO import StringIO
+ else:
+ from cStringIO import StringIO
+ stream = StringIO()
+ getvalue = stream.getvalue
+ dumper = Dumper(stream, default_style=default_style,
+ default_flow_style=default_flow_style,
+ canonical=canonical, indent=indent, width=width,
+ allow_unicode=allow_unicode, line_break=line_break,
+ encoding=encoding, version=version, tags=tags,
+ explicit_start=explicit_start, explicit_end=explicit_end)
+ try:
+ dumper.open()
+ for data in documents:
+ dumper.represent(data)
+ dumper.close()
+ finally:
+ dumper.dispose()
+ if getvalue:
+ return getvalue()
+
+def dump(data, stream=None, Dumper=Dumper, **kwds):
+ """
+ Serialize a Python object into a YAML stream.
+ If stream is None, return the produced string instead.
+ """
+ return dump_all([data], stream, Dumper=Dumper, **kwds)
+
+def safe_dump_all(documents, stream=None, **kwds):
+ """
+ Serialize a sequence of Python objects into a YAML stream.
+ Produce only basic YAML tags.
+ If stream is None, return the produced string instead.
+ """
+ return dump_all(documents, stream, Dumper=SafeDumper, **kwds)
+
+def safe_dump(data, stream=None, **kwds):
+ """
+ Serialize a Python object into a YAML stream.
+ Produce only basic YAML tags.
+ If stream is None, return the produced string instead.
+ """
+ return dump_all([data], stream, Dumper=SafeDumper, **kwds)
+
+def add_implicit_resolver(tag, regexp, first=None,
+ Loader=Loader, Dumper=Dumper):
+ """
+ Add an implicit scalar detector.
+ If an implicit scalar value matches the given regexp,
+ the corresponding tag is assigned to the scalar.
+ first is a sequence of possible initial characters or None.
+ """
+ Loader.add_implicit_resolver(tag, regexp, first)
+ Dumper.add_implicit_resolver(tag, regexp, first)
+
+def add_path_resolver(tag, path, kind=None, Loader=Loader, Dumper=Dumper):
+ """
+ Add a path based resolver for the given tag.
+ A path is a list of keys that forms a path
+ to a node in the representation tree.
+ Keys can be string values, integers, or None.
+ """
+ Loader.add_path_resolver(tag, path, kind)
+ Dumper.add_path_resolver(tag, path, kind)
+
+def add_constructor(tag, constructor, Loader=Loader):
+ """
+ Add a constructor for the given tag.
+ Constructor is a function that accepts a Loader instance
+ and a node object and produces the corresponding Python object.
+ """
+ Loader.add_constructor(tag, constructor)
+
+def add_multi_constructor(tag_prefix, multi_constructor, Loader=Loader):
+ """
+ Add a multi-constructor for the given tag prefix.
+ Multi-constructor is called for a node if its tag starts with tag_prefix.
+ Multi-constructor accepts a Loader instance, a tag suffix,
+ and a node object and produces the corresponding Python object.
+ """
+ Loader.add_multi_constructor(tag_prefix, multi_constructor)
+
+def add_representer(data_type, representer, Dumper=Dumper):
+ """
+ Add a representer for the given type.
+ Representer is a function accepting a Dumper instance
+ and an instance of the given data type
+ and producing the corresponding representation node.
+ """
+ Dumper.add_representer(data_type, representer)
+
+def add_multi_representer(data_type, multi_representer, Dumper=Dumper):
+ """
+ Add a representer for the given type.
+ Multi-representer is a function accepting a Dumper instance
+ and an instance of the given data type or subtype
+ and producing the corresponding representation node.
+ """
+ Dumper.add_multi_representer(data_type, multi_representer)
+
+class YAMLObjectMetaclass(type):
+ """
+ The metaclass for YAMLObject.
+ """
+ def __init__(cls, name, bases, kwds):
+ super(YAMLObjectMetaclass, cls).__init__(name, bases, kwds)
+ if 'yaml_tag' in kwds and kwds['yaml_tag'] is not None:
+ cls.yaml_loader.add_constructor(cls.yaml_tag, cls.from_yaml)
+ cls.yaml_dumper.add_representer(cls, cls.to_yaml)
+
+class YAMLObject(object):
+ """
+ An object that can dump itself to a YAML stream
+ and load itself from a YAML stream.
+ """
+
+ __metaclass__ = YAMLObjectMetaclass
+ __slots__ = () # no direct instantiation, so allow immutable subclasses
+
+ yaml_loader = Loader
+ yaml_dumper = Dumper
+
+ yaml_tag = None
+ yaml_flow_style = None
+
+ def from_yaml(cls, loader, node):
+ """
+ Convert a representation node to a Python object.
+ """
+ return loader.construct_yaml_object(node, cls)
+ from_yaml = classmethod(from_yaml)
+
+ def to_yaml(cls, dumper, data):
+ """
+ Convert a Python object to a representation node.
+ """
+ return dumper.represent_yaml_object(cls.yaml_tag, data, cls,
+ flow_style=cls.yaml_flow_style)
+ to_yaml = classmethod(to_yaml)
+
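This bundled tree keeps python.d.plugin independent of a system-wide PyYAML: loaders.py above imports SafeLoader from pyyaml3 on Python 3 and from this pyyaml2 copy on Python 2, falling back to an installed yaml package. The public entry points mirror stock PyYAML, so under Python 2 (which this copy targets) the usual calls apply; a small sketch:

    from pyyaml2 import safe_load, safe_dump

    config = safe_load("update_every: 5\nretries: 60\n")
    # config == {'update_every': 5, 'retries': 60}

    print(safe_dump(config, default_flow_style=False))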
diff --git a/collectors/python.d.plugin/python_modules/pyyaml2/composer.py b/collectors/python.d.plugin/python_modules/pyyaml2/composer.py
new file mode 100644
index 000000000..6b41b8067
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/pyyaml2/composer.py
@@ -0,0 +1,140 @@
+# SPDX-License-Identifier: MIT
+
+__all__ = ['Composer', 'ComposerError']
+
+from error import MarkedYAMLError
+from events import *
+from nodes import *
+
+class ComposerError(MarkedYAMLError):
+ pass
+
+class Composer(object):
+
+ def __init__(self):
+ self.anchors = {}
+
+ def check_node(self):
+ # Drop the STREAM-START event.
+ if self.check_event(StreamStartEvent):
+ self.get_event()
+
+ # Are there more documents available?
+ return not self.check_event(StreamEndEvent)
+
+ def get_node(self):
+ # Get the root node of the next document.
+ if not self.check_event(StreamEndEvent):
+ return self.compose_document()
+
+ def get_single_node(self):
+ # Drop the STREAM-START event.
+ self.get_event()
+
+ # Compose a document if the stream is not empty.
+ document = None
+ if not self.check_event(StreamEndEvent):
+ document = self.compose_document()
+
+ # Ensure that the stream contains no more documents.
+ if not self.check_event(StreamEndEvent):
+ event = self.get_event()
+ raise ComposerError("expected a single document in the stream",
+ document.start_mark, "but found another document",
+ event.start_mark)
+
+ # Drop the STREAM-END event.
+ self.get_event()
+
+ return document
+
+ def compose_document(self):
+ # Drop the DOCUMENT-START event.
+ self.get_event()
+
+ # Compose the root node.
+ node = self.compose_node(None, None)
+
+ # Drop the DOCUMENT-END event.
+ self.get_event()
+
+ self.anchors = {}
+ return node
+
+ def compose_node(self, parent, index):
+ if self.check_event(AliasEvent):
+ event = self.get_event()
+ anchor = event.anchor
+ if anchor not in self.anchors:
+ raise ComposerError(None, None, "found undefined alias %r"
+ % anchor.encode('utf-8'), event.start_mark)
+ return self.anchors[anchor]
+ event = self.peek_event()
+ anchor = event.anchor
+ if anchor is not None:
+ if anchor in self.anchors:
+ raise ComposerError("found duplicate anchor %r; first occurence"
+ % anchor.encode('utf-8'), self.anchors[anchor].start_mark,
+ "second occurence", event.start_mark)
+ self.descend_resolver(parent, index)
+ if self.check_event(ScalarEvent):
+ node = self.compose_scalar_node(anchor)
+ elif self.check_event(SequenceStartEvent):
+ node = self.compose_sequence_node(anchor)
+ elif self.check_event(MappingStartEvent):
+ node = self.compose_mapping_node(anchor)
+ self.ascend_resolver()
+ return node
+
+ def compose_scalar_node(self, anchor):
+ event = self.get_event()
+ tag = event.tag
+ if tag is None or tag == u'!':
+ tag = self.resolve(ScalarNode, event.value, event.implicit)
+ node = ScalarNode(tag, event.value,
+ event.start_mark, event.end_mark, style=event.style)
+ if anchor is not None:
+ self.anchors[anchor] = node
+ return node
+
+ def compose_sequence_node(self, anchor):
+ start_event = self.get_event()
+ tag = start_event.tag
+ if tag is None or tag == u'!':
+ tag = self.resolve(SequenceNode, None, start_event.implicit)
+ node = SequenceNode(tag, [],
+ start_event.start_mark, None,
+ flow_style=start_event.flow_style)
+ if anchor is not None:
+ self.anchors[anchor] = node
+ index = 0
+ while not self.check_event(SequenceEndEvent):
+ node.value.append(self.compose_node(node, index))
+ index += 1
+ end_event = self.get_event()
+ node.end_mark = end_event.end_mark
+ return node
+
+ def compose_mapping_node(self, anchor):
+ start_event = self.get_event()
+ tag = start_event.tag
+ if tag is None or tag == u'!':
+ tag = self.resolve(MappingNode, None, start_event.implicit)
+ node = MappingNode(tag, [],
+ start_event.start_mark, None,
+ flow_style=start_event.flow_style)
+ if anchor is not None:
+ self.anchors[anchor] = node
+ while not self.check_event(MappingEndEvent):
+ #key_event = self.peek_event()
+ item_key = self.compose_node(node, None)
+ #if item_key in node.value:
+ # raise ComposerError("while composing a mapping", start_event.start_mark,
+ # "found duplicate key", key_event.start_mark)
+ item_value = self.compose_node(node, item_key)
+ #node.value[item_key] = item_value
+ node.value.append((item_key, item_value))
+ end_event = self.get_event()
+ node.end_mark = end_event.end_mark
+ return node
+
diff --git a/collectors/python.d.plugin/python_modules/pyyaml2/constructor.py b/collectors/python.d.plugin/python_modules/pyyaml2/constructor.py
new file mode 100644
index 000000000..8ad1b90a7
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/pyyaml2/constructor.py
@@ -0,0 +1,676 @@
+# SPDX-License-Identifier: MIT
+
+__all__ = ['BaseConstructor', 'SafeConstructor', 'Constructor',
+ 'ConstructorError']
+
+from error import *
+from nodes import *
+
+import datetime
+
+import binascii, re, sys, types
+
+class ConstructorError(MarkedYAMLError):
+ pass
+
+class BaseConstructor(object):
+
+ yaml_constructors = {}
+ yaml_multi_constructors = {}
+
+ def __init__(self):
+ self.constructed_objects = {}
+ self.recursive_objects = {}
+ self.state_generators = []
+ self.deep_construct = False
+
+ def check_data(self):
+ # Are there more documents available?
+ return self.check_node()
+
+ def get_data(self):
+ # Construct and return the next document.
+ if self.check_node():
+ return self.construct_document(self.get_node())
+
+ def get_single_data(self):
+ # Ensure that the stream contains a single document and construct it.
+ node = self.get_single_node()
+ if node is not None:
+ return self.construct_document(node)
+ return None
+
+ def construct_document(self, node):
+ data = self.construct_object(node)
+ while self.state_generators:
+ state_generators = self.state_generators
+ self.state_generators = []
+ for generator in state_generators:
+ for dummy in generator:
+ pass
+ self.constructed_objects = {}
+ self.recursive_objects = {}
+ self.deep_construct = False
+ return data
+
+ def construct_object(self, node, deep=False):
+ if node in self.constructed_objects:
+ return self.constructed_objects[node]
+ if deep:
+ old_deep = self.deep_construct
+ self.deep_construct = True
+ if node in self.recursive_objects:
+ raise ConstructorError(None, None,
+ "found unconstructable recursive node", node.start_mark)
+ self.recursive_objects[node] = None
+ constructor = None
+ tag_suffix = None
+ if node.tag in self.yaml_constructors:
+ constructor = self.yaml_constructors[node.tag]
+ else:
+ for tag_prefix in self.yaml_multi_constructors:
+ if node.tag.startswith(tag_prefix):
+ tag_suffix = node.tag[len(tag_prefix):]
+ constructor = self.yaml_multi_constructors[tag_prefix]
+ break
+ else:
+ if None in self.yaml_multi_constructors:
+ tag_suffix = node.tag
+ constructor = self.yaml_multi_constructors[None]
+ elif None in self.yaml_constructors:
+ constructor = self.yaml_constructors[None]
+ elif isinstance(node, ScalarNode):
+ constructor = self.__class__.construct_scalar
+ elif isinstance(node, SequenceNode):
+ constructor = self.__class__.construct_sequence
+ elif isinstance(node, MappingNode):
+ constructor = self.__class__.construct_mapping
+ if tag_suffix is None:
+ data = constructor(self, node)
+ else:
+ data = constructor(self, tag_suffix, node)
+ if isinstance(data, types.GeneratorType):
+ generator = data
+ data = generator.next()
+ if self.deep_construct:
+ for dummy in generator:
+ pass
+ else:
+ self.state_generators.append(generator)
+ self.constructed_objects[node] = data
+ del self.recursive_objects[node]
+ if deep:
+ self.deep_construct = old_deep
+ return data
+
+ def construct_scalar(self, node):
+ if not isinstance(node, ScalarNode):
+ raise ConstructorError(None, None,
+ "expected a scalar node, but found %s" % node.id,
+ node.start_mark)
+ return node.value
+
+ def construct_sequence(self, node, deep=False):
+ if not isinstance(node, SequenceNode):
+ raise ConstructorError(None, None,
+ "expected a sequence node, but found %s" % node.id,
+ node.start_mark)
+ return [self.construct_object(child, deep=deep)
+ for child in node.value]
+
+ def construct_mapping(self, node, deep=False):
+ if not isinstance(node, MappingNode):
+ raise ConstructorError(None, None,
+ "expected a mapping node, but found %s" % node.id,
+ node.start_mark)
+ mapping = {}
+ for key_node, value_node in node.value:
+ key = self.construct_object(key_node, deep=deep)
+ try:
+ hash(key)
+ except TypeError, exc:
+ raise ConstructorError("while constructing a mapping", node.start_mark,
+ "found unacceptable key (%s)" % exc, key_node.start_mark)
+ value = self.construct_object(value_node, deep=deep)
+ mapping[key] = value
+ return mapping
+
+ def construct_pairs(self, node, deep=False):
+ if not isinstance(node, MappingNode):
+ raise ConstructorError(None, None,
+ "expected a mapping node, but found %s" % node.id,
+ node.start_mark)
+ pairs = []
+ for key_node, value_node in node.value:
+ key = self.construct_object(key_node, deep=deep)
+ value = self.construct_object(value_node, deep=deep)
+ pairs.append((key, value))
+ return pairs
+
+ def add_constructor(cls, tag, constructor):
+ if not 'yaml_constructors' in cls.__dict__:
+ cls.yaml_constructors = cls.yaml_constructors.copy()
+ cls.yaml_constructors[tag] = constructor
+ add_constructor = classmethod(add_constructor)
+
+ def add_multi_constructor(cls, tag_prefix, multi_constructor):
+ if not 'yaml_multi_constructors' in cls.__dict__:
+ cls.yaml_multi_constructors = cls.yaml_multi_constructors.copy()
+ cls.yaml_multi_constructors[tag_prefix] = multi_constructor
+ add_multi_constructor = classmethod(add_multi_constructor)
+
+class SafeConstructor(BaseConstructor):
+
+ def construct_scalar(self, node):
+ if isinstance(node, MappingNode):
+ for key_node, value_node in node.value:
+ if key_node.tag == u'tag:yaml.org,2002:value':
+ return self.construct_scalar(value_node)
+ return BaseConstructor.construct_scalar(self, node)
+
+ def flatten_mapping(self, node):
+ merge = []
+ index = 0
+ while index < len(node.value):
+ key_node, value_node = node.value[index]
+ if key_node.tag == u'tag:yaml.org,2002:merge':
+ del node.value[index]
+ if isinstance(value_node, MappingNode):
+ self.flatten_mapping(value_node)
+ merge.extend(value_node.value)
+ elif isinstance(value_node, SequenceNode):
+ submerge = []
+ for subnode in value_node.value:
+ if not isinstance(subnode, MappingNode):
+ raise ConstructorError("while constructing a mapping",
+ node.start_mark,
+ "expected a mapping for merging, but found %s"
+ % subnode.id, subnode.start_mark)
+ self.flatten_mapping(subnode)
+ submerge.append(subnode.value)
+ submerge.reverse()
+ for value in submerge:
+ merge.extend(value)
+ else:
+ raise ConstructorError("while constructing a mapping", node.start_mark,
+ "expected a mapping or list of mappings for merging, but found %s"
+ % value_node.id, value_node.start_mark)
+ elif key_node.tag == u'tag:yaml.org,2002:value':
+ key_node.tag = u'tag:yaml.org,2002:str'
+ index += 1
+ else:
+ index += 1
+ if merge:
+ node.value = merge + node.value
+
+ def construct_mapping(self, node, deep=False):
+ if isinstance(node, MappingNode):
+ self.flatten_mapping(node)
+ return BaseConstructor.construct_mapping(self, node, deep=deep)
+
+ def construct_yaml_null(self, node):
+ self.construct_scalar(node)
+ return None
+
+ bool_values = {
+ u'yes': True,
+ u'no': False,
+ u'true': True,
+ u'false': False,
+ u'on': True,
+ u'off': False,
+ }
+
+ def construct_yaml_bool(self, node):
+ value = self.construct_scalar(node)
+ return self.bool_values[value.lower()]
+
+ def construct_yaml_int(self, node):
+ value = str(self.construct_scalar(node))
+ value = value.replace('_', '')
+ sign = +1
+ if value[0] == '-':
+ sign = -1
+ if value[0] in '+-':
+ value = value[1:]
+ if value == '0':
+ return 0
+ elif value.startswith('0b'):
+ return sign*int(value[2:], 2)
+ elif value.startswith('0x'):
+ return sign*int(value[2:], 16)
+ elif value[0] == '0':
+ return sign*int(value, 8)
+ elif ':' in value:
+ digits = [int(part) for part in value.split(':')]
+ digits.reverse()
+ base = 1
+ value = 0
+ for digit in digits:
+ value += digit*base
+ base *= 60
+ return sign*value
+ else:
+ return sign*int(value)
+
+ inf_value = 1e300
+ while inf_value != inf_value*inf_value:
+ inf_value *= inf_value
+ nan_value = -inf_value/inf_value # Trying to make a quiet NaN (like C99).
+
+ def construct_yaml_float(self, node):
+ value = str(self.construct_scalar(node))
+ value = value.replace('_', '').lower()
+ sign = +1
+ if value[0] == '-':
+ sign = -1
+ if value[0] in '+-':
+ value = value[1:]
+ if value == '.inf':
+ return sign*self.inf_value
+ elif value == '.nan':
+ return self.nan_value
+ elif ':' in value:
+ digits = [float(part) for part in value.split(':')]
+ digits.reverse()
+ base = 1
+ value = 0.0
+ for digit in digits:
+ value += digit*base
+ base *= 60
+ return sign*value
+ else:
+ return sign*float(value)
+
+ def construct_yaml_binary(self, node):
+ value = self.construct_scalar(node)
+ try:
+ return str(value).decode('base64')
+ except (binascii.Error, UnicodeEncodeError), exc:
+ raise ConstructorError(None, None,
+ "failed to decode base64 data: %s" % exc, node.start_mark)
+
+ timestamp_regexp = re.compile(
+ ur'''^(?P<year>[0-9][0-9][0-9][0-9])
+ -(?P<month>[0-9][0-9]?)
+ -(?P<day>[0-9][0-9]?)
+ (?:(?:[Tt]|[ \t]+)
+ (?P<hour>[0-9][0-9]?)
+ :(?P<minute>[0-9][0-9])
+ :(?P<second>[0-9][0-9])
+ (?:\.(?P<fraction>[0-9]*))?
+ (?:[ \t]*(?P<tz>Z|(?P<tz_sign>[-+])(?P<tz_hour>[0-9][0-9]?)
+ (?::(?P<tz_minute>[0-9][0-9]))?))?)?$''', re.X)
+
+ def construct_yaml_timestamp(self, node):
+ value = self.construct_scalar(node)
+ match = self.timestamp_regexp.match(node.value)
+ values = match.groupdict()
+ year = int(values['year'])
+ month = int(values['month'])
+ day = int(values['day'])
+ if not values['hour']:
+ return datetime.date(year, month, day)
+ hour = int(values['hour'])
+ minute = int(values['minute'])
+ second = int(values['second'])
+ fraction = 0
+ if values['fraction']:
+ fraction = values['fraction'][:6]
+ while len(fraction) < 6:
+ fraction += '0'
+ fraction = int(fraction)
+ delta = None
+ if values['tz_sign']:
+ tz_hour = int(values['tz_hour'])
+ tz_minute = int(values['tz_minute'] or 0)
+ delta = datetime.timedelta(hours=tz_hour, minutes=tz_minute)
+ if values['tz_sign'] == '-':
+ delta = -delta
+ data = datetime.datetime(year, month, day, hour, minute, second, fraction)
+ if delta:
+ data -= delta
+ return data
+
+ def construct_yaml_omap(self, node):
+ # Note: we do not check for duplicate keys, because it's too
+ # CPU-expensive.
+ omap = []
+ yield omap
+ if not isinstance(node, SequenceNode):
+ raise ConstructorError("while constructing an ordered map", node.start_mark,
+ "expected a sequence, but found %s" % node.id, node.start_mark)
+ for subnode in node.value:
+ if not isinstance(subnode, MappingNode):
+ raise ConstructorError("while constructing an ordered map", node.start_mark,
+ "expected a mapping of length 1, but found %s" % subnode.id,
+ subnode.start_mark)
+ if len(subnode.value) != 1:
+ raise ConstructorError("while constructing an ordered map", node.start_mark,
+ "expected a single mapping item, but found %d items" % len(subnode.value),
+ subnode.start_mark)
+ key_node, value_node = subnode.value[0]
+ key = self.construct_object(key_node)
+ value = self.construct_object(value_node)
+ omap.append((key, value))
+
+ def construct_yaml_pairs(self, node):
+ # Note: the same code as `construct_yaml_omap`.
+ pairs = []
+ yield pairs
+ if not isinstance(node, SequenceNode):
+ raise ConstructorError("while constructing pairs", node.start_mark,
+ "expected a sequence, but found %s" % node.id, node.start_mark)
+ for subnode in node.value:
+ if not isinstance(subnode, MappingNode):
+ raise ConstructorError("while constructing pairs", node.start_mark,
+ "expected a mapping of length 1, but found %s" % subnode.id,
+ subnode.start_mark)
+ if len(subnode.value) != 1:
+ raise ConstructorError("while constructing pairs", node.start_mark,
+ "expected a single mapping item, but found %d items" % len(subnode.value),
+ subnode.start_mark)
+ key_node, value_node = subnode.value[0]
+ key = self.construct_object(key_node)
+ value = self.construct_object(value_node)
+ pairs.append((key, value))
+
+ def construct_yaml_set(self, node):
+ data = set()
+ yield data
+ value = self.construct_mapping(node)
+ data.update(value)
+
+ def construct_yaml_str(self, node):
+ value = self.construct_scalar(node)
+ try:
+ return value.encode('ascii')
+ except UnicodeEncodeError:
+ return value
+
+ def construct_yaml_seq(self, node):
+ data = []
+ yield data
+ data.extend(self.construct_sequence(node))
+
+ def construct_yaml_map(self, node):
+ data = {}
+ yield data
+ value = self.construct_mapping(node)
+ data.update(value)
+
+ def construct_yaml_object(self, node, cls):
+ data = cls.__new__(cls)
+ yield data
+ if hasattr(data, '__setstate__'):
+ state = self.construct_mapping(node, deep=True)
+ data.__setstate__(state)
+ else:
+ state = self.construct_mapping(node)
+ data.__dict__.update(state)
+
+ def construct_undefined(self, node):
+ raise ConstructorError(None, None,
+ "could not determine a constructor for the tag %r" % node.tag.encode('utf-8'),
+ node.start_mark)
+
+SafeConstructor.add_constructor(
+ u'tag:yaml.org,2002:null',
+ SafeConstructor.construct_yaml_null)
+
+SafeConstructor.add_constructor(
+ u'tag:yaml.org,2002:bool',
+ SafeConstructor.construct_yaml_bool)
+
+SafeConstructor.add_constructor(
+ u'tag:yaml.org,2002:int',
+ SafeConstructor.construct_yaml_int)
+
+SafeConstructor.add_constructor(
+ u'tag:yaml.org,2002:float',
+ SafeConstructor.construct_yaml_float)
+
+SafeConstructor.add_constructor(
+ u'tag:yaml.org,2002:binary',
+ SafeConstructor.construct_yaml_binary)
+
+SafeConstructor.add_constructor(
+ u'tag:yaml.org,2002:timestamp',
+ SafeConstructor.construct_yaml_timestamp)
+
+SafeConstructor.add_constructor(
+ u'tag:yaml.org,2002:omap',
+ SafeConstructor.construct_yaml_omap)
+
+SafeConstructor.add_constructor(
+ u'tag:yaml.org,2002:pairs',
+ SafeConstructor.construct_yaml_pairs)
+
+SafeConstructor.add_constructor(
+ u'tag:yaml.org,2002:set',
+ SafeConstructor.construct_yaml_set)
+
+SafeConstructor.add_constructor(
+ u'tag:yaml.org,2002:str',
+ SafeConstructor.construct_yaml_str)
+
+SafeConstructor.add_constructor(
+ u'tag:yaml.org,2002:seq',
+ SafeConstructor.construct_yaml_seq)
+
+SafeConstructor.add_constructor(
+ u'tag:yaml.org,2002:map',
+ SafeConstructor.construct_yaml_map)
+
+SafeConstructor.add_constructor(None,
+ SafeConstructor.construct_undefined)
+
+class Constructor(SafeConstructor):
+
+ def construct_python_str(self, node):
+ return self.construct_scalar(node).encode('utf-8')
+
+ def construct_python_unicode(self, node):
+ return self.construct_scalar(node)
+
+ def construct_python_long(self, node):
+ return long(self.construct_yaml_int(node))
+
+ def construct_python_complex(self, node):
+ return complex(self.construct_scalar(node))
+
+ def construct_python_tuple(self, node):
+ return tuple(self.construct_sequence(node))
+
+ def find_python_module(self, name, mark):
+ if not name:
+ raise ConstructorError("while constructing a Python module", mark,
+ "expected non-empty name appended to the tag", mark)
+ try:
+ __import__(name)
+ except ImportError, exc:
+ raise ConstructorError("while constructing a Python module", mark,
+ "cannot find module %r (%s)" % (name.encode('utf-8'), exc), mark)
+ return sys.modules[name]
+
+ def find_python_name(self, name, mark):
+ if not name:
+ raise ConstructorError("while constructing a Python object", mark,
+ "expected non-empty name appended to the tag", mark)
+ if u'.' in name:
+ module_name, object_name = name.rsplit('.', 1)
+ else:
+ module_name = '__builtin__'
+ object_name = name
+ try:
+ __import__(module_name)
+ except ImportError, exc:
+ raise ConstructorError("while constructing a Python object", mark,
+ "cannot find module %r (%s)" % (module_name.encode('utf-8'), exc), mark)
+ module = sys.modules[module_name]
+ if not hasattr(module, object_name):
+ raise ConstructorError("while constructing a Python object", mark,
+ "cannot find %r in the module %r" % (object_name.encode('utf-8'),
+ module.__name__), mark)
+ return getattr(module, object_name)
+
+ def construct_python_name(self, suffix, node):
+ value = self.construct_scalar(node)
+ if value:
+ raise ConstructorError("while constructing a Python name", node.start_mark,
+ "expected the empty value, but found %r" % value.encode('utf-8'),
+ node.start_mark)
+ return self.find_python_name(suffix, node.start_mark)
+
+ def construct_python_module(self, suffix, node):
+ value = self.construct_scalar(node)
+ if value:
+ raise ConstructorError("while constructing a Python module", node.start_mark,
+ "expected the empty value, but found %r" % value.encode('utf-8'),
+ node.start_mark)
+ return self.find_python_module(suffix, node.start_mark)
+
+ class classobj: pass
+
+ def make_python_instance(self, suffix, node,
+ args=None, kwds=None, newobj=False):
+ if not args:
+ args = []
+ if not kwds:
+ kwds = {}
+ cls = self.find_python_name(suffix, node.start_mark)
+ if newobj and isinstance(cls, type(self.classobj)) \
+ and not args and not kwds:
+ instance = self.classobj()
+ instance.__class__ = cls
+ return instance
+ elif newobj and isinstance(cls, type):
+ return cls.__new__(cls, *args, **kwds)
+ else:
+ return cls(*args, **kwds)
+
+ def set_python_instance_state(self, instance, state):
+ if hasattr(instance, '__setstate__'):
+ instance.__setstate__(state)
+ else:
+ slotstate = {}
+ if isinstance(state, tuple) and len(state) == 2:
+ state, slotstate = state
+ if hasattr(instance, '__dict__'):
+ instance.__dict__.update(state)
+ elif state:
+ slotstate.update(state)
+ for key, value in slotstate.items():
+ setattr(object, key, value)
+
+ def construct_python_object(self, suffix, node):
+ # Format:
+ # !!python/object:module.name { ... state ... }
+ instance = self.make_python_instance(suffix, node, newobj=True)
+ yield instance
+ deep = hasattr(instance, '__setstate__')
+ state = self.construct_mapping(node, deep=deep)
+ self.set_python_instance_state(instance, state)
+
+ def construct_python_object_apply(self, suffix, node, newobj=False):
+ # Format:
+ # !!python/object/apply # (or !!python/object/new)
+ # args: [ ... arguments ... ]
+ # kwds: { ... keywords ... }
+ # state: ... state ...
+ # listitems: [ ... listitems ... ]
+ # dictitems: { ... dictitems ... }
+ # or short format:
+ # !!python/object/apply [ ... arguments ... ]
+ # The difference between !!python/object/apply and !!python/object/new
+ # is how an object is created, check make_python_instance for details.
+ if isinstance(node, SequenceNode):
+ args = self.construct_sequence(node, deep=True)
+ kwds = {}
+ state = {}
+ listitems = []
+ dictitems = {}
+ else:
+ value = self.construct_mapping(node, deep=True)
+ args = value.get('args', [])
+ kwds = value.get('kwds', {})
+ state = value.get('state', {})
+ listitems = value.get('listitems', [])
+ dictitems = value.get('dictitems', {})
+ instance = self.make_python_instance(suffix, node, args, kwds, newobj)
+ if state:
+ self.set_python_instance_state(instance, state)
+ if listitems:
+ instance.extend(listitems)
+ if dictitems:
+ for key in dictitems:
+ instance[key] = dictitems[key]
+ return instance
+
+ def construct_python_object_new(self, suffix, node):
+ return self.construct_python_object_apply(suffix, node, newobj=True)
+
+Constructor.add_constructor(
+ u'tag:yaml.org,2002:python/none',
+ Constructor.construct_yaml_null)
+
+Constructor.add_constructor(
+ u'tag:yaml.org,2002:python/bool',
+ Constructor.construct_yaml_bool)
+
+Constructor.add_constructor(
+ u'tag:yaml.org,2002:python/str',
+ Constructor.construct_python_str)
+
+Constructor.add_constructor(
+ u'tag:yaml.org,2002:python/unicode',
+ Constructor.construct_python_unicode)
+
+Constructor.add_constructor(
+ u'tag:yaml.org,2002:python/int',
+ Constructor.construct_yaml_int)
+
+Constructor.add_constructor(
+ u'tag:yaml.org,2002:python/long',
+ Constructor.construct_python_long)
+
+Constructor.add_constructor(
+ u'tag:yaml.org,2002:python/float',
+ Constructor.construct_yaml_float)
+
+Constructor.add_constructor(
+ u'tag:yaml.org,2002:python/complex',
+ Constructor.construct_python_complex)
+
+Constructor.add_constructor(
+ u'tag:yaml.org,2002:python/list',
+ Constructor.construct_yaml_seq)
+
+Constructor.add_constructor(
+ u'tag:yaml.org,2002:python/tuple',
+ Constructor.construct_python_tuple)
+
+Constructor.add_constructor(
+ u'tag:yaml.org,2002:python/dict',
+ Constructor.construct_yaml_map)
+
+Constructor.add_multi_constructor(
+ u'tag:yaml.org,2002:python/name:',
+ Constructor.construct_python_name)
+
+Constructor.add_multi_constructor(
+ u'tag:yaml.org,2002:python/module:',
+ Constructor.construct_python_module)
+
+Constructor.add_multi_constructor(
+ u'tag:yaml.org,2002:python/object:',
+ Constructor.construct_python_object)
+
+Constructor.add_multi_constructor(
+ u'tag:yaml.org,2002:python/object/apply:',
+ Constructor.construct_python_object_apply)
+
+Constructor.add_multi_constructor(
+ u'tag:yaml.org,2002:python/object/new:',
+ Constructor.construct_python_object_new)
+
diff --git a/collectors/python.d.plugin/python_modules/pyyaml2/cyaml.py b/collectors/python.d.plugin/python_modules/pyyaml2/cyaml.py
new file mode 100644
index 000000000..2858ab479
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/pyyaml2/cyaml.py
@@ -0,0 +1,86 @@
+# SPDX-License-Identifier: MIT
+
+__all__ = ['CBaseLoader', 'CSafeLoader', 'CLoader',
+ 'CBaseDumper', 'CSafeDumper', 'CDumper']
+
+from _yaml import CParser, CEmitter
+
+from constructor import *
+
+from serializer import *
+from representer import *
+
+from resolver import *
+
+class CBaseLoader(CParser, BaseConstructor, BaseResolver):
+
+ def __init__(self, stream):
+ CParser.__init__(self, stream)
+ BaseConstructor.__init__(self)
+ BaseResolver.__init__(self)
+
+class CSafeLoader(CParser, SafeConstructor, Resolver):
+
+ def __init__(self, stream):
+ CParser.__init__(self, stream)
+ SafeConstructor.__init__(self)
+ Resolver.__init__(self)
+
+class CLoader(CParser, Constructor, Resolver):
+
+ def __init__(self, stream):
+ CParser.__init__(self, stream)
+ Constructor.__init__(self)
+ Resolver.__init__(self)
+
+class CBaseDumper(CEmitter, BaseRepresenter, BaseResolver):
+
+ def __init__(self, stream,
+ default_style=None, default_flow_style=None,
+ canonical=None, indent=None, width=None,
+ allow_unicode=None, line_break=None,
+ encoding=None, explicit_start=None, explicit_end=None,
+ version=None, tags=None):
+ CEmitter.__init__(self, stream, canonical=canonical,
+ indent=indent, width=width, encoding=encoding,
+ allow_unicode=allow_unicode, line_break=line_break,
+ explicit_start=explicit_start, explicit_end=explicit_end,
+ version=version, tags=tags)
+ Representer.__init__(self, default_style=default_style,
+ default_flow_style=default_flow_style)
+ Resolver.__init__(self)
+
+class CSafeDumper(CEmitter, SafeRepresenter, Resolver):
+
+ def __init__(self, stream,
+ default_style=None, default_flow_style=None,
+ canonical=None, indent=None, width=None,
+ allow_unicode=None, line_break=None,
+ encoding=None, explicit_start=None, explicit_end=None,
+ version=None, tags=None):
+ CEmitter.__init__(self, stream, canonical=canonical,
+ indent=indent, width=width, encoding=encoding,
+ allow_unicode=allow_unicode, line_break=line_break,
+ explicit_start=explicit_start, explicit_end=explicit_end,
+ version=version, tags=tags)
+ SafeRepresenter.__init__(self, default_style=default_style,
+ default_flow_style=default_flow_style)
+ Resolver.__init__(self)
+
+class CDumper(CEmitter, Serializer, Representer, Resolver):
+
+ def __init__(self, stream,
+ default_style=None, default_flow_style=None,
+ canonical=None, indent=None, width=None,
+ allow_unicode=None, line_break=None,
+ encoding=None, explicit_start=None, explicit_end=None,
+ version=None, tags=None):
+ CEmitter.__init__(self, stream, canonical=canonical,
+ indent=indent, width=width, encoding=encoding,
+ allow_unicode=allow_unicode, line_break=line_break,
+ explicit_start=explicit_start, explicit_end=explicit_end,
+ version=version, tags=tags)
+ Representer.__init__(self, default_style=default_style,
+ default_flow_style=default_flow_style)
+ Resolver.__init__(self)
+
diff --git a/collectors/python.d.plugin/python_modules/pyyaml2/dumper.py b/collectors/python.d.plugin/python_modules/pyyaml2/dumper.py
new file mode 100644
index 000000000..3685cbeeb
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/pyyaml2/dumper.py
@@ -0,0 +1,63 @@
+# SPDX-License-Identifier: MIT
+
+__all__ = ['BaseDumper', 'SafeDumper', 'Dumper']
+
+from emitter import *
+from serializer import *
+from representer import *
+from resolver import *
+
+class BaseDumper(Emitter, Serializer, BaseRepresenter, BaseResolver):
+
+ def __init__(self, stream,
+ default_style=None, default_flow_style=None,
+ canonical=None, indent=None, width=None,
+ allow_unicode=None, line_break=None,
+ encoding=None, explicit_start=None, explicit_end=None,
+ version=None, tags=None):
+ Emitter.__init__(self, stream, canonical=canonical,
+ indent=indent, width=width,
+ allow_unicode=allow_unicode, line_break=line_break)
+ Serializer.__init__(self, encoding=encoding,
+ explicit_start=explicit_start, explicit_end=explicit_end,
+ version=version, tags=tags)
+ Representer.__init__(self, default_style=default_style,
+ default_flow_style=default_flow_style)
+ Resolver.__init__(self)
+
+class SafeDumper(Emitter, Serializer, SafeRepresenter, Resolver):
+
+ def __init__(self, stream,
+ default_style=None, default_flow_style=None,
+ canonical=None, indent=None, width=None,
+ allow_unicode=None, line_break=None,
+ encoding=None, explicit_start=None, explicit_end=None,
+ version=None, tags=None):
+ Emitter.__init__(self, stream, canonical=canonical,
+ indent=indent, width=width,
+ allow_unicode=allow_unicode, line_break=line_break)
+ Serializer.__init__(self, encoding=encoding,
+ explicit_start=explicit_start, explicit_end=explicit_end,
+ version=version, tags=tags)
+ SafeRepresenter.__init__(self, default_style=default_style,
+ default_flow_style=default_flow_style)
+ Resolver.__init__(self)
+
+class Dumper(Emitter, Serializer, Representer, Resolver):
+
+ def __init__(self, stream,
+ default_style=None, default_flow_style=None,
+ canonical=None, indent=None, width=None,
+ allow_unicode=None, line_break=None,
+ encoding=None, explicit_start=None, explicit_end=None,
+ version=None, tags=None):
+ Emitter.__init__(self, stream, canonical=canonical,
+ indent=indent, width=width,
+ allow_unicode=allow_unicode, line_break=line_break)
+ Serializer.__init__(self, encoding=encoding,
+ explicit_start=explicit_start, explicit_end=explicit_end,
+ version=version, tags=tags)
+ Representer.__init__(self, default_style=default_style,
+ default_flow_style=default_flow_style)
+ Resolver.__init__(self)
+
diff --git a/collectors/python.d.plugin/python_modules/pyyaml2/emitter.py b/collectors/python.d.plugin/python_modules/pyyaml2/emitter.py
new file mode 100644
index 000000000..9a460a0fd
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/pyyaml2/emitter.py
@@ -0,0 +1,1141 @@
+# SPDX-License-Identifier: MIT
+
+# Emitter expects events obeying the following grammar:
+# stream ::= STREAM-START document* STREAM-END
+# document ::= DOCUMENT-START node DOCUMENT-END
+# node ::= SCALAR | sequence | mapping
+# sequence ::= SEQUENCE-START node* SEQUENCE-END
+# mapping ::= MAPPING-START (node node)* MAPPING-END
+
+__all__ = ['Emitter', 'EmitterError']
+
+from error import YAMLError
+from events import *
+
+class EmitterError(YAMLError):
+ pass
+
+class ScalarAnalysis(object):
+ def __init__(self, scalar, empty, multiline,
+ allow_flow_plain, allow_block_plain,
+ allow_single_quoted, allow_double_quoted,
+ allow_block):
+ self.scalar = scalar
+ self.empty = empty
+ self.multiline = multiline
+ self.allow_flow_plain = allow_flow_plain
+ self.allow_block_plain = allow_block_plain
+ self.allow_single_quoted = allow_single_quoted
+ self.allow_double_quoted = allow_double_quoted
+ self.allow_block = allow_block
+
+class Emitter(object):
+
+ DEFAULT_TAG_PREFIXES = {
+ u'!' : u'!',
+ u'tag:yaml.org,2002:' : u'!!',
+ }
+
+ def __init__(self, stream, canonical=None, indent=None, width=None,
+ allow_unicode=None, line_break=None):
+
+ # The stream should have the methods `write` and possibly `flush`.
+ self.stream = stream
+
+        # Encoding can be overridden by STREAM-START.
+ self.encoding = None
+
+ # Emitter is a state machine with a stack of states to handle nested
+ # structures.
+ self.states = []
+ self.state = self.expect_stream_start
+
+ # Current event and the event queue.
+ self.events = []
+ self.event = None
+
+ # The current indentation level and the stack of previous indents.
+ self.indents = []
+ self.indent = None
+
+ # Flow level.
+ self.flow_level = 0
+
+ # Contexts.
+ self.root_context = False
+ self.sequence_context = False
+ self.mapping_context = False
+ self.simple_key_context = False
+
+ # Characteristics of the last emitted character:
+ # - current position.
+ # - is it a whitespace?
+ # - is it an indention character
+ # (indentation space, '-', '?', or ':')?
+ self.line = 0
+ self.column = 0
+ self.whitespace = True
+ self.indention = True
+
+ # Whether the document requires an explicit document indicator
+ self.open_ended = False
+
+ # Formatting details.
+ self.canonical = canonical
+ self.allow_unicode = allow_unicode
+ self.best_indent = 2
+ if indent and 1 < indent < 10:
+ self.best_indent = indent
+ self.best_width = 80
+ if width and width > self.best_indent*2:
+ self.best_width = width
+ self.best_line_break = u'\n'
+ if line_break in [u'\r', u'\n', u'\r\n']:
+ self.best_line_break = line_break
+
+ # Tag prefixes.
+ self.tag_prefixes = None
+
+ # Prepared anchor and tag.
+ self.prepared_anchor = None
+ self.prepared_tag = None
+
+ # Scalar analysis and style.
+ self.analysis = None
+ self.style = None
+
+ def dispose(self):
+ # Reset the state attributes (to clear self-references)
+ self.states = []
+ self.state = None
+
+ def emit(self, event):
+ self.events.append(event)
+ while not self.need_more_events():
+ self.event = self.events.pop(0)
+ self.state()
+ self.event = None
+
+ # In some cases, we wait for a few next events before emitting.
+
+ def need_more_events(self):
+ if not self.events:
+ return True
+ event = self.events[0]
+ if isinstance(event, DocumentStartEvent):
+ return self.need_events(1)
+ elif isinstance(event, SequenceStartEvent):
+ return self.need_events(2)
+ elif isinstance(event, MappingStartEvent):
+ return self.need_events(3)
+ else:
+ return False
+
+ def need_events(self, count):
+ level = 0
+ for event in self.events[1:]:
+ if isinstance(event, (DocumentStartEvent, CollectionStartEvent)):
+ level += 1
+ elif isinstance(event, (DocumentEndEvent, CollectionEndEvent)):
+ level -= 1
+ elif isinstance(event, StreamEndEvent):
+ level = -1
+ if level < 0:
+ return False
+ return (len(self.events) < count+1)
+
+ def increase_indent(self, flow=False, indentless=False):
+ self.indents.append(self.indent)
+ if self.indent is None:
+ if flow:
+ self.indent = self.best_indent
+ else:
+ self.indent = 0
+ elif not indentless:
+ self.indent += self.best_indent
+
+ # States.
+
+ # Stream handlers.
+
+ def expect_stream_start(self):
+ if isinstance(self.event, StreamStartEvent):
+ if self.event.encoding and not getattr(self.stream, 'encoding', None):
+ self.encoding = self.event.encoding
+ self.write_stream_start()
+ self.state = self.expect_first_document_start
+ else:
+ raise EmitterError("expected StreamStartEvent, but got %s"
+ % self.event)
+
+ def expect_nothing(self):
+ raise EmitterError("expected nothing, but got %s" % self.event)
+
+ # Document handlers.
+
+ def expect_first_document_start(self):
+ return self.expect_document_start(first=True)
+
+ def expect_document_start(self, first=False):
+ if isinstance(self.event, DocumentStartEvent):
+ if (self.event.version or self.event.tags) and self.open_ended:
+ self.write_indicator(u'...', True)
+ self.write_indent()
+ if self.event.version:
+ version_text = self.prepare_version(self.event.version)
+ self.write_version_directive(version_text)
+ self.tag_prefixes = self.DEFAULT_TAG_PREFIXES.copy()
+ if self.event.tags:
+ handles = self.event.tags.keys()
+ handles.sort()
+ for handle in handles:
+ prefix = self.event.tags[handle]
+ self.tag_prefixes[prefix] = handle
+ handle_text = self.prepare_tag_handle(handle)
+ prefix_text = self.prepare_tag_prefix(prefix)
+ self.write_tag_directive(handle_text, prefix_text)
+ implicit = (first and not self.event.explicit and not self.canonical
+ and not self.event.version and not self.event.tags
+ and not self.check_empty_document())
+ if not implicit:
+ self.write_indent()
+ self.write_indicator(u'---', True)
+ if self.canonical:
+ self.write_indent()
+ self.state = self.expect_document_root
+ elif isinstance(self.event, StreamEndEvent):
+ if self.open_ended:
+ self.write_indicator(u'...', True)
+ self.write_indent()
+ self.write_stream_end()
+ self.state = self.expect_nothing
+ else:
+ raise EmitterError("expected DocumentStartEvent, but got %s"
+ % self.event)
+
+ def expect_document_end(self):
+ if isinstance(self.event, DocumentEndEvent):
+ self.write_indent()
+ if self.event.explicit:
+ self.write_indicator(u'...', True)
+ self.write_indent()
+ self.flush_stream()
+ self.state = self.expect_document_start
+ else:
+ raise EmitterError("expected DocumentEndEvent, but got %s"
+ % self.event)
+
+ def expect_document_root(self):
+ self.states.append(self.expect_document_end)
+ self.expect_node(root=True)
+
+ # Node handlers.
+
+ def expect_node(self, root=False, sequence=False, mapping=False,
+ simple_key=False):
+ self.root_context = root
+ self.sequence_context = sequence
+ self.mapping_context = mapping
+ self.simple_key_context = simple_key
+ if isinstance(self.event, AliasEvent):
+ self.expect_alias()
+ elif isinstance(self.event, (ScalarEvent, CollectionStartEvent)):
+ self.process_anchor(u'&')
+ self.process_tag()
+ if isinstance(self.event, ScalarEvent):
+ self.expect_scalar()
+ elif isinstance(self.event, SequenceStartEvent):
+ if self.flow_level or self.canonical or self.event.flow_style \
+ or self.check_empty_sequence():
+ self.expect_flow_sequence()
+ else:
+ self.expect_block_sequence()
+ elif isinstance(self.event, MappingStartEvent):
+ if self.flow_level or self.canonical or self.event.flow_style \
+ or self.check_empty_mapping():
+ self.expect_flow_mapping()
+ else:
+ self.expect_block_mapping()
+ else:
+ raise EmitterError("expected NodeEvent, but got %s" % self.event)
+
+ def expect_alias(self):
+ if self.event.anchor is None:
+ raise EmitterError("anchor is not specified for alias")
+ self.process_anchor(u'*')
+ self.state = self.states.pop()
+
+ def expect_scalar(self):
+ self.increase_indent(flow=True)
+ self.process_scalar()
+ self.indent = self.indents.pop()
+ self.state = self.states.pop()
+
+ # Flow sequence handlers.
+
+ def expect_flow_sequence(self):
+ self.write_indicator(u'[', True, whitespace=True)
+ self.flow_level += 1
+ self.increase_indent(flow=True)
+ self.state = self.expect_first_flow_sequence_item
+
+ def expect_first_flow_sequence_item(self):
+ if isinstance(self.event, SequenceEndEvent):
+ self.indent = self.indents.pop()
+ self.flow_level -= 1
+ self.write_indicator(u']', False)
+ self.state = self.states.pop()
+ else:
+ if self.canonical or self.column > self.best_width:
+ self.write_indent()
+ self.states.append(self.expect_flow_sequence_item)
+ self.expect_node(sequence=True)
+
+ def expect_flow_sequence_item(self):
+ if isinstance(self.event, SequenceEndEvent):
+ self.indent = self.indents.pop()
+ self.flow_level -= 1
+ if self.canonical:
+ self.write_indicator(u',', False)
+ self.write_indent()
+ self.write_indicator(u']', False)
+ self.state = self.states.pop()
+ else:
+ self.write_indicator(u',', False)
+ if self.canonical or self.column > self.best_width:
+ self.write_indent()
+ self.states.append(self.expect_flow_sequence_item)
+ self.expect_node(sequence=True)
+
+ # Flow mapping handlers.
+
+ def expect_flow_mapping(self):
+ self.write_indicator(u'{', True, whitespace=True)
+ self.flow_level += 1
+ self.increase_indent(flow=True)
+ self.state = self.expect_first_flow_mapping_key
+
+ def expect_first_flow_mapping_key(self):
+ if isinstance(self.event, MappingEndEvent):
+ self.indent = self.indents.pop()
+ self.flow_level -= 1
+ self.write_indicator(u'}', False)
+ self.state = self.states.pop()
+ else:
+ if self.canonical or self.column > self.best_width:
+ self.write_indent()
+ if not self.canonical and self.check_simple_key():
+ self.states.append(self.expect_flow_mapping_simple_value)
+ self.expect_node(mapping=True, simple_key=True)
+ else:
+ self.write_indicator(u'?', True)
+ self.states.append(self.expect_flow_mapping_value)
+ self.expect_node(mapping=True)
+
+ def expect_flow_mapping_key(self):
+ if isinstance(self.event, MappingEndEvent):
+ self.indent = self.indents.pop()
+ self.flow_level -= 1
+ if self.canonical:
+ self.write_indicator(u',', False)
+ self.write_indent()
+ self.write_indicator(u'}', False)
+ self.state = self.states.pop()
+ else:
+ self.write_indicator(u',', False)
+ if self.canonical or self.column > self.best_width:
+ self.write_indent()
+ if not self.canonical and self.check_simple_key():
+ self.states.append(self.expect_flow_mapping_simple_value)
+ self.expect_node(mapping=True, simple_key=True)
+ else:
+ self.write_indicator(u'?', True)
+ self.states.append(self.expect_flow_mapping_value)
+ self.expect_node(mapping=True)
+
+ def expect_flow_mapping_simple_value(self):
+ self.write_indicator(u':', False)
+ self.states.append(self.expect_flow_mapping_key)
+ self.expect_node(mapping=True)
+
+ def expect_flow_mapping_value(self):
+ if self.canonical or self.column > self.best_width:
+ self.write_indent()
+ self.write_indicator(u':', True)
+ self.states.append(self.expect_flow_mapping_key)
+ self.expect_node(mapping=True)
+
+ # Block sequence handlers.
+
+ def expect_block_sequence(self):
+ indentless = (self.mapping_context and not self.indention)
+ self.increase_indent(flow=False, indentless=indentless)
+ self.state = self.expect_first_block_sequence_item
+
+ def expect_first_block_sequence_item(self):
+ return self.expect_block_sequence_item(first=True)
+
+ def expect_block_sequence_item(self, first=False):
+ if not first and isinstance(self.event, SequenceEndEvent):
+ self.indent = self.indents.pop()
+ self.state = self.states.pop()
+ else:
+ self.write_indent()
+ self.write_indicator(u'-', True, indention=True)
+ self.states.append(self.expect_block_sequence_item)
+ self.expect_node(sequence=True)
+
+ # Block mapping handlers.
+
+ def expect_block_mapping(self):
+ self.increase_indent(flow=False)
+ self.state = self.expect_first_block_mapping_key
+
+ def expect_first_block_mapping_key(self):
+ return self.expect_block_mapping_key(first=True)
+
+ def expect_block_mapping_key(self, first=False):
+ if not first and isinstance(self.event, MappingEndEvent):
+ self.indent = self.indents.pop()
+ self.state = self.states.pop()
+ else:
+ self.write_indent()
+ if self.check_simple_key():
+ self.states.append(self.expect_block_mapping_simple_value)
+ self.expect_node(mapping=True, simple_key=True)
+ else:
+ self.write_indicator(u'?', True, indention=True)
+ self.states.append(self.expect_block_mapping_value)
+ self.expect_node(mapping=True)
+
+ def expect_block_mapping_simple_value(self):
+ self.write_indicator(u':', False)
+ self.states.append(self.expect_block_mapping_key)
+ self.expect_node(mapping=True)
+
+ def expect_block_mapping_value(self):
+ self.write_indent()
+ self.write_indicator(u':', True, indention=True)
+ self.states.append(self.expect_block_mapping_key)
+ self.expect_node(mapping=True)
+
+ # Checkers.
+
+ def check_empty_sequence(self):
+ return (isinstance(self.event, SequenceStartEvent) and self.events
+ and isinstance(self.events[0], SequenceEndEvent))
+
+ def check_empty_mapping(self):
+ return (isinstance(self.event, MappingStartEvent) and self.events
+ and isinstance(self.events[0], MappingEndEvent))
+
+ def check_empty_document(self):
+ if not isinstance(self.event, DocumentStartEvent) or not self.events:
+ return False
+ event = self.events[0]
+ return (isinstance(event, ScalarEvent) and event.anchor is None
+ and event.tag is None and event.implicit and event.value == u'')
+
+ def check_simple_key(self):
+ length = 0
+ if isinstance(self.event, NodeEvent) and self.event.anchor is not None:
+ if self.prepared_anchor is None:
+ self.prepared_anchor = self.prepare_anchor(self.event.anchor)
+ length += len(self.prepared_anchor)
+ if isinstance(self.event, (ScalarEvent, CollectionStartEvent)) \
+ and self.event.tag is not None:
+ if self.prepared_tag is None:
+ self.prepared_tag = self.prepare_tag(self.event.tag)
+ length += len(self.prepared_tag)
+ if isinstance(self.event, ScalarEvent):
+ if self.analysis is None:
+ self.analysis = self.analyze_scalar(self.event.value)
+ length += len(self.analysis.scalar)
+ return (length < 128 and (isinstance(self.event, AliasEvent)
+ or (isinstance(self.event, ScalarEvent)
+ and not self.analysis.empty and not self.analysis.multiline)
+ or self.check_empty_sequence() or self.check_empty_mapping()))
+
+ # Anchor, Tag, and Scalar processors.
+
+ def process_anchor(self, indicator):
+ if self.event.anchor is None:
+ self.prepared_anchor = None
+ return
+ if self.prepared_anchor is None:
+ self.prepared_anchor = self.prepare_anchor(self.event.anchor)
+ if self.prepared_anchor:
+ self.write_indicator(indicator+self.prepared_anchor, True)
+ self.prepared_anchor = None
+
+ def process_tag(self):
+ tag = self.event.tag
+ if isinstance(self.event, ScalarEvent):
+ if self.style is None:
+ self.style = self.choose_scalar_style()
+ if ((not self.canonical or tag is None) and
+ ((self.style == '' and self.event.implicit[0])
+ or (self.style != '' and self.event.implicit[1]))):
+ self.prepared_tag = None
+ return
+ if self.event.implicit[0] and tag is None:
+ tag = u'!'
+ self.prepared_tag = None
+ else:
+ if (not self.canonical or tag is None) and self.event.implicit:
+ self.prepared_tag = None
+ return
+ if tag is None:
+ raise EmitterError("tag is not specified")
+ if self.prepared_tag is None:
+ self.prepared_tag = self.prepare_tag(tag)
+ if self.prepared_tag:
+ self.write_indicator(self.prepared_tag, True)
+ self.prepared_tag = None
+
+ def choose_scalar_style(self):
+ if self.analysis is None:
+ self.analysis = self.analyze_scalar(self.event.value)
+ if self.event.style == '"' or self.canonical:
+ return '"'
+ if not self.event.style and self.event.implicit[0]:
+ if (not (self.simple_key_context and
+ (self.analysis.empty or self.analysis.multiline))
+ and (self.flow_level and self.analysis.allow_flow_plain
+ or (not self.flow_level and self.analysis.allow_block_plain))):
+ return ''
+ if self.event.style and self.event.style in '|>':
+ if (not self.flow_level and not self.simple_key_context
+ and self.analysis.allow_block):
+ return self.event.style
+ if not self.event.style or self.event.style == '\'':
+ if (self.analysis.allow_single_quoted and
+ not (self.simple_key_context and self.analysis.multiline)):
+ return '\''
+ return '"'
+
+ def process_scalar(self):
+ if self.analysis is None:
+ self.analysis = self.analyze_scalar(self.event.value)
+ if self.style is None:
+ self.style = self.choose_scalar_style()
+ split = (not self.simple_key_context)
+ #if self.analysis.multiline and split \
+ # and (not self.style or self.style in '\'\"'):
+ # self.write_indent()
+ if self.style == '"':
+ self.write_double_quoted(self.analysis.scalar, split)
+ elif self.style == '\'':
+ self.write_single_quoted(self.analysis.scalar, split)
+ elif self.style == '>':
+ self.write_folded(self.analysis.scalar)
+ elif self.style == '|':
+ self.write_literal(self.analysis.scalar)
+ else:
+ self.write_plain(self.analysis.scalar, split)
+ self.analysis = None
+ self.style = None
+
+ # Analyzers.
+
+ def prepare_version(self, version):
+ major, minor = version
+ if major != 1:
+ raise EmitterError("unsupported YAML version: %d.%d" % (major, minor))
+ return u'%d.%d' % (major, minor)
+
+ def prepare_tag_handle(self, handle):
+ if not handle:
+ raise EmitterError("tag handle must not be empty")
+ if handle[0] != u'!' or handle[-1] != u'!':
+ raise EmitterError("tag handle must start and end with '!': %r"
+ % (handle.encode('utf-8')))
+ for ch in handle[1:-1]:
+ if not (u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \
+ or ch in u'-_'):
+ raise EmitterError("invalid character %r in the tag handle: %r"
+ % (ch.encode('utf-8'), handle.encode('utf-8')))
+ return handle
+
+ def prepare_tag_prefix(self, prefix):
+ if not prefix:
+ raise EmitterError("tag prefix must not be empty")
+ chunks = []
+ start = end = 0
+ if prefix[0] == u'!':
+ end = 1
+ while end < len(prefix):
+ ch = prefix[end]
+ if u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \
+ or ch in u'-;/?!:@&=+$,_.~*\'()[]':
+ end += 1
+ else:
+ if start < end:
+ chunks.append(prefix[start:end])
+ start = end = end+1
+ data = ch.encode('utf-8')
+ for ch in data:
+ chunks.append(u'%%%02X' % ord(ch))
+ if start < end:
+ chunks.append(prefix[start:end])
+ return u''.join(chunks)
+
+ def prepare_tag(self, tag):
+ if not tag:
+ raise EmitterError("tag must not be empty")
+ if tag == u'!':
+ return tag
+ handle = None
+ suffix = tag
+ prefixes = self.tag_prefixes.keys()
+ prefixes.sort()
+ for prefix in prefixes:
+ if tag.startswith(prefix) \
+ and (prefix == u'!' or len(prefix) < len(tag)):
+ handle = self.tag_prefixes[prefix]
+ suffix = tag[len(prefix):]
+ chunks = []
+ start = end = 0
+ while end < len(suffix):
+ ch = suffix[end]
+ if u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \
+ or ch in u'-;/?:@&=+$,_.~*\'()[]' \
+ or (ch == u'!' and handle != u'!'):
+ end += 1
+ else:
+ if start < end:
+ chunks.append(suffix[start:end])
+ start = end = end+1
+ data = ch.encode('utf-8')
+ for ch in data:
+ chunks.append(u'%%%02X' % ord(ch))
+ if start < end:
+ chunks.append(suffix[start:end])
+ suffix_text = u''.join(chunks)
+ if handle:
+ return u'%s%s' % (handle, suffix_text)
+ else:
+ return u'!<%s>' % suffix_text
+
+ def prepare_anchor(self, anchor):
+ if not anchor:
+ raise EmitterError("anchor must not be empty")
+ for ch in anchor:
+ if not (u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \
+ or ch in u'-_'):
+ raise EmitterError("invalid character %r in the anchor: %r"
+ % (ch.encode('utf-8'), anchor.encode('utf-8')))
+ return anchor
+
+ def analyze_scalar(self, scalar):
+
+ # Empty scalar is a special case.
+ if not scalar:
+ return ScalarAnalysis(scalar=scalar, empty=True, multiline=False,
+ allow_flow_plain=False, allow_block_plain=True,
+ allow_single_quoted=True, allow_double_quoted=True,
+ allow_block=False)
+
+ # Indicators and special characters.
+ block_indicators = False
+ flow_indicators = False
+ line_breaks = False
+ special_characters = False
+
+ # Important whitespace combinations.
+ leading_space = False
+ leading_break = False
+ trailing_space = False
+ trailing_break = False
+ break_space = False
+ space_break = False
+
+ # Check document indicators.
+ if scalar.startswith(u'---') or scalar.startswith(u'...'):
+ block_indicators = True
+ flow_indicators = True
+
+ # First character or preceded by a whitespace.
+ preceeded_by_whitespace = True
+
+ # Last character or followed by a whitespace.
+ followed_by_whitespace = (len(scalar) == 1 or
+ scalar[1] in u'\0 \t\r\n\x85\u2028\u2029')
+
+ # The previous character is a space.
+ previous_space = False
+
+ # The previous character is a break.
+ previous_break = False
+
+ index = 0
+ while index < len(scalar):
+ ch = scalar[index]
+
+ # Check for indicators.
+ if index == 0:
+ # Leading indicators are special characters.
+ if ch in u'#,[]{}&*!|>\'\"%@`':
+ flow_indicators = True
+ block_indicators = True
+ if ch in u'?:':
+ flow_indicators = True
+ if followed_by_whitespace:
+ block_indicators = True
+ if ch == u'-' and followed_by_whitespace:
+ flow_indicators = True
+ block_indicators = True
+ else:
+ # Some indicators cannot appear within a scalar as well.
+ if ch in u',?[]{}':
+ flow_indicators = True
+ if ch == u':':
+ flow_indicators = True
+ if followed_by_whitespace:
+ block_indicators = True
+ if ch == u'#' and preceeded_by_whitespace:
+ flow_indicators = True
+ block_indicators = True
+
+ # Check for line breaks, special, and unicode characters.
+ if ch in u'\n\x85\u2028\u2029':
+ line_breaks = True
+ if not (ch == u'\n' or u'\x20' <= ch <= u'\x7E'):
+ if (ch == u'\x85' or u'\xA0' <= ch <= u'\uD7FF'
+ or u'\uE000' <= ch <= u'\uFFFD') and ch != u'\uFEFF':
+ unicode_characters = True
+ if not self.allow_unicode:
+ special_characters = True
+ else:
+ special_characters = True
+
+ # Detect important whitespace combinations.
+ if ch == u' ':
+ if index == 0:
+ leading_space = True
+ if index == len(scalar)-1:
+ trailing_space = True
+ if previous_break:
+ break_space = True
+ previous_space = True
+ previous_break = False
+ elif ch in u'\n\x85\u2028\u2029':
+ if index == 0:
+ leading_break = True
+ if index == len(scalar)-1:
+ trailing_break = True
+ if previous_space:
+ space_break = True
+ previous_space = False
+ previous_break = True
+ else:
+ previous_space = False
+ previous_break = False
+
+ # Prepare for the next character.
+ index += 1
+ preceeded_by_whitespace = (ch in u'\0 \t\r\n\x85\u2028\u2029')
+ followed_by_whitespace = (index+1 >= len(scalar) or
+ scalar[index+1] in u'\0 \t\r\n\x85\u2028\u2029')
+
+ # Let's decide what styles are allowed.
+ allow_flow_plain = True
+ allow_block_plain = True
+ allow_single_quoted = True
+ allow_double_quoted = True
+ allow_block = True
+
+ # Leading and trailing whitespaces are bad for plain scalars.
+ if (leading_space or leading_break
+ or trailing_space or trailing_break):
+ allow_flow_plain = allow_block_plain = False
+
+ # We do not permit trailing spaces for block scalars.
+ if trailing_space:
+ allow_block = False
+
+ # Spaces at the beginning of a new line are only acceptable for block
+ # scalars.
+ if break_space:
+ allow_flow_plain = allow_block_plain = allow_single_quoted = False
+
+ # Spaces followed by breaks, as well as special character are only
+ # allowed for double quoted scalars.
+ if space_break or special_characters:
+ allow_flow_plain = allow_block_plain = \
+ allow_single_quoted = allow_block = False
+
+ # Although the plain scalar writer supports breaks, we never emit
+ # multiline plain scalars.
+ if line_breaks:
+ allow_flow_plain = allow_block_plain = False
+
+ # Flow indicators are forbidden for flow plain scalars.
+ if flow_indicators:
+ allow_flow_plain = False
+
+ # Block indicators are forbidden for block plain scalars.
+ if block_indicators:
+ allow_block_plain = False
+
+ return ScalarAnalysis(scalar=scalar,
+ empty=False, multiline=line_breaks,
+ allow_flow_plain=allow_flow_plain,
+ allow_block_plain=allow_block_plain,
+ allow_single_quoted=allow_single_quoted,
+ allow_double_quoted=allow_double_quoted,
+ allow_block=allow_block)
+
+ # Writers.
+
+ def flush_stream(self):
+ if hasattr(self.stream, 'flush'):
+ self.stream.flush()
+
+ def write_stream_start(self):
+ # Write BOM if needed.
+ if self.encoding and self.encoding.startswith('utf-16'):
+ self.stream.write(u'\uFEFF'.encode(self.encoding))
+
+ def write_stream_end(self):
+ self.flush_stream()
+
+ def write_indicator(self, indicator, need_whitespace,
+ whitespace=False, indention=False):
+ if self.whitespace or not need_whitespace:
+ data = indicator
+ else:
+ data = u' '+indicator
+ self.whitespace = whitespace
+ self.indention = self.indention and indention
+ self.column += len(data)
+ self.open_ended = False
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+
+ def write_indent(self):
+ indent = self.indent or 0
+ if not self.indention or self.column > indent \
+ or (self.column == indent and not self.whitespace):
+ self.write_line_break()
+ if self.column < indent:
+ self.whitespace = True
+ data = u' '*(indent-self.column)
+ self.column = indent
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+
+ def write_line_break(self, data=None):
+ if data is None:
+ data = self.best_line_break
+ self.whitespace = True
+ self.indention = True
+ self.line += 1
+ self.column = 0
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+
+ def write_version_directive(self, version_text):
+ data = u'%%YAML %s' % version_text
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ self.write_line_break()
+
+ def write_tag_directive(self, handle_text, prefix_text):
+ data = u'%%TAG %s %s' % (handle_text, prefix_text)
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ self.write_line_break()
+
+ # Scalar streams.
+
+ def write_single_quoted(self, text, split=True):
+ self.write_indicator(u'\'', True)
+ spaces = False
+ breaks = False
+ start = end = 0
+ while end <= len(text):
+ ch = None
+ if end < len(text):
+ ch = text[end]
+ if spaces:
+ if ch is None or ch != u' ':
+ if start+1 == end and self.column > self.best_width and split \
+ and start != 0 and end != len(text):
+ self.write_indent()
+ else:
+ data = text[start:end]
+ self.column += len(data)
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ start = end
+ elif breaks:
+ if ch is None or ch not in u'\n\x85\u2028\u2029':
+ if text[start] == u'\n':
+ self.write_line_break()
+ for br in text[start:end]:
+ if br == u'\n':
+ self.write_line_break()
+ else:
+ self.write_line_break(br)
+ self.write_indent()
+ start = end
+ else:
+ if ch is None or ch in u' \n\x85\u2028\u2029' or ch == u'\'':
+ if start < end:
+ data = text[start:end]
+ self.column += len(data)
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ start = end
+ if ch == u'\'':
+ data = u'\'\''
+ self.column += 2
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ start = end + 1
+ if ch is not None:
+ spaces = (ch == u' ')
+ breaks = (ch in u'\n\x85\u2028\u2029')
+ end += 1
+ self.write_indicator(u'\'', False)
+
+ ESCAPE_REPLACEMENTS = {
+ u'\0': u'0',
+ u'\x07': u'a',
+ u'\x08': u'b',
+ u'\x09': u't',
+ u'\x0A': u'n',
+ u'\x0B': u'v',
+ u'\x0C': u'f',
+ u'\x0D': u'r',
+ u'\x1B': u'e',
+ u'\"': u'\"',
+ u'\\': u'\\',
+ u'\x85': u'N',
+ u'\xA0': u'_',
+ u'\u2028': u'L',
+ u'\u2029': u'P',
+ }
+
+ def write_double_quoted(self, text, split=True):
+ self.write_indicator(u'"', True)
+ start = end = 0
+ while end <= len(text):
+ ch = None
+ if end < len(text):
+ ch = text[end]
+ if ch is None or ch in u'"\\\x85\u2028\u2029\uFEFF' \
+ or not (u'\x20' <= ch <= u'\x7E'
+ or (self.allow_unicode
+ and (u'\xA0' <= ch <= u'\uD7FF'
+ or u'\uE000' <= ch <= u'\uFFFD'))):
+ if start < end:
+ data = text[start:end]
+ self.column += len(data)
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ start = end
+ if ch is not None:
+ if ch in self.ESCAPE_REPLACEMENTS:
+ data = u'\\'+self.ESCAPE_REPLACEMENTS[ch]
+ elif ch <= u'\xFF':
+ data = u'\\x%02X' % ord(ch)
+ elif ch <= u'\uFFFF':
+ data = u'\\u%04X' % ord(ch)
+ else:
+ data = u'\\U%08X' % ord(ch)
+ self.column += len(data)
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ start = end+1
+ if 0 < end < len(text)-1 and (ch == u' ' or start >= end) \
+ and self.column+(end-start) > self.best_width and split:
+ data = text[start:end]+u'\\'
+ if start < end:
+ start = end
+ self.column += len(data)
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ self.write_indent()
+ self.whitespace = False
+ self.indention = False
+ if text[start] == u' ':
+ data = u'\\'
+ self.column += len(data)
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ end += 1
+ self.write_indicator(u'"', False)
+
+ def determine_block_hints(self, text):
+ hints = u''
+ if text:
+ if text[0] in u' \n\x85\u2028\u2029':
+ hints += unicode(self.best_indent)
+ if text[-1] not in u'\n\x85\u2028\u2029':
+ hints += u'-'
+ elif len(text) == 1 or text[-2] in u'\n\x85\u2028\u2029':
+ hints += u'+'
+ return hints
+
+ def write_folded(self, text):
+ hints = self.determine_block_hints(text)
+ self.write_indicator(u'>'+hints, True)
+ if hints[-1:] == u'+':
+ self.open_ended = True
+ self.write_line_break()
+ leading_space = True
+ spaces = False
+ breaks = True
+ start = end = 0
+ while end <= len(text):
+ ch = None
+ if end < len(text):
+ ch = text[end]
+ if breaks:
+ if ch is None or ch not in u'\n\x85\u2028\u2029':
+ if not leading_space and ch is not None and ch != u' ' \
+ and text[start] == u'\n':
+ self.write_line_break()
+ leading_space = (ch == u' ')
+ for br in text[start:end]:
+ if br == u'\n':
+ self.write_line_break()
+ else:
+ self.write_line_break(br)
+ if ch is not None:
+ self.write_indent()
+ start = end
+ elif spaces:
+ if ch != u' ':
+ if start+1 == end and self.column > self.best_width:
+ self.write_indent()
+ else:
+ data = text[start:end]
+ self.column += len(data)
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ start = end
+ else:
+ if ch is None or ch in u' \n\x85\u2028\u2029':
+ data = text[start:end]
+ self.column += len(data)
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ if ch is None:
+ self.write_line_break()
+ start = end
+ if ch is not None:
+ breaks = (ch in u'\n\x85\u2028\u2029')
+ spaces = (ch == u' ')
+ end += 1
+
+ def write_literal(self, text):
+ hints = self.determine_block_hints(text)
+ self.write_indicator(u'|'+hints, True)
+ if hints[-1:] == u'+':
+ self.open_ended = True
+ self.write_line_break()
+ breaks = True
+ start = end = 0
+ while end <= len(text):
+ ch = None
+ if end < len(text):
+ ch = text[end]
+ if breaks:
+ if ch is None or ch not in u'\n\x85\u2028\u2029':
+ for br in text[start:end]:
+ if br == u'\n':
+ self.write_line_break()
+ else:
+ self.write_line_break(br)
+ if ch is not None:
+ self.write_indent()
+ start = end
+ else:
+ if ch is None or ch in u'\n\x85\u2028\u2029':
+ data = text[start:end]
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ if ch is None:
+ self.write_line_break()
+ start = end
+ if ch is not None:
+ breaks = (ch in u'\n\x85\u2028\u2029')
+ end += 1
+
+ def write_plain(self, text, split=True):
+ if self.root_context:
+ self.open_ended = True
+ if not text:
+ return
+ if not self.whitespace:
+ data = u' '
+ self.column += len(data)
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ self.whitespace = False
+ self.indention = False
+ spaces = False
+ breaks = False
+ start = end = 0
+ while end <= len(text):
+ ch = None
+ if end < len(text):
+ ch = text[end]
+ if spaces:
+ if ch != u' ':
+ if start+1 == end and self.column > self.best_width and split:
+ self.write_indent()
+ self.whitespace = False
+ self.indention = False
+ else:
+ data = text[start:end]
+ self.column += len(data)
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ start = end
+ elif breaks:
+ if ch not in u'\n\x85\u2028\u2029':
+ if text[start] == u'\n':
+ self.write_line_break()
+ for br in text[start:end]:
+ if br == u'\n':
+ self.write_line_break()
+ else:
+ self.write_line_break(br)
+ self.write_indent()
+ self.whitespace = False
+ self.indention = False
+ start = end
+ else:
+ if ch is None or ch in u' \n\x85\u2028\u2029':
+ data = text[start:end]
+ self.column += len(data)
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ start = end
+ if ch is not None:
+ spaces = (ch == u' ')
+ breaks = (ch in u'\n\x85\u2028\u2029')
+ end += 1
+
diff --git a/collectors/python.d.plugin/python_modules/pyyaml2/error.py b/collectors/python.d.plugin/python_modules/pyyaml2/error.py
new file mode 100644
index 000000000..5466be721
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/pyyaml2/error.py
@@ -0,0 +1,76 @@
+# SPDX-License-Identifier: MIT
+
+__all__ = ['Mark', 'YAMLError', 'MarkedYAMLError']
+
+class Mark(object):
+
+ def __init__(self, name, index, line, column, buffer, pointer):
+ self.name = name
+ self.index = index
+ self.line = line
+ self.column = column
+ self.buffer = buffer
+ self.pointer = pointer
+
+ def get_snippet(self, indent=4, max_length=75):
+ if self.buffer is None:
+ return None
+ head = ''
+ start = self.pointer
+ while start > 0 and self.buffer[start-1] not in u'\0\r\n\x85\u2028\u2029':
+ start -= 1
+ if self.pointer-start > max_length/2-1:
+ head = ' ... '
+ start += 5
+ break
+ tail = ''
+ end = self.pointer
+ while end < len(self.buffer) and self.buffer[end] not in u'\0\r\n\x85\u2028\u2029':
+ end += 1
+ if end-self.pointer > max_length/2-1:
+ tail = ' ... '
+ end -= 5
+ break
+ snippet = self.buffer[start:end].encode('utf-8')
+ return ' '*indent + head + snippet + tail + '\n' \
+ + ' '*(indent+self.pointer-start+len(head)) + '^'
+
+ def __str__(self):
+ snippet = self.get_snippet()
+ where = " in \"%s\", line %d, column %d" \
+ % (self.name, self.line+1, self.column+1)
+ if snippet is not None:
+ where += ":\n"+snippet
+ return where
+
+class YAMLError(Exception):
+ pass
+
+class MarkedYAMLError(YAMLError):
+
+ def __init__(self, context=None, context_mark=None,
+ problem=None, problem_mark=None, note=None):
+ self.context = context
+ self.context_mark = context_mark
+ self.problem = problem
+ self.problem_mark = problem_mark
+ self.note = note
+
+ def __str__(self):
+ lines = []
+ if self.context is not None:
+ lines.append(self.context)
+ if self.context_mark is not None \
+ and (self.problem is None or self.problem_mark is None
+ or self.context_mark.name != self.problem_mark.name
+ or self.context_mark.line != self.problem_mark.line
+ or self.context_mark.column != self.problem_mark.column):
+ lines.append(str(self.context_mark))
+ if self.problem is not None:
+ lines.append(self.problem)
+ if self.problem_mark is not None:
+ lines.append(str(self.problem_mark))
+ if self.note is not None:
+ lines.append(self.note)
+ return '\n'.join(lines)
+
diff --git a/collectors/python.d.plugin/python_modules/pyyaml2/events.py b/collectors/python.d.plugin/python_modules/pyyaml2/events.py
new file mode 100644
index 000000000..283452add
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/pyyaml2/events.py
@@ -0,0 +1,87 @@
+# SPDX-License-Identifier: MIT
+
+# Abstract classes.
+
+class Event(object):
+ def __init__(self, start_mark=None, end_mark=None):
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+ def __repr__(self):
+ attributes = [key for key in ['anchor', 'tag', 'implicit', 'value']
+ if hasattr(self, key)]
+ arguments = ', '.join(['%s=%r' % (key, getattr(self, key))
+ for key in attributes])
+ return '%s(%s)' % (self.__class__.__name__, arguments)
+
+class NodeEvent(Event):
+ def __init__(self, anchor, start_mark=None, end_mark=None):
+ self.anchor = anchor
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+
+class CollectionStartEvent(NodeEvent):
+ def __init__(self, anchor, tag, implicit, start_mark=None, end_mark=None,
+ flow_style=None):
+ self.anchor = anchor
+ self.tag = tag
+ self.implicit = implicit
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+ self.flow_style = flow_style
+
+class CollectionEndEvent(Event):
+ pass
+
+# Implementations.
+
+class StreamStartEvent(Event):
+ def __init__(self, start_mark=None, end_mark=None, encoding=None):
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+ self.encoding = encoding
+
+class StreamEndEvent(Event):
+ pass
+
+class DocumentStartEvent(Event):
+ def __init__(self, start_mark=None, end_mark=None,
+ explicit=None, version=None, tags=None):
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+ self.explicit = explicit
+ self.version = version
+ self.tags = tags
+
+class DocumentEndEvent(Event):
+ def __init__(self, start_mark=None, end_mark=None,
+ explicit=None):
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+ self.explicit = explicit
+
+class AliasEvent(NodeEvent):
+ pass
+
+class ScalarEvent(NodeEvent):
+ def __init__(self, anchor, tag, implicit, value,
+ start_mark=None, end_mark=None, style=None):
+ self.anchor = anchor
+ self.tag = tag
+ self.implicit = implicit
+ self.value = value
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+ self.style = style
+
+class SequenceStartEvent(CollectionStartEvent):
+ pass
+
+class SequenceEndEvent(CollectionEndEvent):
+ pass
+
+class MappingStartEvent(CollectionStartEvent):
+ pass
+
+class MappingEndEvent(CollectionEndEvent):
+ pass
+
diff --git a/collectors/python.d.plugin/python_modules/pyyaml2/loader.py b/collectors/python.d.plugin/python_modules/pyyaml2/loader.py
new file mode 100644
index 000000000..1c195531f
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/pyyaml2/loader.py
@@ -0,0 +1,41 @@
+# SPDX-License-Identifier: MIT
+
+__all__ = ['BaseLoader', 'SafeLoader', 'Loader']
+
+from reader import *
+from scanner import *
+from parser import *
+from composer import *
+from constructor import *
+from resolver import *
+
+class BaseLoader(Reader, Scanner, Parser, Composer, BaseConstructor, BaseResolver):
+
+ def __init__(self, stream):
+ Reader.__init__(self, stream)
+ Scanner.__init__(self)
+ Parser.__init__(self)
+ Composer.__init__(self)
+ BaseConstructor.__init__(self)
+ BaseResolver.__init__(self)
+
+class SafeLoader(Reader, Scanner, Parser, Composer, SafeConstructor, Resolver):
+
+ def __init__(self, stream):
+ Reader.__init__(self, stream)
+ Scanner.__init__(self)
+ Parser.__init__(self)
+ Composer.__init__(self)
+ SafeConstructor.__init__(self)
+ Resolver.__init__(self)
+
+class Loader(Reader, Scanner, Parser, Composer, Constructor, Resolver):
+
+ def __init__(self, stream):
+ Reader.__init__(self, stream)
+ Scanner.__init__(self)
+ Parser.__init__(self)
+ Composer.__init__(self)
+ Constructor.__init__(self)
+ Resolver.__init__(self)
+
diff --git a/collectors/python.d.plugin/python_modules/pyyaml2/nodes.py b/collectors/python.d.plugin/python_modules/pyyaml2/nodes.py
new file mode 100644
index 000000000..ed2a1b43e
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/pyyaml2/nodes.py
@@ -0,0 +1,50 @@
+# SPDX-License-Identifier: MIT
+
+class Node(object):
+ def __init__(self, tag, value, start_mark, end_mark):
+ self.tag = tag
+ self.value = value
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+ def __repr__(self):
+ value = self.value
+ #if isinstance(value, list):
+ # if len(value) == 0:
+ # value = '<empty>'
+ # elif len(value) == 1:
+ # value = '<1 item>'
+ # else:
+ # value = '<%d items>' % len(value)
+ #else:
+ # if len(value) > 75:
+ # value = repr(value[:70]+u' ... ')
+ # else:
+ # value = repr(value)
+ value = repr(value)
+ return '%s(tag=%r, value=%s)' % (self.__class__.__name__, self.tag, value)
+
+class ScalarNode(Node):
+ id = 'scalar'
+ def __init__(self, tag, value,
+ start_mark=None, end_mark=None, style=None):
+ self.tag = tag
+ self.value = value
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+ self.style = style
+
+class CollectionNode(Node):
+ def __init__(self, tag, value,
+ start_mark=None, end_mark=None, flow_style=None):
+ self.tag = tag
+ self.value = value
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+ self.flow_style = flow_style
+
+class SequenceNode(CollectionNode):
+ id = 'sequence'
+
+class MappingNode(CollectionNode):
+ id = 'mapping'
+
diff --git a/collectors/python.d.plugin/python_modules/pyyaml2/parser.py b/collectors/python.d.plugin/python_modules/pyyaml2/parser.py
new file mode 100644
index 000000000..97ba08337
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/pyyaml2/parser.py
@@ -0,0 +1,590 @@
+# SPDX-License-Identifier: MIT
+
+# The following YAML grammar is LL(1) and is parsed by a recursive descent
+# parser.
+#
+# stream ::= STREAM-START implicit_document? explicit_document* STREAM-END
+# implicit_document ::= block_node DOCUMENT-END*
+# explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
+# block_node_or_indentless_sequence ::=
+# ALIAS
+# | properties (block_content | indentless_block_sequence)?
+# | block_content
+# | indentless_block_sequence
+# block_node ::= ALIAS
+# | properties block_content?
+# | block_content
+# flow_node ::= ALIAS
+# | properties flow_content?
+# | flow_content
+# properties ::= TAG ANCHOR? | ANCHOR TAG?
+# block_content ::= block_collection | flow_collection | SCALAR
+# flow_content ::= flow_collection | SCALAR
+# block_collection ::= block_sequence | block_mapping
+# flow_collection ::= flow_sequence | flow_mapping
+# block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END
+# indentless_sequence ::= (BLOCK-ENTRY block_node?)+
+# block_mapping ::= BLOCK-MAPPING_START
+# ((KEY block_node_or_indentless_sequence?)?
+# (VALUE block_node_or_indentless_sequence?)?)*
+# BLOCK-END
+# flow_sequence ::= FLOW-SEQUENCE-START
+# (flow_sequence_entry FLOW-ENTRY)*
+# flow_sequence_entry?
+# FLOW-SEQUENCE-END
+# flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+# flow_mapping ::= FLOW-MAPPING-START
+# (flow_mapping_entry FLOW-ENTRY)*
+# flow_mapping_entry?
+# FLOW-MAPPING-END
+# flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+#
+# FIRST sets:
+#
+# stream: { STREAM-START }
+# explicit_document: { DIRECTIVE DOCUMENT-START }
+# implicit_document: FIRST(block_node)
+# block_node: { ALIAS TAG ANCHOR SCALAR BLOCK-SEQUENCE-START BLOCK-MAPPING-START FLOW-SEQUENCE-START FLOW-MAPPING-START }
+# flow_node: { ALIAS ANCHOR TAG SCALAR FLOW-SEQUENCE-START FLOW-MAPPING-START }
+# block_content: { BLOCK-SEQUENCE-START BLOCK-MAPPING-START FLOW-SEQUENCE-START FLOW-MAPPING-START SCALAR }
+# flow_content: { FLOW-SEQUENCE-START FLOW-MAPPING-START SCALAR }
+# block_collection: { BLOCK-SEQUENCE-START BLOCK-MAPPING-START }
+# flow_collection: { FLOW-SEQUENCE-START FLOW-MAPPING-START }
+# block_sequence: { BLOCK-SEQUENCE-START }
+# block_mapping: { BLOCK-MAPPING-START }
+# block_node_or_indentless_sequence: { ALIAS ANCHOR TAG SCALAR BLOCK-SEQUENCE-START BLOCK-MAPPING-START FLOW-SEQUENCE-START FLOW-MAPPING-START BLOCK-ENTRY }
+# indentless_sequence: { ENTRY }
+# flow_collection: { FLOW-SEQUENCE-START FLOW-MAPPING-START }
+# flow_sequence: { FLOW-SEQUENCE-START }
+# flow_mapping: { FLOW-MAPPING-START }
+# flow_sequence_entry: { ALIAS ANCHOR TAG SCALAR FLOW-SEQUENCE-START FLOW-MAPPING-START KEY }
+# flow_mapping_entry: { ALIAS ANCHOR TAG SCALAR FLOW-SEQUENCE-START FLOW-MAPPING-START KEY }
+
+__all__ = ['Parser', 'ParserError']
+
+from error import MarkedYAMLError
+from tokens import *
+from events import *
+from scanner import *
+
+class ParserError(MarkedYAMLError):
+ pass
+
+class Parser(object):
+    # Since writing a recursive descent parser is a straightforward task, we
+ # do not give many comments here.
+
+ DEFAULT_TAGS = {
+ u'!': u'!',
+ u'!!': u'tag:yaml.org,2002:',
+ }
+
+ def __init__(self):
+ self.current_event = None
+ self.yaml_version = None
+ self.tag_handles = {}
+ self.states = []
+ self.marks = []
+ self.state = self.parse_stream_start
+
+ def dispose(self):
+ # Reset the state attributes (to clear self-references)
+ self.states = []
+ self.state = None
+
+ def check_event(self, *choices):
+ # Check the type of the next event.
+ if self.current_event is None:
+ if self.state:
+ self.current_event = self.state()
+ if self.current_event is not None:
+ if not choices:
+ return True
+ for choice in choices:
+ if isinstance(self.current_event, choice):
+ return True
+ return False
+
+ def peek_event(self):
+ # Get the next event.
+ if self.current_event is None:
+ if self.state:
+ self.current_event = self.state()
+ return self.current_event
+
+ def get_event(self):
+ # Get the next event and proceed further.
+ if self.current_event is None:
+ if self.state:
+ self.current_event = self.state()
+ value = self.current_event
+ self.current_event = None
+ return value
+
+ # stream ::= STREAM-START implicit_document? explicit_document* STREAM-END
+ # implicit_document ::= block_node DOCUMENT-END*
+ # explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
+
+ def parse_stream_start(self):
+
+ # Parse the stream start.
+ token = self.get_token()
+ event = StreamStartEvent(token.start_mark, token.end_mark,
+ encoding=token.encoding)
+
+ # Prepare the next state.
+ self.state = self.parse_implicit_document_start
+
+ return event
+
+ def parse_implicit_document_start(self):
+
+ # Parse an implicit document.
+ if not self.check_token(DirectiveToken, DocumentStartToken,
+ StreamEndToken):
+ self.tag_handles = self.DEFAULT_TAGS
+ token = self.peek_token()
+ start_mark = end_mark = token.start_mark
+ event = DocumentStartEvent(start_mark, end_mark,
+ explicit=False)
+
+ # Prepare the next state.
+ self.states.append(self.parse_document_end)
+ self.state = self.parse_block_node
+
+ return event
+
+ else:
+ return self.parse_document_start()
+
+ def parse_document_start(self):
+
+ # Parse any extra document end indicators.
+ while self.check_token(DocumentEndToken):
+ self.get_token()
+
+ # Parse an explicit document.
+ if not self.check_token(StreamEndToken):
+ token = self.peek_token()
+ start_mark = token.start_mark
+ version, tags = self.process_directives()
+ if not self.check_token(DocumentStartToken):
+ raise ParserError(None, None,
+ "expected '<document start>', but found %r"
+ % self.peek_token().id,
+ self.peek_token().start_mark)
+ token = self.get_token()
+ end_mark = token.end_mark
+ event = DocumentStartEvent(start_mark, end_mark,
+ explicit=True, version=version, tags=tags)
+ self.states.append(self.parse_document_end)
+ self.state = self.parse_document_content
+ else:
+ # Parse the end of the stream.
+ token = self.get_token()
+ event = StreamEndEvent(token.start_mark, token.end_mark)
+ assert not self.states
+ assert not self.marks
+ self.state = None
+ return event
+
+ def parse_document_end(self):
+
+ # Parse the document end.
+ token = self.peek_token()
+ start_mark = end_mark = token.start_mark
+ explicit = False
+ if self.check_token(DocumentEndToken):
+ token = self.get_token()
+ end_mark = token.end_mark
+ explicit = True
+ event = DocumentEndEvent(start_mark, end_mark,
+ explicit=explicit)
+
+ # Prepare the next state.
+ self.state = self.parse_document_start
+
+ return event
+
+ def parse_document_content(self):
+ if self.check_token(DirectiveToken,
+ DocumentStartToken, DocumentEndToken, StreamEndToken):
+ event = self.process_empty_scalar(self.peek_token().start_mark)
+ self.state = self.states.pop()
+ return event
+ else:
+ return self.parse_block_node()
+
+ def process_directives(self):
+ self.yaml_version = None
+ self.tag_handles = {}
+ while self.check_token(DirectiveToken):
+ token = self.get_token()
+ if token.name == u'YAML':
+ if self.yaml_version is not None:
+ raise ParserError(None, None,
+ "found duplicate YAML directive", token.start_mark)
+ major, minor = token.value
+ if major != 1:
+ raise ParserError(None, None,
+ "found incompatible YAML document (version 1.* is required)",
+ token.start_mark)
+ self.yaml_version = token.value
+ elif token.name == u'TAG':
+ handle, prefix = token.value
+ if handle in self.tag_handles:
+ raise ParserError(None, None,
+ "duplicate tag handle %r" % handle.encode('utf-8'),
+ token.start_mark)
+ self.tag_handles[handle] = prefix
+ if self.tag_handles:
+ value = self.yaml_version, self.tag_handles.copy()
+ else:
+ value = self.yaml_version, None
+ for key in self.DEFAULT_TAGS:
+ if key not in self.tag_handles:
+ self.tag_handles[key] = self.DEFAULT_TAGS[key]
+ return value
+
+ # block_node_or_indentless_sequence ::= ALIAS
+ # | properties (block_content | indentless_block_sequence)?
+ # | block_content
+ # | indentless_block_sequence
+ # block_node ::= ALIAS
+ # | properties block_content?
+ # | block_content
+ # flow_node ::= ALIAS
+ # | properties flow_content?
+ # | flow_content
+ # properties ::= TAG ANCHOR? | ANCHOR TAG?
+ # block_content ::= block_collection | flow_collection | SCALAR
+ # flow_content ::= flow_collection | SCALAR
+ # block_collection ::= block_sequence | block_mapping
+ # flow_collection ::= flow_sequence | flow_mapping
+
+ def parse_block_node(self):
+ return self.parse_node(block=True)
+
+ def parse_flow_node(self):
+ return self.parse_node()
+
+ def parse_block_node_or_indentless_sequence(self):
+ return self.parse_node(block=True, indentless_sequence=True)
+
+ def parse_node(self, block=False, indentless_sequence=False):
+ if self.check_token(AliasToken):
+ token = self.get_token()
+ event = AliasEvent(token.value, token.start_mark, token.end_mark)
+ self.state = self.states.pop()
+ else:
+ anchor = None
+ tag = None
+ start_mark = end_mark = tag_mark = None
+ if self.check_token(AnchorToken):
+ token = self.get_token()
+ start_mark = token.start_mark
+ end_mark = token.end_mark
+ anchor = token.value
+ if self.check_token(TagToken):
+ token = self.get_token()
+ tag_mark = token.start_mark
+ end_mark = token.end_mark
+ tag = token.value
+ elif self.check_token(TagToken):
+ token = self.get_token()
+ start_mark = tag_mark = token.start_mark
+ end_mark = token.end_mark
+ tag = token.value
+ if self.check_token(AnchorToken):
+ token = self.get_token()
+ end_mark = token.end_mark
+ anchor = token.value
+ if tag is not None:
+ handle, suffix = tag
+ if handle is not None:
+ if handle not in self.tag_handles:
+ raise ParserError("while parsing a node", start_mark,
+ "found undefined tag handle %r" % handle.encode('utf-8'),
+ tag_mark)
+ tag = self.tag_handles[handle]+suffix
+ else:
+ tag = suffix
+ #if tag == u'!':
+ # raise ParserError("while parsing a node", start_mark,
+ # "found non-specific tag '!'", tag_mark,
+ # "Please check 'http://pyyaml.org/wiki/YAMLNonSpecificTag' and share your opinion.")
+ if start_mark is None:
+ start_mark = end_mark = self.peek_token().start_mark
+ event = None
+ implicit = (tag is None or tag == u'!')
+ if indentless_sequence and self.check_token(BlockEntryToken):
+ end_mark = self.peek_token().end_mark
+ event = SequenceStartEvent(anchor, tag, implicit,
+ start_mark, end_mark)
+ self.state = self.parse_indentless_sequence_entry
+ else:
+ if self.check_token(ScalarToken):
+ token = self.get_token()
+ end_mark = token.end_mark
+ if (token.plain and tag is None) or tag == u'!':
+ implicit = (True, False)
+ elif tag is None:
+ implicit = (False, True)
+ else:
+ implicit = (False, False)
+ event = ScalarEvent(anchor, tag, implicit, token.value,
+ start_mark, end_mark, style=token.style)
+ self.state = self.states.pop()
+ elif self.check_token(FlowSequenceStartToken):
+ end_mark = self.peek_token().end_mark
+ event = SequenceStartEvent(anchor, tag, implicit,
+ start_mark, end_mark, flow_style=True)
+ self.state = self.parse_flow_sequence_first_entry
+ elif self.check_token(FlowMappingStartToken):
+ end_mark = self.peek_token().end_mark
+ event = MappingStartEvent(anchor, tag, implicit,
+ start_mark, end_mark, flow_style=True)
+ self.state = self.parse_flow_mapping_first_key
+ elif block and self.check_token(BlockSequenceStartToken):
+ end_mark = self.peek_token().start_mark
+ event = SequenceStartEvent(anchor, tag, implicit,
+ start_mark, end_mark, flow_style=False)
+ self.state = self.parse_block_sequence_first_entry
+ elif block and self.check_token(BlockMappingStartToken):
+ end_mark = self.peek_token().start_mark
+ event = MappingStartEvent(anchor, tag, implicit,
+ start_mark, end_mark, flow_style=False)
+ self.state = self.parse_block_mapping_first_key
+ elif anchor is not None or tag is not None:
+ # Empty scalars are allowed even if a tag or an anchor is
+ # specified.
+ event = ScalarEvent(anchor, tag, (implicit, False), u'',
+ start_mark, end_mark)
+ self.state = self.states.pop()
+ else:
+ if block:
+ node = 'block'
+ else:
+ node = 'flow'
+ token = self.peek_token()
+ raise ParserError("while parsing a %s node" % node, start_mark,
+ "expected the node content, but found %r" % token.id,
+ token.start_mark)
+ return event
+
+ # block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END
+
+ def parse_block_sequence_first_entry(self):
+ token = self.get_token()
+ self.marks.append(token.start_mark)
+ return self.parse_block_sequence_entry()
+
+ def parse_block_sequence_entry(self):
+ if self.check_token(BlockEntryToken):
+ token = self.get_token()
+ if not self.check_token(BlockEntryToken, BlockEndToken):
+ self.states.append(self.parse_block_sequence_entry)
+ return self.parse_block_node()
+ else:
+ self.state = self.parse_block_sequence_entry
+ return self.process_empty_scalar(token.end_mark)
+ if not self.check_token(BlockEndToken):
+ token = self.peek_token()
+ raise ParserError("while parsing a block collection", self.marks[-1],
+ "expected <block end>, but found %r" % token.id, token.start_mark)
+ token = self.get_token()
+ event = SequenceEndEvent(token.start_mark, token.end_mark)
+ self.state = self.states.pop()
+ self.marks.pop()
+ return event
+
+ # indentless_sequence ::= (BLOCK-ENTRY block_node?)+
+
+ def parse_indentless_sequence_entry(self):
+ if self.check_token(BlockEntryToken):
+ token = self.get_token()
+ if not self.check_token(BlockEntryToken,
+ KeyToken, ValueToken, BlockEndToken):
+ self.states.append(self.parse_indentless_sequence_entry)
+ return self.parse_block_node()
+ else:
+ self.state = self.parse_indentless_sequence_entry
+ return self.process_empty_scalar(token.end_mark)
+ token = self.peek_token()
+ event = SequenceEndEvent(token.start_mark, token.start_mark)
+ self.state = self.states.pop()
+ return event
+
+ # block_mapping ::= BLOCK-MAPPING_START
+ # ((KEY block_node_or_indentless_sequence?)?
+ # (VALUE block_node_or_indentless_sequence?)?)*
+ # BLOCK-END
+
+ def parse_block_mapping_first_key(self):
+ token = self.get_token()
+ self.marks.append(token.start_mark)
+ return self.parse_block_mapping_key()
+
+ def parse_block_mapping_key(self):
+ if self.check_token(KeyToken):
+ token = self.get_token()
+ if not self.check_token(KeyToken, ValueToken, BlockEndToken):
+ self.states.append(self.parse_block_mapping_value)
+ return self.parse_block_node_or_indentless_sequence()
+ else:
+ self.state = self.parse_block_mapping_value
+ return self.process_empty_scalar(token.end_mark)
+ if not self.check_token(BlockEndToken):
+ token = self.peek_token()
+ raise ParserError("while parsing a block mapping", self.marks[-1],
+ "expected <block end>, but found %r" % token.id, token.start_mark)
+ token = self.get_token()
+ event = MappingEndEvent(token.start_mark, token.end_mark)
+ self.state = self.states.pop()
+ self.marks.pop()
+ return event
+
+ def parse_block_mapping_value(self):
+ if self.check_token(ValueToken):
+ token = self.get_token()
+ if not self.check_token(KeyToken, ValueToken, BlockEndToken):
+ self.states.append(self.parse_block_mapping_key)
+ return self.parse_block_node_or_indentless_sequence()
+ else:
+ self.state = self.parse_block_mapping_key
+ return self.process_empty_scalar(token.end_mark)
+ else:
+ self.state = self.parse_block_mapping_key
+ token = self.peek_token()
+ return self.process_empty_scalar(token.start_mark)
+
+ # flow_sequence ::= FLOW-SEQUENCE-START
+ # (flow_sequence_entry FLOW-ENTRY)*
+ # flow_sequence_entry?
+ # FLOW-SEQUENCE-END
+ # flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+ #
+ # Note that while production rules for both flow_sequence_entry and
+ # flow_mapping_entry are equal, their interpretations are different.
+ # For `flow_sequence_entry`, the part `KEY flow_node? (VALUE flow_node?)?`
+ # generates an inline mapping (set syntax).
+
+ def parse_flow_sequence_first_entry(self):
+ token = self.get_token()
+ self.marks.append(token.start_mark)
+ return self.parse_flow_sequence_entry(first=True)
+
+ def parse_flow_sequence_entry(self, first=False):
+ if not self.check_token(FlowSequenceEndToken):
+ if not first:
+ if self.check_token(FlowEntryToken):
+ self.get_token()
+ else:
+ token = self.peek_token()
+ raise ParserError("while parsing a flow sequence", self.marks[-1],
+ "expected ',' or ']', but got %r" % token.id, token.start_mark)
+
+ if self.check_token(KeyToken):
+ token = self.peek_token()
+ event = MappingStartEvent(None, None, True,
+ token.start_mark, token.end_mark,
+ flow_style=True)
+ self.state = self.parse_flow_sequence_entry_mapping_key
+ return event
+ elif not self.check_token(FlowSequenceEndToken):
+ self.states.append(self.parse_flow_sequence_entry)
+ return self.parse_flow_node()
+ token = self.get_token()
+ event = SequenceEndEvent(token.start_mark, token.end_mark)
+ self.state = self.states.pop()
+ self.marks.pop()
+ return event
+
+ def parse_flow_sequence_entry_mapping_key(self):
+ token = self.get_token()
+ if not self.check_token(ValueToken,
+ FlowEntryToken, FlowSequenceEndToken):
+ self.states.append(self.parse_flow_sequence_entry_mapping_value)
+ return self.parse_flow_node()
+ else:
+ self.state = self.parse_flow_sequence_entry_mapping_value
+ return self.process_empty_scalar(token.end_mark)
+
+ def parse_flow_sequence_entry_mapping_value(self):
+ if self.check_token(ValueToken):
+ token = self.get_token()
+ if not self.check_token(FlowEntryToken, FlowSequenceEndToken):
+ self.states.append(self.parse_flow_sequence_entry_mapping_end)
+ return self.parse_flow_node()
+ else:
+ self.state = self.parse_flow_sequence_entry_mapping_end
+ return self.process_empty_scalar(token.end_mark)
+ else:
+ self.state = self.parse_flow_sequence_entry_mapping_end
+ token = self.peek_token()
+ return self.process_empty_scalar(token.start_mark)
+
+ def parse_flow_sequence_entry_mapping_end(self):
+ self.state = self.parse_flow_sequence_entry
+ token = self.peek_token()
+ return MappingEndEvent(token.start_mark, token.start_mark)
+
+ # flow_mapping ::= FLOW-MAPPING-START
+ # (flow_mapping_entry FLOW-ENTRY)*
+ # flow_mapping_entry?
+ # FLOW-MAPPING-END
+ # flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+
+ def parse_flow_mapping_first_key(self):
+ token = self.get_token()
+ self.marks.append(token.start_mark)
+ return self.parse_flow_mapping_key(first=True)
+
+ def parse_flow_mapping_key(self, first=False):
+ if not self.check_token(FlowMappingEndToken):
+ if not first:
+ if self.check_token(FlowEntryToken):
+ self.get_token()
+ else:
+ token = self.peek_token()
+ raise ParserError("while parsing a flow mapping", self.marks[-1],
+ "expected ',' or '}', but got %r" % token.id, token.start_mark)
+ if self.check_token(KeyToken):
+ token = self.get_token()
+ if not self.check_token(ValueToken,
+ FlowEntryToken, FlowMappingEndToken):
+ self.states.append(self.parse_flow_mapping_value)
+ return self.parse_flow_node()
+ else:
+ self.state = self.parse_flow_mapping_value
+ return self.process_empty_scalar(token.end_mark)
+ elif not self.check_token(FlowMappingEndToken):
+ self.states.append(self.parse_flow_mapping_empty_value)
+ return self.parse_flow_node()
+ token = self.get_token()
+ event = MappingEndEvent(token.start_mark, token.end_mark)
+ self.state = self.states.pop()
+ self.marks.pop()
+ return event
+
+ def parse_flow_mapping_value(self):
+ if self.check_token(ValueToken):
+ token = self.get_token()
+ if not self.check_token(FlowEntryToken, FlowMappingEndToken):
+ self.states.append(self.parse_flow_mapping_key)
+ return self.parse_flow_node()
+ else:
+ self.state = self.parse_flow_mapping_key
+ return self.process_empty_scalar(token.end_mark)
+ else:
+ self.state = self.parse_flow_mapping_key
+ token = self.peek_token()
+ return self.process_empty_scalar(token.start_mark)
+
+ def parse_flow_mapping_empty_value(self):
+ self.state = self.parse_flow_mapping_key
+ return self.process_empty_scalar(self.peek_token().start_mark)
+
+ def process_empty_scalar(self, mark):
+ return ScalarEvent(None, None, (True, False), u'', mark, mark)
+
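+# A hedged usage sketch (illustrative, not part of this vendored module): in
+# the assembled package the Parser is mixed with a Reader and a Scanner, and
+# the event-queue methods defined near the top of this class (check_event,
+# get_event) drive the state functions above. Walking a document then looks
+# roughly like:
+#
+#     parser = ...                      # a composed Reader/Scanner/Parser object
+#     while parser.check_event():
+#         print parser.get_event()
+#
+# For the document "a: 1" this yields StreamStartEvent, DocumentStartEvent,
+# MappingStartEvent, ScalarEvent('a'), ScalarEvent('1'), MappingEndEvent,
+# DocumentEndEvent and StreamEndEvent, matching the productions listed above.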
diff --git a/collectors/python.d.plugin/python_modules/pyyaml2/reader.py b/collectors/python.d.plugin/python_modules/pyyaml2/reader.py
new file mode 100644
index 000000000..8d422954e
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/pyyaml2/reader.py
@@ -0,0 +1,191 @@
+# SPDX-License-Identifier: MIT
+# This module contains abstractions for the input stream. You don't have to
+# look further, there is no pretty code here.
+#
+# We define two classes here.
+#
+# Mark(source, line, column)
+# It's just a record and its only use is producing nice error messages.
+# Parser does not use it for any other purposes.
+#
+# Reader(source, data)
+# Reader determines the encoding of `data` and converts it to unicode.
+# Reader provides the following methods and attributes:
+# reader.peek(length=1) - return the next `length` characters
+# reader.forward(length=1) - move the current position `length` characters forward.
+# reader.index - the number of the current character.
+# reader.line, reader.column - the line and the column of the current character.
+
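+# A small usage sketch (illustrative, not part of this module). The Reader can
+# be exercised on its own with a unicode string:
+#
+#     r = Reader(u"foo: bar\n")
+#     r.peek()       # -> u'f', the next character; the position is unchanged
+#     r.prefix(3)    # -> u'foo'
+#     r.forward(5)   # advance past "foo: "; index, line and column are updated
+#     r.get_mark()   # a Mark for the current position, used in error messages
+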
+__all__ = ['Reader', 'ReaderError']
+
+from error import YAMLError, Mark
+
+import codecs, re
+
+class ReaderError(YAMLError):
+
+ def __init__(self, name, position, character, encoding, reason):
+ self.name = name
+ self.character = character
+ self.position = position
+ self.encoding = encoding
+ self.reason = reason
+
+ def __str__(self):
+ if isinstance(self.character, str):
+ return "'%s' codec can't decode byte #x%02x: %s\n" \
+ " in \"%s\", position %d" \
+ % (self.encoding, ord(self.character), self.reason,
+ self.name, self.position)
+ else:
+ return "unacceptable character #x%04x: %s\n" \
+ " in \"%s\", position %d" \
+ % (self.character, self.reason,
+ self.name, self.position)
+
+class Reader(object):
+ # Reader:
+ # - determines the data encoding and converts it to unicode,
+ # - checks if characters are in allowed range,
+ # - adds '\0' to the end.
+
+ # Reader accepts
+ # - a `str` object,
+ # - a `unicode` object,
+ # - a file-like object with its `read` method returning `str`,
+ # - a file-like object with its `read` method returning `unicode`.
+
+ # Yeah, it's ugly and slow.
+
+ def __init__(self, stream):
+ self.name = None
+ self.stream = None
+ self.stream_pointer = 0
+ self.eof = True
+ self.buffer = u''
+ self.pointer = 0
+ self.raw_buffer = None
+ self.raw_decode = None
+ self.encoding = None
+ self.index = 0
+ self.line = 0
+ self.column = 0
+ if isinstance(stream, unicode):
+ self.name = "<unicode string>"
+ self.check_printable(stream)
+ self.buffer = stream+u'\0'
+ elif isinstance(stream, str):
+ self.name = "<string>"
+ self.raw_buffer = stream
+ self.determine_encoding()
+ else:
+ self.stream = stream
+ self.name = getattr(stream, 'name', "<file>")
+ self.eof = False
+ self.raw_buffer = ''
+ self.determine_encoding()
+
+ def peek(self, index=0):
+ try:
+ return self.buffer[self.pointer+index]
+ except IndexError:
+ self.update(index+1)
+ return self.buffer[self.pointer+index]
+
+ def prefix(self, length=1):
+ if self.pointer+length >= len(self.buffer):
+ self.update(length)
+ return self.buffer[self.pointer:self.pointer+length]
+
+ def forward(self, length=1):
+ if self.pointer+length+1 >= len(self.buffer):
+ self.update(length+1)
+ while length:
+ ch = self.buffer[self.pointer]
+ self.pointer += 1
+ self.index += 1
+ if ch in u'\n\x85\u2028\u2029' \
+ or (ch == u'\r' and self.buffer[self.pointer] != u'\n'):
+ self.line += 1
+ self.column = 0
+ elif ch != u'\uFEFF':
+ self.column += 1
+ length -= 1
+
+ def get_mark(self):
+ if self.stream is None:
+ return Mark(self.name, self.index, self.line, self.column,
+ self.buffer, self.pointer)
+ else:
+ return Mark(self.name, self.index, self.line, self.column,
+ None, None)
+
+ def determine_encoding(self):
+ while not self.eof and len(self.raw_buffer) < 2:
+ self.update_raw()
+ if not isinstance(self.raw_buffer, unicode):
+ if self.raw_buffer.startswith(codecs.BOM_UTF16_LE):
+ self.raw_decode = codecs.utf_16_le_decode
+ self.encoding = 'utf-16-le'
+ elif self.raw_buffer.startswith(codecs.BOM_UTF16_BE):
+ self.raw_decode = codecs.utf_16_be_decode
+ self.encoding = 'utf-16-be'
+ else:
+ self.raw_decode = codecs.utf_8_decode
+ self.encoding = 'utf-8'
+ self.update(1)
+
+ NON_PRINTABLE = re.compile(u'[^\x09\x0A\x0D\x20-\x7E\x85\xA0-\uD7FF\uE000-\uFFFD]')
+ def check_printable(self, data):
+ match = self.NON_PRINTABLE.search(data)
+ if match:
+ character = match.group()
+ position = self.index+(len(self.buffer)-self.pointer)+match.start()
+ raise ReaderError(self.name, position, ord(character),
+ 'unicode', "special characters are not allowed")
+
+ def update(self, length):
+ if self.raw_buffer is None:
+ return
+ self.buffer = self.buffer[self.pointer:]
+ self.pointer = 0
+ while len(self.buffer) < length:
+ if not self.eof:
+ self.update_raw()
+ if self.raw_decode is not None:
+ try:
+ data, converted = self.raw_decode(self.raw_buffer,
+ 'strict', self.eof)
+ except UnicodeDecodeError, exc:
+ character = exc.object[exc.start]
+ if self.stream is not None:
+ position = self.stream_pointer-len(self.raw_buffer)+exc.start
+ else:
+ position = exc.start
+ raise ReaderError(self.name, position, character,
+ exc.encoding, exc.reason)
+ else:
+ data = self.raw_buffer
+ converted = len(data)
+ self.check_printable(data)
+ self.buffer += data
+ self.raw_buffer = self.raw_buffer[converted:]
+ if self.eof:
+ self.buffer += u'\0'
+ self.raw_buffer = None
+ break
+
+ def update_raw(self, size=1024):
+ data = self.stream.read(size)
+ if data:
+ self.raw_buffer += data
+ self.stream_pointer += len(data)
+ else:
+ self.eof = True
+
+#try:
+# import psyco
+# psyco.bind(Reader)
+#except ImportError:
+# pass
+
diff --git a/collectors/python.d.plugin/python_modules/pyyaml2/representer.py b/collectors/python.d.plugin/python_modules/pyyaml2/representer.py
new file mode 100644
index 000000000..0a1404eca
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/pyyaml2/representer.py
@@ -0,0 +1,485 @@
+# SPDX-License-Identifier: MIT
+
+__all__ = ['BaseRepresenter', 'SafeRepresenter', 'Representer',
+ 'RepresenterError']
+
+from error import *
+from nodes import *
+
+import datetime
+
+import sys, copy_reg, types
+
+class RepresenterError(YAMLError):
+ pass
+
+class BaseRepresenter(object):
+
+ yaml_representers = {}
+ yaml_multi_representers = {}
+
+ def __init__(self, default_style=None, default_flow_style=None):
+ self.default_style = default_style
+ self.default_flow_style = default_flow_style
+ self.represented_objects = {}
+ self.object_keeper = []
+ self.alias_key = None
+
+ def represent(self, data):
+ node = self.represent_data(data)
+ self.serialize(node)
+ self.represented_objects = {}
+ self.object_keeper = []
+ self.alias_key = None
+
+ def get_classobj_bases(self, cls):
+ bases = [cls]
+ for base in cls.__bases__:
+ bases.extend(self.get_classobj_bases(base))
+ return bases
+
+ def represent_data(self, data):
+ if self.ignore_aliases(data):
+ self.alias_key = None
+ else:
+ self.alias_key = id(data)
+ if self.alias_key is not None:
+ if self.alias_key in self.represented_objects:
+ node = self.represented_objects[self.alias_key]
+ #if node is None:
+ # raise RepresenterError("recursive objects are not allowed: %r" % data)
+ return node
+ #self.represented_objects[alias_key] = None
+ self.object_keeper.append(data)
+ data_types = type(data).__mro__
+ if type(data) is types.InstanceType:
+ data_types = self.get_classobj_bases(data.__class__)+list(data_types)
+ if data_types[0] in self.yaml_representers:
+ node = self.yaml_representers[data_types[0]](self, data)
+ else:
+ for data_type in data_types:
+ if data_type in self.yaml_multi_representers:
+ node = self.yaml_multi_representers[data_type](self, data)
+ break
+ else:
+ if None in self.yaml_multi_representers:
+ node = self.yaml_multi_representers[None](self, data)
+ elif None in self.yaml_representers:
+ node = self.yaml_representers[None](self, data)
+ else:
+ node = ScalarNode(None, unicode(data))
+ #if alias_key is not None:
+ # self.represented_objects[alias_key] = node
+ return node
+
+ def add_representer(cls, data_type, representer):
+ if not 'yaml_representers' in cls.__dict__:
+ cls.yaml_representers = cls.yaml_representers.copy()
+ cls.yaml_representers[data_type] = representer
+ add_representer = classmethod(add_representer)
+
+ def add_multi_representer(cls, data_type, representer):
+ if not 'yaml_multi_representers' in cls.__dict__:
+ cls.yaml_multi_representers = cls.yaml_multi_representers.copy()
+ cls.yaml_multi_representers[data_type] = representer
+ add_multi_representer = classmethod(add_multi_representer)
+
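+ # A hedged sketch of registering a custom representer (the class Point and
+ # the handler below are illustrative assumptions, not part of this module).
+ # A representer callable receives the representer instance and the data and
+ # returns a node built with represent_scalar/represent_sequence/
+ # represent_mapping; represent_data() then dispatches instances to it:
+ #
+ #     def represent_point(representer, data):
+ #         return representer.represent_mapping(
+ #             u'!point', {'x': data.x, 'y': data.y})
+ #
+ #     SafeRepresenter.add_representer(Point, represent_point)
+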
+ def represent_scalar(self, tag, value, style=None):
+ if style is None:
+ style = self.default_style
+ node = ScalarNode(tag, value, style=style)
+ if self.alias_key is not None:
+ self.represented_objects[self.alias_key] = node
+ return node
+
+ def represent_sequence(self, tag, sequence, flow_style=None):
+ value = []
+ node = SequenceNode(tag, value, flow_style=flow_style)
+ if self.alias_key is not None:
+ self.represented_objects[self.alias_key] = node
+ best_style = True
+ for item in sequence:
+ node_item = self.represent_data(item)
+ if not (isinstance(node_item, ScalarNode) and not node_item.style):
+ best_style = False
+ value.append(node_item)
+ if flow_style is None:
+ if self.default_flow_style is not None:
+ node.flow_style = self.default_flow_style
+ else:
+ node.flow_style = best_style
+ return node
+
+ def represent_mapping(self, tag, mapping, flow_style=None):
+ value = []
+ node = MappingNode(tag, value, flow_style=flow_style)
+ if self.alias_key is not None:
+ self.represented_objects[self.alias_key] = node
+ best_style = True
+ if hasattr(mapping, 'items'):
+ mapping = mapping.items()
+ mapping.sort()
+ for item_key, item_value in mapping:
+ node_key = self.represent_data(item_key)
+ node_value = self.represent_data(item_value)
+ if not (isinstance(node_key, ScalarNode) and not node_key.style):
+ best_style = False
+ if not (isinstance(node_value, ScalarNode) and not node_value.style):
+ best_style = False
+ value.append((node_key, node_value))
+ if flow_style is None:
+ if self.default_flow_style is not None:
+ node.flow_style = self.default_flow_style
+ else:
+ node.flow_style = best_style
+ return node
+
+ def ignore_aliases(self, data):
+ return False
+
+class SafeRepresenter(BaseRepresenter):
+
+ def ignore_aliases(self, data):
+ if data in [None, ()]:
+ return True
+ if isinstance(data, (str, unicode, bool, int, float)):
+ return True
+
+ def represent_none(self, data):
+ return self.represent_scalar(u'tag:yaml.org,2002:null',
+ u'null')
+
+ def represent_str(self, data):
+ tag = None
+ style = None
+ try:
+ data = unicode(data, 'ascii')
+ tag = u'tag:yaml.org,2002:str'
+ except UnicodeDecodeError:
+ try:
+ data = unicode(data, 'utf-8')
+ tag = u'tag:yaml.org,2002:str'
+ except UnicodeDecodeError:
+ data = data.encode('base64')
+ tag = u'tag:yaml.org,2002:binary'
+ style = '|'
+ return self.represent_scalar(tag, data, style=style)
+
+ def represent_unicode(self, data):
+ return self.represent_scalar(u'tag:yaml.org,2002:str', data)
+
+ def represent_bool(self, data):
+ if data:
+ value = u'true'
+ else:
+ value = u'false'
+ return self.represent_scalar(u'tag:yaml.org,2002:bool', value)
+
+ def represent_int(self, data):
+ return self.represent_scalar(u'tag:yaml.org,2002:int', unicode(data))
+
+ def represent_long(self, data):
+ return self.represent_scalar(u'tag:yaml.org,2002:int', unicode(data))
+
+ inf_value = 1e300
+ while repr(inf_value) != repr(inf_value*inf_value):
+ inf_value *= inf_value
+
+ def represent_float(self, data):
+ if data != data or (data == 0.0 and data == 1.0):
+ value = u'.nan'
+ elif data == self.inf_value:
+ value = u'.inf'
+ elif data == -self.inf_value:
+ value = u'-.inf'
+ else:
+ value = unicode(repr(data)).lower()
+ # Note that in some cases `repr(data)` represents a float number
+ # without the decimal parts. For instance:
+ # >>> repr(1e17)
+ # '1e17'
+ # Unfortunately, this is not a valid float representation according
+ # to the definition of the `!!float` tag. We fix this by adding
+ # '.0' before the 'e' symbol.
+ if u'.' not in value and u'e' in value:
+ value = value.replace(u'e', u'.0e', 1)
+ return self.represent_scalar(u'tag:yaml.org,2002:float', value)
+
+ def represent_list(self, data):
+ #pairs = (len(data) > 0 and isinstance(data, list))
+ #if pairs:
+ # for item in data:
+ # if not isinstance(item, tuple) or len(item) != 2:
+ # pairs = False
+ # break
+ #if not pairs:
+ return self.represent_sequence(u'tag:yaml.org,2002:seq', data)
+ #value = []
+ #for item_key, item_value in data:
+ # value.append(self.represent_mapping(u'tag:yaml.org,2002:map',
+ # [(item_key, item_value)]))
+ #return SequenceNode(u'tag:yaml.org,2002:pairs', value)
+
+ def represent_dict(self, data):
+ return self.represent_mapping(u'tag:yaml.org,2002:map', data)
+
+ def represent_set(self, data):
+ value = {}
+ for key in data:
+ value[key] = None
+ return self.represent_mapping(u'tag:yaml.org,2002:set', value)
+
+ def represent_date(self, data):
+ value = unicode(data.isoformat())
+ return self.represent_scalar(u'tag:yaml.org,2002:timestamp', value)
+
+ def represent_datetime(self, data):
+ value = unicode(data.isoformat(' '))
+ return self.represent_scalar(u'tag:yaml.org,2002:timestamp', value)
+
+ def represent_yaml_object(self, tag, data, cls, flow_style=None):
+ if hasattr(data, '__getstate__'):
+ state = data.__getstate__()
+ else:
+ state = data.__dict__.copy()
+ return self.represent_mapping(tag, state, flow_style=flow_style)
+
+ def represent_undefined(self, data):
+ raise RepresenterError("cannot represent an object: %s" % data)
+
+SafeRepresenter.add_representer(type(None),
+ SafeRepresenter.represent_none)
+
+SafeRepresenter.add_representer(str,
+ SafeRepresenter.represent_str)
+
+SafeRepresenter.add_representer(unicode,
+ SafeRepresenter.represent_unicode)
+
+SafeRepresenter.add_representer(bool,
+ SafeRepresenter.represent_bool)
+
+SafeRepresenter.add_representer(int,
+ SafeRepresenter.represent_int)
+
+SafeRepresenter.add_representer(long,
+ SafeRepresenter.represent_long)
+
+SafeRepresenter.add_representer(float,
+ SafeRepresenter.represent_float)
+
+SafeRepresenter.add_representer(list,
+ SafeRepresenter.represent_list)
+
+SafeRepresenter.add_representer(tuple,
+ SafeRepresenter.represent_list)
+
+SafeRepresenter.add_representer(dict,
+ SafeRepresenter.represent_dict)
+
+SafeRepresenter.add_representer(set,
+ SafeRepresenter.represent_set)
+
+SafeRepresenter.add_representer(datetime.date,
+ SafeRepresenter.represent_date)
+
+SafeRepresenter.add_representer(datetime.datetime,
+ SafeRepresenter.represent_datetime)
+
+SafeRepresenter.add_representer(None,
+ SafeRepresenter.represent_undefined)
+
+class Representer(SafeRepresenter):
+
+ def represent_str(self, data):
+ tag = None
+ style = None
+ try:
+ data = unicode(data, 'ascii')
+ tag = u'tag:yaml.org,2002:str'
+ except UnicodeDecodeError:
+ try:
+ data = unicode(data, 'utf-8')
+ tag = u'tag:yaml.org,2002:python/str'
+ except UnicodeDecodeError:
+ data = data.encode('base64')
+ tag = u'tag:yaml.org,2002:binary'
+ style = '|'
+ return self.represent_scalar(tag, data, style=style)
+
+ def represent_unicode(self, data):
+ tag = None
+ try:
+ data.encode('ascii')
+ tag = u'tag:yaml.org,2002:python/unicode'
+ except UnicodeEncodeError:
+ tag = u'tag:yaml.org,2002:str'
+ return self.represent_scalar(tag, data)
+
+ def represent_long(self, data):
+ tag = u'tag:yaml.org,2002:int'
+ if int(data) is not data:
+ tag = u'tag:yaml.org,2002:python/long'
+ return self.represent_scalar(tag, unicode(data))
+
+ def represent_complex(self, data):
+ if data.imag == 0.0:
+ data = u'%r' % data.real
+ elif data.real == 0.0:
+ data = u'%rj' % data.imag
+ elif data.imag > 0:
+ data = u'%r+%rj' % (data.real, data.imag)
+ else:
+ data = u'%r%rj' % (data.real, data.imag)
+ return self.represent_scalar(u'tag:yaml.org,2002:python/complex', data)
+
+ def represent_tuple(self, data):
+ return self.represent_sequence(u'tag:yaml.org,2002:python/tuple', data)
+
+ def represent_name(self, data):
+ name = u'%s.%s' % (data.__module__, data.__name__)
+ return self.represent_scalar(u'tag:yaml.org,2002:python/name:'+name, u'')
+
+ def represent_module(self, data):
+ return self.represent_scalar(
+ u'tag:yaml.org,2002:python/module:'+data.__name__, u'')
+
+ def represent_instance(self, data):
+ # For instances of classic classes, we use __getinitargs__ and
+ # __getstate__ to serialize the data.
+
+ # If data.__getinitargs__ exists, the object must be reconstructed by
+ # calling cls(*args), where args is a tuple returned by
+ # __getinitargs__. Otherwise, the cls.__init__ method should never be
+ # called and the class instance is created by instantiating a trivial
+ # class and assigning to the instance's __class__ variable.
+
+ # If data.__getstate__ exists, it returns the state of the object.
+ # Otherwise, the state of the object is data.__dict__.
+
+ # We produce either a !!python/object or !!python/object/new node.
+ # If data.__getinitargs__ does not exist and state is a dictionary, we
+ # produce a !!python/object node. Otherwise we produce a
+ # !!python/object/new node.
+
+ cls = data.__class__
+ class_name = u'%s.%s' % (cls.__module__, cls.__name__)
+ args = None
+ state = None
+ if hasattr(data, '__getinitargs__'):
+ args = list(data.__getinitargs__())
+ if hasattr(data, '__getstate__'):
+ state = data.__getstate__()
+ else:
+ state = data.__dict__
+ if args is None and isinstance(state, dict):
+ return self.represent_mapping(
+ u'tag:yaml.org,2002:python/object:'+class_name, state)
+ if isinstance(state, dict) and not state:
+ return self.represent_sequence(
+ u'tag:yaml.org,2002:python/object/new:'+class_name, args)
+ value = {}
+ if args:
+ value['args'] = args
+ value['state'] = state
+ return self.represent_mapping(
+ u'tag:yaml.org,2002:python/object/new:'+class_name, value)
+
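+ # Illustrative note (an assumption, not part of the module): for a classic
+ # class with plain attributes and no __getinitargs__/__getstate__, e.g.
+ #
+ #     class Point:
+ #         def __init__(self, x, y):
+ #             self.x, self.y = x, y
+ #
+ # the `args is None and isinstance(state, dict)` branch above applies, and
+ # Point(1, 2) is represented as a mapping node tagged
+ # tag:yaml.org,2002:python/object:<module>.Point with value {'x': 1, 'y': 2}.
+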
+ def represent_object(self, data):
+ # We use __reduce__ API to save the data. data.__reduce__ returns
+ # a tuple of length 2-5:
+ # (function, args, state, listitems, dictitems)
+
+ # For reconstructing, we call function(*args), then set its state,
+ # listitems, and dictitems if they are not None.
+
+ # A special case is when function.__name__ == '__newobj__'. In this
+ # case we create the object with args[0].__new__(*args).
+
+ # Another special case is when __reduce__ returns a string - we don't
+ # support it.
+
+ # We produce a !!python/object, !!python/object/new or
+ # !!python/object/apply node.
+
+ cls = type(data)
+ if cls in copy_reg.dispatch_table:
+ reduce = copy_reg.dispatch_table[cls](data)
+ elif hasattr(data, '__reduce_ex__'):
+ reduce = data.__reduce_ex__(2)
+ elif hasattr(data, '__reduce__'):
+ reduce = data.__reduce__()
+ else:
+ raise RepresenterError("cannot represent object: %r" % data)
+ reduce = (list(reduce)+[None]*5)[:5]
+ function, args, state, listitems, dictitems = reduce
+ args = list(args)
+ if state is None:
+ state = {}
+ if listitems is not None:
+ listitems = list(listitems)
+ if dictitems is not None:
+ dictitems = dict(dictitems)
+ if function.__name__ == '__newobj__':
+ function = args[0]
+ args = args[1:]
+ tag = u'tag:yaml.org,2002:python/object/new:'
+ newobj = True
+ else:
+ tag = u'tag:yaml.org,2002:python/object/apply:'
+ newobj = False
+ function_name = u'%s.%s' % (function.__module__, function.__name__)
+ if not args and not listitems and not dictitems \
+ and isinstance(state, dict) and newobj:
+ return self.represent_mapping(
+ u'tag:yaml.org,2002:python/object:'+function_name, state)
+ if not listitems and not dictitems \
+ and isinstance(state, dict) and not state:
+ return self.represent_sequence(tag+function_name, args)
+ value = {}
+ if args:
+ value['args'] = args
+ if state or not isinstance(state, dict):
+ value['state'] = state
+ if listitems:
+ value['listitems'] = listitems
+ if dictitems:
+ value['dictitems'] = dictitems
+ return self.represent_mapping(tag+function_name, value)
+
+Representer.add_representer(str,
+ Representer.represent_str)
+
+Representer.add_representer(unicode,
+ Representer.represent_unicode)
+
+Representer.add_representer(long,
+ Representer.represent_long)
+
+Representer.add_representer(complex,
+ Representer.represent_complex)
+
+Representer.add_representer(tuple,
+ Representer.represent_tuple)
+
+Representer.add_representer(type,
+ Representer.represent_name)
+
+Representer.add_representer(types.ClassType,
+ Representer.represent_name)
+
+Representer.add_representer(types.FunctionType,
+ Representer.represent_name)
+
+Representer.add_representer(types.BuiltinFunctionType,
+ Representer.represent_name)
+
+Representer.add_representer(types.ModuleType,
+ Representer.represent_module)
+
+Representer.add_multi_representer(types.InstanceType,
+ Representer.represent_instance)
+
+Representer.add_multi_representer(object,
+ Representer.represent_object)
+
diff --git a/collectors/python.d.plugin/python_modules/pyyaml2/resolver.py b/collectors/python.d.plugin/python_modules/pyyaml2/resolver.py
new file mode 100644
index 000000000..49922debf
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/pyyaml2/resolver.py
@@ -0,0 +1,225 @@
+# SPDX-License-Identifier: MIT
+
+__all__ = ['BaseResolver', 'Resolver']
+
+from error import *
+from nodes import *
+
+import re
+
+class ResolverError(YAMLError):
+ pass
+
+class BaseResolver(object):
+
+ DEFAULT_SCALAR_TAG = u'tag:yaml.org,2002:str'
+ DEFAULT_SEQUENCE_TAG = u'tag:yaml.org,2002:seq'
+ DEFAULT_MAPPING_TAG = u'tag:yaml.org,2002:map'
+
+ yaml_implicit_resolvers = {}
+ yaml_path_resolvers = {}
+
+ def __init__(self):
+ self.resolver_exact_paths = []
+ self.resolver_prefix_paths = []
+
+ def add_implicit_resolver(cls, tag, regexp, first):
+ if not 'yaml_implicit_resolvers' in cls.__dict__:
+ cls.yaml_implicit_resolvers = cls.yaml_implicit_resolvers.copy()
+ if first is None:
+ first = [None]
+ for ch in first:
+ cls.yaml_implicit_resolvers.setdefault(ch, []).append((tag, regexp))
+ add_implicit_resolver = classmethod(add_implicit_resolver)
+
+ def add_path_resolver(cls, tag, path, kind=None):
+ # Note: `add_path_resolver` is experimental. The API could be changed.
+ # `path` is a pattern that is matched against the path from the
+ # root to the node that is being considered. The `path` elements are
+ # tuples `(node_check, index_check)`. `node_check` is a node class:
+ # `ScalarNode`, `SequenceNode`, `MappingNode` or `None`. `None`
+ # matches any kind of node. `index_check` could be `None`, a boolean
+ # value, a string value, or a number. `None` and `False` match against
+ # any _value_ of sequence and mapping nodes. `True` matches against
+ # any _key_ of a mapping node. A string `index_check` matches against
+ # a mapping value that corresponds to a scalar key whose content is
+ # equal to the `index_check` value. An integer `index_check` matches
+ # against a sequence value with the index equal to `index_check`.
+ if not 'yaml_path_resolvers' in cls.__dict__:
+ cls.yaml_path_resolvers = cls.yaml_path_resolvers.copy()
+ new_path = []
+ for element in path:
+ if isinstance(element, (list, tuple)):
+ if len(element) == 2:
+ node_check, index_check = element
+ elif len(element) == 1:
+ node_check = element[0]
+ index_check = True
+ else:
+ raise ResolverError("Invalid path element: %s" % element)
+ else:
+ node_check = None
+ index_check = element
+ if node_check is str:
+ node_check = ScalarNode
+ elif node_check is list:
+ node_check = SequenceNode
+ elif node_check is dict:
+ node_check = MappingNode
+ elif node_check not in [ScalarNode, SequenceNode, MappingNode] \
+ and not isinstance(node_check, basestring) \
+ and node_check is not None:
+ raise ResolverError("Invalid node checker: %s" % node_check)
+ if not isinstance(index_check, (basestring, int)) \
+ and index_check is not None:
+ raise ResolverError("Invalid index checker: %s" % index_check)
+ new_path.append((node_check, index_check))
+ if kind is str:
+ kind = ScalarNode
+ elif kind is list:
+ kind = SequenceNode
+ elif kind is dict:
+ kind = MappingNode
+ elif kind not in [ScalarNode, SequenceNode, MappingNode] \
+ and kind is not None:
+ raise ResolverError("Invalid node kind: %s" % kind)
+ cls.yaml_path_resolvers[tuple(new_path), kind] = tag
+ add_path_resolver = classmethod(add_path_resolver)
+
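+ # A hedged example of the path-resolver API described above (the tag choice
+ # and document shape are illustrative assumptions): resolve the value found
+ # under the top-level key 'servers' as a sequence whenever it is one:
+ #
+ #     Resolver.add_path_resolver(
+ #         u'tag:yaml.org,2002:seq',     # tag to apply
+ #         [u'servers'],                 # path: value of the root key 'servers'
+ #         kind=SequenceNode)            # only when that node is a sequence
+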
+ def descend_resolver(self, current_node, current_index):
+ if not self.yaml_path_resolvers:
+ return
+ exact_paths = {}
+ prefix_paths = []
+ if current_node:
+ depth = len(self.resolver_prefix_paths)
+ for path, kind in self.resolver_prefix_paths[-1]:
+ if self.check_resolver_prefix(depth, path, kind,
+ current_node, current_index):
+ if len(path) > depth:
+ prefix_paths.append((path, kind))
+ else:
+ exact_paths[kind] = self.yaml_path_resolvers[path, kind]
+ else:
+ for path, kind in self.yaml_path_resolvers:
+ if not path:
+ exact_paths[kind] = self.yaml_path_resolvers[path, kind]
+ else:
+ prefix_paths.append((path, kind))
+ self.resolver_exact_paths.append(exact_paths)
+ self.resolver_prefix_paths.append(prefix_paths)
+
+ def ascend_resolver(self):
+ if not self.yaml_path_resolvers:
+ return
+ self.resolver_exact_paths.pop()
+ self.resolver_prefix_paths.pop()
+
+ def check_resolver_prefix(self, depth, path, kind,
+ current_node, current_index):
+ node_check, index_check = path[depth-1]
+ if isinstance(node_check, basestring):
+ if current_node.tag != node_check:
+ return
+ elif node_check is not None:
+ if not isinstance(current_node, node_check):
+ return
+ if index_check is True and current_index is not None:
+ return
+ if (index_check is False or index_check is None) \
+ and current_index is None:
+ return
+ if isinstance(index_check, basestring):
+ if not (isinstance(current_index, ScalarNode)
+ and index_check == current_index.value):
+ return
+ elif isinstance(index_check, int) and not isinstance(index_check, bool):
+ if index_check != current_index:
+ return
+ return True
+
+ def resolve(self, kind, value, implicit):
+ if kind is ScalarNode and implicit[0]:
+ if value == u'':
+ resolvers = self.yaml_implicit_resolvers.get(u'', [])
+ else:
+ resolvers = self.yaml_implicit_resolvers.get(value[0], [])
+ resolvers += self.yaml_implicit_resolvers.get(None, [])
+ for tag, regexp in resolvers:
+ if regexp.match(value):
+ return tag
+ implicit = implicit[1]
+ if self.yaml_path_resolvers:
+ exact_paths = self.resolver_exact_paths[-1]
+ if kind in exact_paths:
+ return exact_paths[kind]
+ if None in exact_paths:
+ return exact_paths[None]
+ if kind is ScalarNode:
+ return self.DEFAULT_SCALAR_TAG
+ elif kind is SequenceNode:
+ return self.DEFAULT_SEQUENCE_TAG
+ elif kind is MappingNode:
+ return self.DEFAULT_MAPPING_TAG
+
+class Resolver(BaseResolver):
+ pass
+
+Resolver.add_implicit_resolver(
+ u'tag:yaml.org,2002:bool',
+ re.compile(ur'''^(?:yes|Yes|YES|no|No|NO
+ |true|True|TRUE|false|False|FALSE
+ |on|On|ON|off|Off|OFF)$''', re.X),
+ list(u'yYnNtTfFoO'))
+
+Resolver.add_implicit_resolver(
+ u'tag:yaml.org,2002:float',
+ re.compile(ur'''^(?:[-+]?(?:[0-9][0-9_]*)\.[0-9_]*(?:[eE][-+][0-9]+)?
+ |\.[0-9_]+(?:[eE][-+][0-9]+)?
+ |[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+\.[0-9_]*
+ |[-+]?\.(?:inf|Inf|INF)
+ |\.(?:nan|NaN|NAN))$''', re.X),
+ list(u'-+0123456789.'))
+
+Resolver.add_implicit_resolver(
+ u'tag:yaml.org,2002:int',
+ re.compile(ur'''^(?:[-+]?0b[0-1_]+
+ |[-+]?0[0-7_]+
+ |[-+]?(?:0|[1-9][0-9_]*)
+ |[-+]?0x[0-9a-fA-F_]+
+ |[-+]?[1-9][0-9_]*(?::[0-5]?[0-9])+)$''', re.X),
+ list(u'-+0123456789'))
+
+Resolver.add_implicit_resolver(
+ u'tag:yaml.org,2002:merge',
+ re.compile(ur'^(?:<<)$'),
+ [u'<'])
+
+Resolver.add_implicit_resolver(
+ u'tag:yaml.org,2002:null',
+ re.compile(ur'''^(?: ~
+ |null|Null|NULL
+ | )$''', re.X),
+ [u'~', u'n', u'N', u''])
+
+Resolver.add_implicit_resolver(
+ u'tag:yaml.org,2002:timestamp',
+ re.compile(ur'''^(?:[0-9][0-9][0-9][0-9]-[0-9][0-9]-[0-9][0-9]
+ |[0-9][0-9][0-9][0-9] -[0-9][0-9]? -[0-9][0-9]?
+ (?:[Tt]|[ \t]+)[0-9][0-9]?
+ :[0-9][0-9] :[0-9][0-9] (?:\.[0-9]*)?
+ (?:[ \t]*(?:Z|[-+][0-9][0-9]?(?::[0-9][0-9])?))?)$''', re.X),
+ list(u'0123456789'))
+
+Resolver.add_implicit_resolver(
+ u'tag:yaml.org,2002:value',
+ re.compile(ur'^(?:=)$'),
+ [u'='])
+
+# The following resolver is only for documentation purposes. It cannot work
+# because plain scalars cannot start with '!', '&', or '*'.
+Resolver.add_implicit_resolver(
+ u'tag:yaml.org,2002:yaml',
+ re.compile(ur'^(?:!|&|\*)$'),
+ list(u'!&*'))
+
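+# Taken together, these implicit resolvers mean that a plain (untagged) scalar
+# such as 'true' resolves to tag:yaml.org,2002:bool, '3.14' to ...:float,
+# '0x1F' to ...:int, '2018-11-07' to ...:timestamp, '~' or the empty string to
+# ...:null, and anything matching none of the patterns falls back to
+# DEFAULT_SCALAR_TAG (...:str) in BaseResolver.resolve().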
diff --git a/collectors/python.d.plugin/python_modules/pyyaml2/scanner.py b/collectors/python.d.plugin/python_modules/pyyaml2/scanner.py
new file mode 100644
index 000000000..971da6127
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/pyyaml2/scanner.py
@@ -0,0 +1,1458 @@
+# SPDX-License-Identifier: MIT
+
+# Scanner produces tokens of the following types:
+# STREAM-START
+# STREAM-END
+# DIRECTIVE(name, value)
+# DOCUMENT-START
+# DOCUMENT-END
+# BLOCK-SEQUENCE-START
+# BLOCK-MAPPING-START
+# BLOCK-END
+# FLOW-SEQUENCE-START
+# FLOW-MAPPING-START
+# FLOW-SEQUENCE-END
+# FLOW-MAPPING-END
+# BLOCK-ENTRY
+# FLOW-ENTRY
+# KEY
+# VALUE
+# ALIAS(value)
+# ANCHOR(value)
+# TAG(value)
+# SCALAR(value, plain, style)
+#
+# Read comments in the Scanner code for more details.
+#
+
+__all__ = ['Scanner', 'ScannerError']
+
+from error import MarkedYAMLError
+from tokens import *
+
+class ScannerError(MarkedYAMLError):
+ pass
+
+class SimpleKey(object):
+ # See below simple keys treatment.
+
+ def __init__(self, token_number, required, index, line, column, mark):
+ self.token_number = token_number
+ self.required = required
+ self.index = index
+ self.line = line
+ self.column = column
+ self.mark = mark
+
+class Scanner(object):
+
+ def __init__(self):
+ """Initialize the scanner."""
+ # It is assumed that Scanner and Reader will have a common descendant.
+ # Reader does the dirty work of checking for BOM and converting the
+ # input data to Unicode. It also adds NUL to the end.
+ #
+ # Reader supports the following methods
+ # self.peek(i=0) # peek the next i-th character
+ # self.prefix(l=1) # peek the next l characters
+ # self.forward(l=1) # read the next l characters and move the pointer.
+
+ # Have we reached the end of the stream?
+ self.done = False
+
+ # The number of unclosed '{' and '['. `flow_level == 0` means block
+ # context.
+ self.flow_level = 0
+
+ # List of processed tokens that are not yet emitted.
+ self.tokens = []
+
+ # Add the STREAM-START token.
+ self.fetch_stream_start()
+
+ # Number of tokens that were emitted through the `get_token` method.
+ self.tokens_taken = 0
+
+ # The current indentation level.
+ self.indent = -1
+
+ # Past indentation levels.
+ self.indents = []
+
+ # Variables related to simple keys treatment.
+
+ # A simple key is a key that is not denoted by the '?' indicator.
+ # Examples of simple keys:
+ # ---
+ # block simple key: value
+ # ? not a simple key:
+ # : { flow simple key: value }
+ # We emit the KEY token before all keys, so when we find a potential
+ # simple key, we try to locate the corresponding ':' indicator.
+ # Simple keys should be limited to a single line and 1024 characters.
+
+ # Can a simple key start at the current position? A simple key may
+ # start:
+ # - at the beginning of the line, not counting indentation spaces
+ # (in block context),
+ # - after '{', '[', ',' (in the flow context),
+ # - after '?', ':', '-' (in the block context).
+ # In the block context, this flag also signifies if a block collection
+ # may start at the current position.
+ self.allow_simple_key = True
+
+ # Keep track of possible simple keys. This is a dictionary. The key
+ # is `flow_level`; there can be no more than one possible simple key
+ # for each level. The value is a SimpleKey record:
+ # (token_number, required, index, line, column, mark)
+ # A simple key may start with ALIAS, ANCHOR, TAG, SCALAR(flow),
+ # '[', or '{' tokens.
+ self.possible_simple_keys = {}
+
+ # Public methods.
+
+ def check_token(self, *choices):
+ # Check if the next token is one of the given types.
+ while self.need_more_tokens():
+ self.fetch_more_tokens()
+ if self.tokens:
+ if not choices:
+ return True
+ for choice in choices:
+ if isinstance(self.tokens[0], choice):
+ return True
+ return False
+
+ def peek_token(self):
+ # Return the next token, but do not delete it from the queue.
+ while self.need_more_tokens():
+ self.fetch_more_tokens()
+ if self.tokens:
+ return self.tokens[0]
+
+ def get_token(self):
+ # Return the next token.
+ while self.need_more_tokens():
+ self.fetch_more_tokens()
+ if self.tokens:
+ self.tokens_taken += 1
+ return self.tokens.pop(0)
+
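+ # Illustrative note (not part of the module): for the document "a: 1" the
+ # scanner emits, in order, STREAM-START, BLOCK-MAPPING-START, KEY,
+ # SCALAR('a'), VALUE, SCALAR('1'), BLOCK-END and STREAM-END tokens, which
+ # check_token/peek_token/get_token expose to the parser one at a time.
+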
+ # Private methods.
+
+ def need_more_tokens(self):
+ if self.done:
+ return False
+ if not self.tokens:
+ return True
+ # The current token may be a potential simple key, so we
+ # need to look further.
+ self.stale_possible_simple_keys()
+ if self.next_possible_simple_key() == self.tokens_taken:
+ return True
+
+ def fetch_more_tokens(self):
+
+ # Eat whitespaces and comments until we reach the next token.
+ self.scan_to_next_token()
+
+ # Remove obsolete possible simple keys.
+ self.stale_possible_simple_keys()
+
+ # Compare the current indentation and column. It may add some tokens
+ # and decrease the current indentation level.
+ self.unwind_indent(self.column)
+
+ # Peek the next character.
+ ch = self.peek()
+
+ # Is it the end of stream?
+ if ch == u'\0':
+ return self.fetch_stream_end()
+
+ # Is it a directive?
+ if ch == u'%' and self.check_directive():
+ return self.fetch_directive()
+
+ # Is it the document start?
+ if ch == u'-' and self.check_document_start():
+ return self.fetch_document_start()
+
+ # Is it the document end?
+ if ch == u'.' and self.check_document_end():
+ return self.fetch_document_end()
+
+ # TODO: support for BOM within a stream.
+ #if ch == u'\uFEFF':
+ # return self.fetch_bom() <-- issue BOMToken
+
+ # Note: the order of the following checks is NOT significant.
+
+ # Is it the flow sequence start indicator?
+ if ch == u'[':
+ return self.fetch_flow_sequence_start()
+
+ # Is it the flow mapping start indicator?
+ if ch == u'{':
+ return self.fetch_flow_mapping_start()
+
+ # Is it the flow sequence end indicator?
+ if ch == u']':
+ return self.fetch_flow_sequence_end()
+
+ # Is it the flow mapping end indicator?
+ if ch == u'}':
+ return self.fetch_flow_mapping_end()
+
+ # Is it the flow entry indicator?
+ if ch == u',':
+ return self.fetch_flow_entry()
+
+ # Is it the block entry indicator?
+ if ch == u'-' and self.check_block_entry():
+ return self.fetch_block_entry()
+
+ # Is it the key indicator?
+ if ch == u'?' and self.check_key():
+ return self.fetch_key()
+
+ # Is it the value indicator?
+ if ch == u':' and self.check_value():
+ return self.fetch_value()
+
+ # Is it an alias?
+ if ch == u'*':
+ return self.fetch_alias()
+
+ # Is it an anchor?
+ if ch == u'&':
+ return self.fetch_anchor()
+
+ # Is it a tag?
+ if ch == u'!':
+ return self.fetch_tag()
+
+ # Is it a literal scalar?
+ if ch == u'|' and not self.flow_level:
+ return self.fetch_literal()
+
+ # Is it a folded scalar?
+ if ch == u'>' and not self.flow_level:
+ return self.fetch_folded()
+
+ # Is it a single quoted scalar?
+ if ch == u'\'':
+ return self.fetch_single()
+
+ # Is it a double quoted scalar?
+ if ch == u'\"':
+ return self.fetch_double()
+
+ # It must be a plain scalar then.
+ if self.check_plain():
+ return self.fetch_plain()
+
+ # No? It's an error. Let's produce a nice error message.
+ raise ScannerError("while scanning for the next token", None,
+ "found character %r that cannot start any token"
+ % ch.encode('utf-8'), self.get_mark())
+
+ # Simple keys treatment.
+
+ def next_possible_simple_key(self):
+ # Return the number of the nearest possible simple key. Actually we
+ # don't need to loop through the whole dictionary. We may replace it
+ # with the following code:
+ # if not self.possible_simple_keys:
+ # return None
+ # return self.possible_simple_keys[
+ # min(self.possible_simple_keys.keys())].token_number
+ min_token_number = None
+ for level in self.possible_simple_keys:
+ key = self.possible_simple_keys[level]
+ if min_token_number is None or key.token_number < min_token_number:
+ min_token_number = key.token_number
+ return min_token_number
+
+ def stale_possible_simple_keys(self):
+ # Remove entries that are no longer possible simple keys. According to
+ # the YAML specification, simple keys
+ # - should be limited to a single line,
+ # - should be no longer than 1024 characters.
+ # Disabling this procedure will allow simple keys of any length and
+ # height (may cause problems if indentation is broken though).
+ for level in self.possible_simple_keys.keys():
+ key = self.possible_simple_keys[level]
+ if key.line != self.line \
+ or self.index-key.index > 1024:
+ if key.required:
+ raise ScannerError("while scanning a simple key", key.mark,
+ "could not found expected ':'", self.get_mark())
+ del self.possible_simple_keys[level]
+
+ def save_possible_simple_key(self):
+ # The next token may start a simple key. We check if it's possible
+ # and save its position. This function is called for
+ # ALIAS, ANCHOR, TAG, SCALAR(flow), '[', and '{'.
+
+ # Check if a simple key is required at the current position.
+ required = not self.flow_level and self.indent == self.column
+
+ # A simple key is required only if it is the first token in the current
+ # line. Therefore it is always allowed.
+ assert self.allow_simple_key or not required
+
+ # The next token might be a simple key. Let's save its number and
+ # position.
+ if self.allow_simple_key:
+ self.remove_possible_simple_key()
+ token_number = self.tokens_taken+len(self.tokens)
+ key = SimpleKey(token_number, required,
+ self.index, self.line, self.column, self.get_mark())
+ self.possible_simple_keys[self.flow_level] = key
+
+ def remove_possible_simple_key(self):
+ # Remove the saved possible key position at the current flow level.
+ if self.flow_level in self.possible_simple_keys:
+ key = self.possible_simple_keys[self.flow_level]
+
+ if key.required:
+ raise ScannerError("while scanning a simple key", key.mark,
+ "could not found expected ':'", self.get_mark())
+
+ del self.possible_simple_keys[self.flow_level]
+
+ # Indentation functions.
+
+ def unwind_indent(self, column):
+
+ ## In flow context, tokens should respect indentation.
+ ## Actually the condition should be `self.indent >= column` according to
+ ## the spec. But this condition will prohibit intuitively correct
+ ## constructions such as
+ ## key : {
+ ## }
+ #if self.flow_level and self.indent > column:
+ # raise ScannerError(None, None,
+ # "invalid intendation or unclosed '[' or '{'",
+ # self.get_mark())
+
+ # In the flow context, indentation is ignored. We make the scanner less
+ # restrictive than the specification requires.
+ if self.flow_level:
+ return
+
+ # In block context, we may need to issue the BLOCK-END tokens.
+ while self.indent > column:
+ mark = self.get_mark()
+ self.indent = self.indents.pop()
+ self.tokens.append(BlockEndToken(mark, mark))
+
+ def add_indent(self, column):
+ # Check if we need to increase indentation.
+ if self.indent < column:
+ self.indents.append(self.indent)
+ self.indent = column
+ return True
+ return False
+
+ # Fetchers.
+
+ def fetch_stream_start(self):
+ # We always add STREAM-START as the first token and STREAM-END as the
+ # last token.
+
+ # Read the token.
+ mark = self.get_mark()
+
+ # Add STREAM-START.
+ self.tokens.append(StreamStartToken(mark, mark,
+ encoding=self.encoding))
+
+
+ def fetch_stream_end(self):
+
+ # Set the current indentation to -1.
+ self.unwind_indent(-1)
+
+ # Reset simple keys.
+ self.remove_possible_simple_key()
+ self.allow_simple_key = False
+ self.possible_simple_keys = {}
+
+ # Read the token.
+ mark = self.get_mark()
+
+ # Add STREAM-END.
+ self.tokens.append(StreamEndToken(mark, mark))
+
+ # The stream is finished.
+ self.done = True
+
+ def fetch_directive(self):
+
+ # Set the current indentation to -1.
+ self.unwind_indent(-1)
+
+ # Reset simple keys.
+ self.remove_possible_simple_key()
+ self.allow_simple_key = False
+
+ # Scan and add DIRECTIVE.
+ self.tokens.append(self.scan_directive())
+
+ def fetch_document_start(self):
+ self.fetch_document_indicator(DocumentStartToken)
+
+ def fetch_document_end(self):
+ self.fetch_document_indicator(DocumentEndToken)
+
+ def fetch_document_indicator(self, TokenClass):
+
+ # Set the current indentation to -1.
+ self.unwind_indent(-1)
+
+ # Reset simple keys. Note that there cannot be a block collection
+ # after '---'.
+ self.remove_possible_simple_key()
+ self.allow_simple_key = False
+
+ # Add DOCUMENT-START or DOCUMENT-END.
+ start_mark = self.get_mark()
+ self.forward(3)
+ end_mark = self.get_mark()
+ self.tokens.append(TokenClass(start_mark, end_mark))
+
+ def fetch_flow_sequence_start(self):
+ self.fetch_flow_collection_start(FlowSequenceStartToken)
+
+ def fetch_flow_mapping_start(self):
+ self.fetch_flow_collection_start(FlowMappingStartToken)
+
+ def fetch_flow_collection_start(self, TokenClass):
+
+ # '[' and '{' may start a simple key.
+ self.save_possible_simple_key()
+
+ # Increase the flow level.
+ self.flow_level += 1
+
+ # Simple keys are allowed after '[' and '{'.
+ self.allow_simple_key = True
+
+ # Add FLOW-SEQUENCE-START or FLOW-MAPPING-START.
+ start_mark = self.get_mark()
+ self.forward()
+ end_mark = self.get_mark()
+ self.tokens.append(TokenClass(start_mark, end_mark))
+
+ def fetch_flow_sequence_end(self):
+ self.fetch_flow_collection_end(FlowSequenceEndToken)
+
+ def fetch_flow_mapping_end(self):
+ self.fetch_flow_collection_end(FlowMappingEndToken)
+
+ def fetch_flow_collection_end(self, TokenClass):
+
+ # Reset possible simple key on the current level.
+ self.remove_possible_simple_key()
+
+ # Decrease the flow level.
+ self.flow_level -= 1
+
+ # No simple keys after ']' or '}'.
+ self.allow_simple_key = False
+
+ # Add FLOW-SEQUENCE-END or FLOW-MAPPING-END.
+ start_mark = self.get_mark()
+ self.forward()
+ end_mark = self.get_mark()
+ self.tokens.append(TokenClass(start_mark, end_mark))
+
+ def fetch_flow_entry(self):
+
+ # Simple keys are allowed after ','.
+ self.allow_simple_key = True
+
+ # Reset possible simple key on the current level.
+ self.remove_possible_simple_key()
+
+ # Add FLOW-ENTRY.
+ start_mark = self.get_mark()
+ self.forward()
+ end_mark = self.get_mark()
+ self.tokens.append(FlowEntryToken(start_mark, end_mark))
+
+ def fetch_block_entry(self):
+
+ # Block context needs additional checks.
+ if not self.flow_level:
+
+ # Are we allowed to start a new entry?
+ if not self.allow_simple_key:
+ raise ScannerError(None, None,
+ "sequence entries are not allowed here",
+ self.get_mark())
+
+ # We may need to add BLOCK-SEQUENCE-START.
+ if self.add_indent(self.column):
+ mark = self.get_mark()
+ self.tokens.append(BlockSequenceStartToken(mark, mark))
+
+ # It's an error for the block entry to occur in the flow context,
+ # but we let the parser detect this.
+ else:
+ pass
+
+ # Simple keys are allowed after '-'.
+ self.allow_simple_key = True
+
+ # Reset possible simple key on the current level.
+ self.remove_possible_simple_key()
+
+ # Add BLOCK-ENTRY.
+ start_mark = self.get_mark()
+ self.forward()
+ end_mark = self.get_mark()
+ self.tokens.append(BlockEntryToken(start_mark, end_mark))
+
+ def fetch_key(self):
+
+ # Block context needs additional checks.
+ if not self.flow_level:
+
+ # Are we allowed to start a key (not necessarily a simple one)?
+ if not self.allow_simple_key:
+ raise ScannerError(None, None,
+ "mapping keys are not allowed here",
+ self.get_mark())
+
+ # We may need to add BLOCK-MAPPING-START.
+ if self.add_indent(self.column):
+ mark = self.get_mark()
+ self.tokens.append(BlockMappingStartToken(mark, mark))
+
+ # Simple keys are allowed after '?' in the block context.
+ self.allow_simple_key = not self.flow_level
+
+ # Reset possible simple key on the current level.
+ self.remove_possible_simple_key()
+
+ # Add KEY.
+ start_mark = self.get_mark()
+ self.forward()
+ end_mark = self.get_mark()
+ self.tokens.append(KeyToken(start_mark, end_mark))
+
+ def fetch_value(self):
+
+ # Do we determine a simple key?
+ if self.flow_level in self.possible_simple_keys:
+
+ # Add KEY.
+ key = self.possible_simple_keys[self.flow_level]
+ del self.possible_simple_keys[self.flow_level]
+ self.tokens.insert(key.token_number-self.tokens_taken,
+ KeyToken(key.mark, key.mark))
+
+ # If this key starts a new block mapping, we need to add
+ # BLOCK-MAPPING-START.
+ if not self.flow_level:
+ if self.add_indent(key.column):
+ self.tokens.insert(key.token_number-self.tokens_taken,
+ BlockMappingStartToken(key.mark, key.mark))
+
+ # There cannot be two simple keys one after another.
+ self.allow_simple_key = False
+
+ # It must be a part of a complex key.
+ else:
+
+ # Block context needs additional checks.
+ # (Do we really need them? They will be caught by the parser
+ # anyway.)
+ if not self.flow_level:
+
+ # We are allowed to start a complex value if and only if
+ # we can start a simple key.
+ if not self.allow_simple_key:
+ raise ScannerError(None, None,
+ "mapping values are not allowed here",
+ self.get_mark())
+
+ # If this value starts a new block mapping, we need to add
+ # BLOCK-MAPPING-START. It will be detected as an error later by
+ # the parser.
+ if not self.flow_level:
+ if self.add_indent(self.column):
+ mark = self.get_mark()
+ self.tokens.append(BlockMappingStartToken(mark, mark))
+
+ # Simple keys are allowed after ':' in the block context.
+ self.allow_simple_key = not self.flow_level
+
+ # Reset possible simple key on the current level.
+ self.remove_possible_simple_key()
+
+ # Add VALUE.
+ start_mark = self.get_mark()
+ self.forward()
+ end_mark = self.get_mark()
+ self.tokens.append(ValueToken(start_mark, end_mark))
+
+ def fetch_alias(self):
+
+ # ALIAS could be a simple key.
+ self.save_possible_simple_key()
+
+ # No simple keys after ALIAS.
+ self.allow_simple_key = False
+
+ # Scan and add ALIAS.
+ self.tokens.append(self.scan_anchor(AliasToken))
+
+ def fetch_anchor(self):
+
+ # ANCHOR could start a simple key.
+ self.save_possible_simple_key()
+
+ # No simple keys after ANCHOR.
+ self.allow_simple_key = False
+
+ # Scan and add ANCHOR.
+ self.tokens.append(self.scan_anchor(AnchorToken))
+
+ def fetch_tag(self):
+
+ # TAG could start a simple key.
+ self.save_possible_simple_key()
+
+ # No simple keys after TAG.
+ self.allow_simple_key = False
+
+ # Scan and add TAG.
+ self.tokens.append(self.scan_tag())
+
+ def fetch_literal(self):
+ self.fetch_block_scalar(style='|')
+
+ def fetch_folded(self):
+ self.fetch_block_scalar(style='>')
+
+ def fetch_block_scalar(self, style):
+
+ # A simple key may follow a block scalar.
+ self.allow_simple_key = True
+
+ # Reset possible simple key on the current level.
+ self.remove_possible_simple_key()
+
+ # Scan and add SCALAR.
+ self.tokens.append(self.scan_block_scalar(style))
+
+ def fetch_single(self):
+ self.fetch_flow_scalar(style='\'')
+
+ def fetch_double(self):
+ self.fetch_flow_scalar(style='"')
+
+ def fetch_flow_scalar(self, style):
+
+ # A flow scalar could be a simple key.
+ self.save_possible_simple_key()
+
+ # No simple keys after flow scalars.
+ self.allow_simple_key = False
+
+ # Scan and add SCALAR.
+ self.tokens.append(self.scan_flow_scalar(style))
+
+ def fetch_plain(self):
+
+ # A plain scalar could be a simple key.
+ self.save_possible_simple_key()
+
+ # No simple keys after plain scalars. But note that `scan_plain` will
+ # change this flag if the scan is finished at the beginning of the
+ # line.
+ self.allow_simple_key = False
+
+ # Scan and add SCALAR. May change `allow_simple_key`.
+ self.tokens.append(self.scan_plain())
+
+ # Checkers.
+
+ def check_directive(self):
+
+ # DIRECTIVE: ^ '%' ...
+ # The '%' indicator is already checked.
+ if self.column == 0:
+ return True
+
+ def check_document_start(self):
+
+ # DOCUMENT-START: ^ '---' (' '|'\n')
+ if self.column == 0:
+ if self.prefix(3) == u'---' \
+ and self.peek(3) in u'\0 \t\r\n\x85\u2028\u2029':
+ return True
+
+ def check_document_end(self):
+
+ # DOCUMENT-END: ^ '...' (' '|'\n')
+ if self.column == 0:
+ if self.prefix(3) == u'...' \
+ and self.peek(3) in u'\0 \t\r\n\x85\u2028\u2029':
+ return True
+
+ def check_block_entry(self):
+
+ # BLOCK-ENTRY: '-' (' '|'\n')
+ return self.peek(1) in u'\0 \t\r\n\x85\u2028\u2029'
+
+ def check_key(self):
+
+ # KEY(flow context): '?'
+ if self.flow_level:
+ return True
+
+ # KEY(block context): '?' (' '|'\n')
+ else:
+ return self.peek(1) in u'\0 \t\r\n\x85\u2028\u2029'
+
+ def check_value(self):
+
+ # VALUE(flow context): ':'
+ if self.flow_level:
+ return True
+
+ # VALUE(block context): ':' (' '|'\n')
+ else:
+ return self.peek(1) in u'\0 \t\r\n\x85\u2028\u2029'
+
+ def check_plain(self):
+
+ # A plain scalar may start with any non-space character except:
+ # '-', '?', ':', ',', '[', ']', '{', '}',
+ # '#', '&', '*', '!', '|', '>', '\'', '\"',
+ # '%', '@', '`'.
+ #
+ # It may also start with
+ # '-', '?', ':'
+ # if it is followed by a non-space character.
+ #
+ # Note that we limit the last rule to the block context (except the
+ # '-' character) because we want the flow context to be space
+ # independent.
+ ch = self.peek()
+ return ch not in u'\0 \t\r\n\x85\u2028\u2029-?:,[]{}#&*!|>\'\"%@`' \
+ or (self.peek(1) not in u'\0 \t\r\n\x85\u2028\u2029'
+ and (ch == u'-' or (not self.flow_level and ch in u'?:')))
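+ # NOTE (editorial illustration, not part of upstream PyYAML): under the
+ # rule above, in the block context "foo", "-foo" and "?key" may start a
+ # plain scalar, while "- foo" starts a block entry and "? foo" a mapping
+ # key; in the flow context only '-' keeps this privilege, so "-1" is
+ # still plain but "?x" and ":x" are not.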
+
+ # Scanners.
+
+ def scan_to_next_token(self):
+ # We ignore spaces, line breaks and comments.
+ # If we find a line break in the block context, we set the flag
+ # `allow_simple_key` on.
+ # The byte order mark is stripped if it's the first character in the
+ # stream. We do not yet support BOM inside the stream as the
+ # specification requires. Any such mark will be considered as a part
+ # of the document.
+ #
+ # TODO: We need to make tab handling rules more sane. A good rule is
+ # Tabs cannot precede tokens
+ # BLOCK-SEQUENCE-START, BLOCK-MAPPING-START, BLOCK-END,
+ # KEY(block), VALUE(block), BLOCK-ENTRY
+ # So the checking code is
+ # if <TAB>:
+ # self.allow_simple_keys = False
+ # We also need to add the check for `allow_simple_keys == True` to
+ # `unwind_indent` before issuing BLOCK-END.
+ # Scanners for block, flow, and plain scalars need to be modified.
+
+ if self.index == 0 and self.peek() == u'\uFEFF':
+ self.forward()
+ found = False
+ while not found:
+ while self.peek() == u' ':
+ self.forward()
+ if self.peek() == u'#':
+ while self.peek() not in u'\0\r\n\x85\u2028\u2029':
+ self.forward()
+ if self.scan_line_break():
+ if not self.flow_level:
+ self.allow_simple_key = True
+ else:
+ found = True
+
+ def scan_directive(self):
+ # See the specification for details.
+ start_mark = self.get_mark()
+ self.forward()
+ name = self.scan_directive_name(start_mark)
+ value = None
+ if name == u'YAML':
+ value = self.scan_yaml_directive_value(start_mark)
+ end_mark = self.get_mark()
+ elif name == u'TAG':
+ value = self.scan_tag_directive_value(start_mark)
+ end_mark = self.get_mark()
+ else:
+ end_mark = self.get_mark()
+ while self.peek() not in u'\0\r\n\x85\u2028\u2029':
+ self.forward()
+ self.scan_directive_ignored_line(start_mark)
+ return DirectiveToken(name, value, start_mark, end_mark)
+
+ def scan_directive_name(self, start_mark):
+ # See the specification for details.
+ length = 0
+ ch = self.peek(length)
+ while u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \
+ or ch in u'-_':
+ length += 1
+ ch = self.peek(length)
+ if not length:
+ raise ScannerError("while scanning a directive", start_mark,
+ "expected alphabetic or numeric character, but found %r"
+ % ch.encode('utf-8'), self.get_mark())
+ value = self.prefix(length)
+ self.forward(length)
+ ch = self.peek()
+ if ch not in u'\0 \r\n\x85\u2028\u2029':
+ raise ScannerError("while scanning a directive", start_mark,
+ "expected alphabetic or numeric character, but found %r"
+ % ch.encode('utf-8'), self.get_mark())
+ return value
+
+ def scan_yaml_directive_value(self, start_mark):
+ # See the specification for details.
+ while self.peek() == u' ':
+ self.forward()
+ major = self.scan_yaml_directive_number(start_mark)
+ if self.peek() != '.':
+ raise ScannerError("while scanning a directive", start_mark,
+ "expected a digit or '.', but found %r"
+ % self.peek().encode('utf-8'),
+ self.get_mark())
+ self.forward()
+ minor = self.scan_yaml_directive_number(start_mark)
+ if self.peek() not in u'\0 \r\n\x85\u2028\u2029':
+ raise ScannerError("while scanning a directive", start_mark,
+ "expected a digit or ' ', but found %r"
+ % self.peek().encode('utf-8'),
+ self.get_mark())
+ return (major, minor)
+
+ def scan_yaml_directive_number(self, start_mark):
+ # See the specification for details.
+ ch = self.peek()
+ if not (u'0' <= ch <= u'9'):
+ raise ScannerError("while scanning a directive", start_mark,
+ "expected a digit, but found %r" % ch.encode('utf-8'),
+ self.get_mark())
+ length = 0
+ while u'0' <= self.peek(length) <= u'9':
+ length += 1
+ value = int(self.prefix(length))
+ self.forward(length)
+ return value
+
+ def scan_tag_directive_value(self, start_mark):
+ # See the specification for details.
+ while self.peek() == u' ':
+ self.forward()
+ handle = self.scan_tag_directive_handle(start_mark)
+ while self.peek() == u' ':
+ self.forward()
+ prefix = self.scan_tag_directive_prefix(start_mark)
+ return (handle, prefix)
+
+ def scan_tag_directive_handle(self, start_mark):
+ # See the specification for details.
+ value = self.scan_tag_handle('directive', start_mark)
+ ch = self.peek()
+ if ch != u' ':
+ raise ScannerError("while scanning a directive", start_mark,
+ "expected ' ', but found %r" % ch.encode('utf-8'),
+ self.get_mark())
+ return value
+
+ def scan_tag_directive_prefix(self, start_mark):
+ # See the specification for details.
+ value = self.scan_tag_uri('directive', start_mark)
+ ch = self.peek()
+ if ch not in u'\0 \r\n\x85\u2028\u2029':
+ raise ScannerError("while scanning a directive", start_mark,
+ "expected ' ', but found %r" % ch.encode('utf-8'),
+ self.get_mark())
+ return value
+
+ def scan_directive_ignored_line(self, start_mark):
+ # See the specification for details.
+ while self.peek() == u' ':
+ self.forward()
+ if self.peek() == u'#':
+ while self.peek() not in u'\0\r\n\x85\u2028\u2029':
+ self.forward()
+ ch = self.peek()
+ if ch not in u'\0\r\n\x85\u2028\u2029':
+ raise ScannerError("while scanning a directive", start_mark,
+ "expected a comment or a line break, but found %r"
+ % ch.encode('utf-8'), self.get_mark())
+ self.scan_line_break()
+
+ def scan_anchor(self, TokenClass):
+ # The specification does not restrict characters for anchors and
+ # aliases. This may lead to problems, for instance, the document:
+ # [ *alias, value ]
+ # can be interpreted in two ways, as
+ # [ "value" ]
+ # and
+ # [ *alias , "value" ]
+ # Therefore we restrict aliases to numbers and ASCII letters.
+ start_mark = self.get_mark()
+ indicator = self.peek()
+ if indicator == u'*':
+ name = 'alias'
+ else:
+ name = 'anchor'
+ self.forward()
+ length = 0
+ ch = self.peek(length)
+ while u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \
+ or ch in u'-_':
+ length += 1
+ ch = self.peek(length)
+ if not length:
+ raise ScannerError("while scanning an %s" % name, start_mark,
+ "expected alphabetic or numeric character, but found %r"
+ % ch.encode('utf-8'), self.get_mark())
+ value = self.prefix(length)
+ self.forward(length)
+ ch = self.peek()
+ if ch not in u'\0 \t\r\n\x85\u2028\u2029?:,]}%@`':
+ raise ScannerError("while scanning an %s" % name, start_mark,
+ "expected alphabetic or numeric character, but found %r"
+ % ch.encode('utf-8'), self.get_mark())
+ end_mark = self.get_mark()
+ return TokenClass(value, start_mark, end_mark)
+
+ def scan_tag(self):
+ # See the specification for details.
+ start_mark = self.get_mark()
+ ch = self.peek(1)
+ if ch == u'<':
+ handle = None
+ self.forward(2)
+ suffix = self.scan_tag_uri('tag', start_mark)
+ if self.peek() != u'>':
+ raise ScannerError("while parsing a tag", start_mark,
+ "expected '>', but found %r" % self.peek().encode('utf-8'),
+ self.get_mark())
+ self.forward()
+ elif ch in u'\0 \t\r\n\x85\u2028\u2029':
+ handle = None
+ suffix = u'!'
+ self.forward()
+ else:
+ length = 1
+ use_handle = False
+ while ch not in u'\0 \r\n\x85\u2028\u2029':
+ if ch == u'!':
+ use_handle = True
+ break
+ length += 1
+ ch = self.peek(length)
+ handle = u'!'
+ if use_handle:
+ handle = self.scan_tag_handle('tag', start_mark)
+ else:
+ handle = u'!'
+ self.forward()
+ suffix = self.scan_tag_uri('tag', start_mark)
+ ch = self.peek()
+ if ch not in u'\0 \r\n\x85\u2028\u2029':
+ raise ScannerError("while scanning a tag", start_mark,
+ "expected ' ', but found %r" % ch.encode('utf-8'),
+ self.get_mark())
+ value = (handle, suffix)
+ end_mark = self.get_mark()
+ return TagToken(value, start_mark, end_mark)
+
+ def scan_block_scalar(self, style):
+ # See the specification for details.
+
+ if style == '>':
+ folded = True
+ else:
+ folded = False
+
+ chunks = []
+ start_mark = self.get_mark()
+
+ # Scan the header.
+ self.forward()
+ chomping, increment = self.scan_block_scalar_indicators(start_mark)
+ self.scan_block_scalar_ignored_line(start_mark)
+
+ # Determine the indentation level and go to the first non-empty line.
+ min_indent = self.indent+1
+ if min_indent < 1:
+ min_indent = 1
+ if increment is None:
+ breaks, max_indent, end_mark = self.scan_block_scalar_indentation()
+ indent = max(min_indent, max_indent)
+ else:
+ indent = min_indent+increment-1
+ breaks, end_mark = self.scan_block_scalar_breaks(indent)
+ line_break = u''
+
+ # Scan the inner part of the block scalar.
+ while self.column == indent and self.peek() != u'\0':
+ chunks.extend(breaks)
+ leading_non_space = self.peek() not in u' \t'
+ length = 0
+ while self.peek(length) not in u'\0\r\n\x85\u2028\u2029':
+ length += 1
+ chunks.append(self.prefix(length))
+ self.forward(length)
+ line_break = self.scan_line_break()
+ breaks, end_mark = self.scan_block_scalar_breaks(indent)
+ if self.column == indent and self.peek() != u'\0':
+
+ # Unfortunately, folding rules are ambiguous.
+ #
+ # This is the folding according to the specification:
+
+ if folded and line_break == u'\n' \
+ and leading_non_space and self.peek() not in u' \t':
+ if not breaks:
+ chunks.append(u' ')
+ else:
+ chunks.append(line_break)
+
+ # This is Clark Evans's interpretation (also in the spec
+ # examples):
+ #
+ #if folded and line_break == u'\n':
+ # if not breaks:
+ # if self.peek() not in ' \t':
+ # chunks.append(u' ')
+ # else:
+ # chunks.append(line_break)
+ #else:
+ # chunks.append(line_break)
+ else:
+ break
+
+ # Chomp the tail.
+ if chomping is not False:
+ chunks.append(line_break)
+ if chomping is True:
+ chunks.extend(breaks)
+
+ # We are done.
+ return ScalarToken(u''.join(chunks), False, start_mark, end_mark,
+ style)
+
+ def scan_block_scalar_indicators(self, start_mark):
+ # See the specification for details.
+ chomping = None
+ increment = None
+ ch = self.peek()
+ if ch in u'+-':
+ if ch == '+':
+ chomping = True
+ else:
+ chomping = False
+ self.forward()
+ ch = self.peek()
+ if ch in u'0123456789':
+ increment = int(ch)
+ if increment == 0:
+ raise ScannerError("while scanning a block scalar", start_mark,
+ "expected indentation indicator in the range 1-9, but found 0",
+ self.get_mark())
+ self.forward()
+ elif ch in u'0123456789':
+ increment = int(ch)
+ if increment == 0:
+ raise ScannerError("while scanning a block scalar", start_mark,
+ "expected indentation indicator in the range 1-9, but found 0",
+ self.get_mark())
+ self.forward()
+ ch = self.peek()
+ if ch in u'+-':
+ if ch == '+':
+ chomping = True
+ else:
+ chomping = False
+ self.forward()
+ ch = self.peek()
+ if ch not in u'\0 \r\n\x85\u2028\u2029':
+ raise ScannerError("while scanning a block scalar", start_mark,
+ "expected chomping or indentation indicators, but found %r"
+ % ch.encode('utf-8'), self.get_mark())
+ return chomping, increment
+
+ def scan_block_scalar_ignored_line(self, start_mark):
+ # See the specification for details.
+ while self.peek() == u' ':
+ self.forward()
+ if self.peek() == u'#':
+ while self.peek() not in u'\0\r\n\x85\u2028\u2029':
+ self.forward()
+ ch = self.peek()
+ if ch not in u'\0\r\n\x85\u2028\u2029':
+ raise ScannerError("while scanning a block scalar", start_mark,
+ "expected a comment or a line break, but found %r"
+ % ch.encode('utf-8'), self.get_mark())
+ self.scan_line_break()
+
+ def scan_block_scalar_indentation(self):
+ # See the specification for details.
+ chunks = []
+ max_indent = 0
+ end_mark = self.get_mark()
+ while self.peek() in u' \r\n\x85\u2028\u2029':
+ if self.peek() != u' ':
+ chunks.append(self.scan_line_break())
+ end_mark = self.get_mark()
+ else:
+ self.forward()
+ if self.column > max_indent:
+ max_indent = self.column
+ return chunks, max_indent, end_mark
+
+ def scan_block_scalar_breaks(self, indent):
+ # See the specification for details.
+ chunks = []
+ end_mark = self.get_mark()
+ while self.column < indent and self.peek() == u' ':
+ self.forward()
+ while self.peek() in u'\r\n\x85\u2028\u2029':
+ chunks.append(self.scan_line_break())
+ end_mark = self.get_mark()
+ while self.column < indent and self.peek() == u' ':
+ self.forward()
+ return chunks, end_mark
+
+ def scan_flow_scalar(self, style):
+ # See the specification for details.
+ # Note that we loosen the indentation rules for quoted scalars. Quoted
+ # scalars don't need to adhere to indentation because " and ' clearly
+ # mark their beginning and end. Therefore we are less
+ # restrictive than the specification requires. We only need to check
+ # that document separators are not included in scalars.
+ if style == '"':
+ double = True
+ else:
+ double = False
+ chunks = []
+ start_mark = self.get_mark()
+ quote = self.peek()
+ self.forward()
+ chunks.extend(self.scan_flow_scalar_non_spaces(double, start_mark))
+ while self.peek() != quote:
+ chunks.extend(self.scan_flow_scalar_spaces(double, start_mark))
+ chunks.extend(self.scan_flow_scalar_non_spaces(double, start_mark))
+ self.forward()
+ end_mark = self.get_mark()
+ return ScalarToken(u''.join(chunks), False, start_mark, end_mark,
+ style)
+
+ ESCAPE_REPLACEMENTS = {
+ u'0': u'\0',
+ u'a': u'\x07',
+ u'b': u'\x08',
+ u't': u'\x09',
+ u'\t': u'\x09',
+ u'n': u'\x0A',
+ u'v': u'\x0B',
+ u'f': u'\x0C',
+ u'r': u'\x0D',
+ u'e': u'\x1B',
+ u' ': u'\x20',
+ u'\"': u'\"',
+ u'\\': u'\\',
+ u'N': u'\x85',
+ u'_': u'\xA0',
+ u'L': u'\u2028',
+ u'P': u'\u2029',
+ }
+
+ ESCAPE_CODES = {
+ u'x': 2,
+ u'u': 4,
+ u'U': 8,
+ }
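+ # NOTE (editorial illustration, not part of upstream PyYAML): inside a
+ # double-quoted scalar, "\x41" decodes to u'A' (2 hex digits via
+ # ESCAPE_CODES['x']), "\u263A" decodes to u'\u263a' (4 hex digits via
+ # ESCAPE_CODES['u']), and "\n" decodes to a real line feed via
+ # ESCAPE_REPLACEMENTS.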
+
+ def scan_flow_scalar_non_spaces(self, double, start_mark):
+ # See the specification for details.
+ chunks = []
+ while True:
+ length = 0
+ while self.peek(length) not in u'\'\"\\\0 \t\r\n\x85\u2028\u2029':
+ length += 1
+ if length:
+ chunks.append(self.prefix(length))
+ self.forward(length)
+ ch = self.peek()
+ if not double and ch == u'\'' and self.peek(1) == u'\'':
+ chunks.append(u'\'')
+ self.forward(2)
+ elif (double and ch == u'\'') or (not double and ch in u'\"\\'):
+ chunks.append(ch)
+ self.forward()
+ elif double and ch == u'\\':
+ self.forward()
+ ch = self.peek()
+ if ch in self.ESCAPE_REPLACEMENTS:
+ chunks.append(self.ESCAPE_REPLACEMENTS[ch])
+ self.forward()
+ elif ch in self.ESCAPE_CODES:
+ length = self.ESCAPE_CODES[ch]
+ self.forward()
+ for k in range(length):
+ if self.peek(k) not in u'0123456789ABCDEFabcdef':
+ raise ScannerError("while scanning a double-quoted scalar", start_mark,
+ "expected escape sequence of %d hexdecimal numbers, but found %r" %
+ (length, self.peek(k).encode('utf-8')), self.get_mark())
+ code = int(self.prefix(length), 16)
+ chunks.append(unichr(code))
+ self.forward(length)
+ elif ch in u'\r\n\x85\u2028\u2029':
+ self.scan_line_break()
+ chunks.extend(self.scan_flow_scalar_breaks(double, start_mark))
+ else:
+ raise ScannerError("while scanning a double-quoted scalar", start_mark,
+ "found unknown escape character %r" % ch.encode('utf-8'), self.get_mark())
+ else:
+ return chunks
+
+ def scan_flow_scalar_spaces(self, double, start_mark):
+ # See the specification for details.
+ chunks = []
+ length = 0
+ while self.peek(length) in u' \t':
+ length += 1
+ whitespaces = self.prefix(length)
+ self.forward(length)
+ ch = self.peek()
+ if ch == u'\0':
+ raise ScannerError("while scanning a quoted scalar", start_mark,
+ "found unexpected end of stream", self.get_mark())
+ elif ch in u'\r\n\x85\u2028\u2029':
+ line_break = self.scan_line_break()
+ breaks = self.scan_flow_scalar_breaks(double, start_mark)
+ if line_break != u'\n':
+ chunks.append(line_break)
+ elif not breaks:
+ chunks.append(u' ')
+ chunks.extend(breaks)
+ else:
+ chunks.append(whitespaces)
+ return chunks
+
+ def scan_flow_scalar_breaks(self, double, start_mark):
+ # See the specification for details.
+ chunks = []
+ while True:
+ # Instead of checking indentation, we check for document
+ # separators.
+ prefix = self.prefix(3)
+ if (prefix == u'---' or prefix == u'...') \
+ and self.peek(3) in u'\0 \t\r\n\x85\u2028\u2029':
+ raise ScannerError("while scanning a quoted scalar", start_mark,
+ "found unexpected document separator", self.get_mark())
+ while self.peek() in u' \t':
+ self.forward()
+ if self.peek() in u'\r\n\x85\u2028\u2029':
+ chunks.append(self.scan_line_break())
+ else:
+ return chunks
+
+ def scan_plain(self):
+ # See the specification for details.
+ # We add an additional restriction for the flow context:
+ # plain scalars in the flow context cannot contain ',', ':' and '?'.
+ # We also keep track of the `allow_simple_key` flag here.
+ # Indentation rules are loosened for the flow context.
+ chunks = []
+ start_mark = self.get_mark()
+ end_mark = start_mark
+ indent = self.indent+1
+ # We allow zero indentation for scalars, but then we need to check for
+ # document separators at the beginning of the line.
+ #if indent == 0:
+ # indent = 1
+ spaces = []
+ while True:
+ length = 0
+ if self.peek() == u'#':
+ break
+ while True:
+ ch = self.peek(length)
+ if ch in u'\0 \t\r\n\x85\u2028\u2029' \
+ or (not self.flow_level and ch == u':' and
+ self.peek(length+1) in u'\0 \t\r\n\x85\u2028\u2029') \
+ or (self.flow_level and ch in u',:?[]{}'):
+ break
+ length += 1
+ # It's not clear what we should do with ':' in the flow context.
+ if (self.flow_level and ch == u':'
+ and self.peek(length+1) not in u'\0 \t\r\n\x85\u2028\u2029,[]{}'):
+ self.forward(length)
+ raise ScannerError("while scanning a plain scalar", start_mark,
+ "found unexpected ':'", self.get_mark(),
+ "Please check http://pyyaml.org/wiki/YAMLColonInFlowContext for details.")
+ if length == 0:
+ break
+ self.allow_simple_key = False
+ chunks.extend(spaces)
+ chunks.append(self.prefix(length))
+ self.forward(length)
+ end_mark = self.get_mark()
+ spaces = self.scan_plain_spaces(indent, start_mark)
+ if not spaces or self.peek() == u'#' \
+ or (not self.flow_level and self.column < indent):
+ break
+ return ScalarToken(u''.join(chunks), True, start_mark, end_mark)
+
+ def scan_plain_spaces(self, indent, start_mark):
+ # See the specification for details.
+ # The specification is really confusing about tabs in plain scalars.
+ # We just forbid them completely. Do not use tabs in YAML!
+ chunks = []
+ length = 0
+ while self.peek(length) in u' ':
+ length += 1
+ whitespaces = self.prefix(length)
+ self.forward(length)
+ ch = self.peek()
+ if ch in u'\r\n\x85\u2028\u2029':
+ line_break = self.scan_line_break()
+ self.allow_simple_key = True
+ prefix = self.prefix(3)
+ if (prefix == u'---' or prefix == u'...') \
+ and self.peek(3) in u'\0 \t\r\n\x85\u2028\u2029':
+ return
+ breaks = []
+ while self.peek() in u' \r\n\x85\u2028\u2029':
+ if self.peek() == ' ':
+ self.forward()
+ else:
+ breaks.append(self.scan_line_break())
+ prefix = self.prefix(3)
+ if (prefix == u'---' or prefix == u'...') \
+ and self.peek(3) in u'\0 \t\r\n\x85\u2028\u2029':
+ return
+ if line_break != u'\n':
+ chunks.append(line_break)
+ elif not breaks:
+ chunks.append(u' ')
+ chunks.extend(breaks)
+ elif whitespaces:
+ chunks.append(whitespaces)
+ return chunks
+
+ def scan_tag_handle(self, name, start_mark):
+ # See the specification for details.
+ # For some strange reason, the specification does not allow '_' in
+ # tag handles. I have allowed it anyway.
+ ch = self.peek()
+ if ch != u'!':
+ raise ScannerError("while scanning a %s" % name, start_mark,
+ "expected '!', but found %r" % ch.encode('utf-8'),
+ self.get_mark())
+ length = 1
+ ch = self.peek(length)
+ if ch != u' ':
+ while u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \
+ or ch in u'-_':
+ length += 1
+ ch = self.peek(length)
+ if ch != u'!':
+ self.forward(length)
+ raise ScannerError("while scanning a %s" % name, start_mark,
+ "expected '!', but found %r" % ch.encode('utf-8'),
+ self.get_mark())
+ length += 1
+ value = self.prefix(length)
+ self.forward(length)
+ return value
+
+ def scan_tag_uri(self, name, start_mark):
+ # See the specification for details.
+ # Note: we do not check if URI is well-formed.
+ chunks = []
+ length = 0
+ ch = self.peek(length)
+ while u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \
+ or ch in u'-;/?:@&=+$,_.!~*\'()[]%':
+ if ch == u'%':
+ chunks.append(self.prefix(length))
+ self.forward(length)
+ length = 0
+ chunks.append(self.scan_uri_escapes(name, start_mark))
+ else:
+ length += 1
+ ch = self.peek(length)
+ if length:
+ chunks.append(self.prefix(length))
+ self.forward(length)
+ length = 0
+ if not chunks:
+ raise ScannerError("while parsing a %s" % name, start_mark,
+ "expected URI, but found %r" % ch.encode('utf-8'),
+ self.get_mark())
+ return u''.join(chunks)
+
+ def scan_uri_escapes(self, name, start_mark):
+ # See the specification for details.
+ bytes = []
+ mark = self.get_mark()
+ while self.peek() == u'%':
+ self.forward()
+ for k in range(2):
+ if self.peek(k) not in u'0123456789ABCDEFabcdef':
+ raise ScannerError("while scanning a %s" % name, start_mark,
+ "expected URI escape sequence of 2 hexdecimal numbers, but found %r" %
+ (self.peek(k).encode('utf-8')), self.get_mark())
+ bytes.append(chr(int(self.prefix(2), 16)))
+ self.forward(2)
+ try:
+ value = unicode(''.join(bytes), 'utf-8')
+ except UnicodeDecodeError, exc:
+ raise ScannerError("while scanning a %s" % name, start_mark, str(exc), mark)
+ return value
+
+ def scan_line_break(self):
+ # Transforms:
+ # '\r\n' : '\n'
+ # '\r' : '\n'
+ # '\n' : '\n'
+ # '\x85' : '\n'
+ # '\u2028' : '\u2028'
+ # '\u2029' : '\u2029'
+ # default : ''
+ ch = self.peek()
+ if ch in u'\r\n\x85':
+ if self.prefix(2) == u'\r\n':
+ self.forward(2)
+ else:
+ self.forward()
+ return u'\n'
+ elif ch in u'\u2028\u2029':
+ self.forward()
+ return ch
+ return u''
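+ # NOTE (editorial illustration, not part of upstream PyYAML): on input
+ # '\r\n' this consumes both characters and returns u'\n'; '\u2028' and
+ # '\u2029' are consumed and returned unchanged; any other character is
+ # left in place and u'' is returned.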
+
+#try:
+# import psyco
+# psyco.bind(Scanner)
+#except ImportError:
+# pass
+
diff --git a/collectors/python.d.plugin/python_modules/pyyaml2/serializer.py b/collectors/python.d.plugin/python_modules/pyyaml2/serializer.py
new file mode 100644
index 000000000..15fdbb0c0
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/pyyaml2/serializer.py
@@ -0,0 +1,112 @@
+# SPDX-License-Identifier: MIT
+
+__all__ = ['Serializer', 'SerializerError']
+
+from error import YAMLError
+from events import *
+from nodes import *
+
+class SerializerError(YAMLError):
+ pass
+
+class Serializer(object):
+
+ ANCHOR_TEMPLATE = u'id%03d'
+
+ def __init__(self, encoding=None,
+ explicit_start=None, explicit_end=None, version=None, tags=None):
+ self.use_encoding = encoding
+ self.use_explicit_start = explicit_start
+ self.use_explicit_end = explicit_end
+ self.use_version = version
+ self.use_tags = tags
+ self.serialized_nodes = {}
+ self.anchors = {}
+ self.last_anchor_id = 0
+ self.closed = None
+
+ def open(self):
+ if self.closed is None:
+ self.emit(StreamStartEvent(encoding=self.use_encoding))
+ self.closed = False
+ elif self.closed:
+ raise SerializerError("serializer is closed")
+ else:
+ raise SerializerError("serializer is already opened")
+
+ def close(self):
+ if self.closed is None:
+ raise SerializerError("serializer is not opened")
+ elif not self.closed:
+ self.emit(StreamEndEvent())
+ self.closed = True
+
+ #def __del__(self):
+ # self.close()
+
+ def serialize(self, node):
+ if self.closed is None:
+ raise SerializerError("serializer is not opened")
+ elif self.closed:
+ raise SerializerError("serializer is closed")
+ self.emit(DocumentStartEvent(explicit=self.use_explicit_start,
+ version=self.use_version, tags=self.use_tags))
+ self.anchor_node(node)
+ self.serialize_node(node, None, None)
+ self.emit(DocumentEndEvent(explicit=self.use_explicit_end))
+ self.serialized_nodes = {}
+ self.anchors = {}
+ self.last_anchor_id = 0
+
+ def anchor_node(self, node):
+ if node in self.anchors:
+ if self.anchors[node] is None:
+ self.anchors[node] = self.generate_anchor(node)
+ else:
+ self.anchors[node] = None
+ if isinstance(node, SequenceNode):
+ for item in node.value:
+ self.anchor_node(item)
+ elif isinstance(node, MappingNode):
+ for key, value in node.value:
+ self.anchor_node(key)
+ self.anchor_node(value)
+
+ def generate_anchor(self, node):
+ self.last_anchor_id += 1
+ return self.ANCHOR_TEMPLATE % self.last_anchor_id
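+ # NOTE (editorial illustration, not part of upstream PyYAML): with the
+ # ANCHOR_TEMPLATE above, the first auto-generated anchors are u'id001',
+ # u'id002', u'id003', and so on.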
+
+ def serialize_node(self, node, parent, index):
+ alias = self.anchors[node]
+ if node in self.serialized_nodes:
+ self.emit(AliasEvent(alias))
+ else:
+ self.serialized_nodes[node] = True
+ self.descend_resolver(parent, index)
+ if isinstance(node, ScalarNode):
+ detected_tag = self.resolve(ScalarNode, node.value, (True, False))
+ default_tag = self.resolve(ScalarNode, node.value, (False, True))
+ implicit = (node.tag == detected_tag), (node.tag == default_tag)
+ self.emit(ScalarEvent(alias, node.tag, implicit, node.value,
+ style=node.style))
+ elif isinstance(node, SequenceNode):
+ implicit = (node.tag
+ == self.resolve(SequenceNode, node.value, True))
+ self.emit(SequenceStartEvent(alias, node.tag, implicit,
+ flow_style=node.flow_style))
+ index = 0
+ for item in node.value:
+ self.serialize_node(item, node, index)
+ index += 1
+ self.emit(SequenceEndEvent())
+ elif isinstance(node, MappingNode):
+ implicit = (node.tag
+ == self.resolve(MappingNode, node.value, True))
+ self.emit(MappingStartEvent(alias, node.tag, implicit,
+ flow_style=node.flow_style))
+ for key, value in node.value:
+ self.serialize_node(key, node, None)
+ self.serialize_node(value, node, key)
+ self.emit(MappingEndEvent())
+ self.ascend_resolver()
+
diff --git a/collectors/python.d.plugin/python_modules/pyyaml2/tokens.py b/collectors/python.d.plugin/python_modules/pyyaml2/tokens.py
new file mode 100644
index 000000000..c5c4fb116
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/pyyaml2/tokens.py
@@ -0,0 +1,105 @@
+# SPDX-License-Identifier: MIT
+
+class Token(object):
+ def __init__(self, start_mark, end_mark):
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+ def __repr__(self):
+ attributes = [key for key in self.__dict__
+ if not key.endswith('_mark')]
+ attributes.sort()
+ arguments = ', '.join(['%s=%r' % (key, getattr(self, key))
+ for key in attributes])
+ return '%s(%s)' % (self.__class__.__name__, arguments)
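+# NOTE (editorial illustration, not part of upstream PyYAML): __repr__
+# lists the non-mark attributes in sorted order, so a plain scalar token
+# prints roughly as ScalarToken(plain=True, style=None, value=u'foo').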
+
+#class BOMToken(Token):
+# id = '<byte order mark>'
+
+class DirectiveToken(Token):
+ id = '<directive>'
+ def __init__(self, name, value, start_mark, end_mark):
+ self.name = name
+ self.value = value
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+
+class DocumentStartToken(Token):
+ id = '<document start>'
+
+class DocumentEndToken(Token):
+ id = '<document end>'
+
+class StreamStartToken(Token):
+ id = '<stream start>'
+ def __init__(self, start_mark=None, end_mark=None,
+ encoding=None):
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+ self.encoding = encoding
+
+class StreamEndToken(Token):
+ id = '<stream end>'
+
+class BlockSequenceStartToken(Token):
+ id = '<block sequence start>'
+
+class BlockMappingStartToken(Token):
+ id = '<block mapping start>'
+
+class BlockEndToken(Token):
+ id = '<block end>'
+
+class FlowSequenceStartToken(Token):
+ id = '['
+
+class FlowMappingStartToken(Token):
+ id = '{'
+
+class FlowSequenceEndToken(Token):
+ id = ']'
+
+class FlowMappingEndToken(Token):
+ id = '}'
+
+class KeyToken(Token):
+ id = '?'
+
+class ValueToken(Token):
+ id = ':'
+
+class BlockEntryToken(Token):
+ id = '-'
+
+class FlowEntryToken(Token):
+ id = ','
+
+class AliasToken(Token):
+ id = '<alias>'
+ def __init__(self, value, start_mark, end_mark):
+ self.value = value
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+
+class AnchorToken(Token):
+ id = '<anchor>'
+ def __init__(self, value, start_mark, end_mark):
+ self.value = value
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+
+class TagToken(Token):
+ id = '<tag>'
+ def __init__(self, value, start_mark, end_mark):
+ self.value = value
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+
+class ScalarToken(Token):
+ id = '<scalar>'
+ def __init__(self, value, plain, start_mark, end_mark, style=None):
+ self.value = value
+ self.plain = plain
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+ self.style = style
+
diff --git a/collectors/python.d.plugin/python_modules/pyyaml3/__init__.py b/collectors/python.d.plugin/python_modules/pyyaml3/__init__.py
new file mode 100644
index 000000000..a884b33cf
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/pyyaml3/__init__.py
@@ -0,0 +1,313 @@
+# SPDX-License-Identifier: MIT
+
+from .error import *
+
+from .tokens import *
+from .events import *
+from .nodes import *
+
+from .loader import *
+from .dumper import *
+
+__version__ = '3.11'
+try:
+ from .cyaml import *
+ __with_libyaml__ = True
+except ImportError:
+ __with_libyaml__ = False
+
+import io
+
+def scan(stream, Loader=Loader):
+ """
+ Scan a YAML stream and produce scanning tokens.
+ """
+ loader = Loader(stream)
+ try:
+ while loader.check_token():
+ yield loader.get_token()
+ finally:
+ loader.dispose()
+
+def parse(stream, Loader=Loader):
+ """
+ Parse a YAML stream and produce parsing events.
+ """
+ loader = Loader(stream)
+ try:
+ while loader.check_event():
+ yield loader.get_event()
+ finally:
+ loader.dispose()
+
+def compose(stream, Loader=Loader):
+ """
+ Parse the first YAML document in a stream
+ and produce the corresponding representation tree.
+ """
+ loader = Loader(stream)
+ try:
+ return loader.get_single_node()
+ finally:
+ loader.dispose()
+
+def compose_all(stream, Loader=Loader):
+ """
+ Parse all YAML documents in a stream
+ and produce corresponding representation trees.
+ """
+ loader = Loader(stream)
+ try:
+ while loader.check_node():
+ yield loader.get_node()
+ finally:
+ loader.dispose()
+
+def load(stream, Loader=Loader):
+ """
+ Parse the first YAML document in a stream
+ and produce the corresponding Python object.
+ """
+ loader = Loader(stream)
+ try:
+ return loader.get_single_data()
+ finally:
+ loader.dispose()
+
+def load_all(stream, Loader=Loader):
+ """
+ Parse all YAML documents in a stream
+ and produce corresponding Python objects.
+ """
+ loader = Loader(stream)
+ try:
+ while loader.check_data():
+ yield loader.get_data()
+ finally:
+ loader.dispose()
+
+def safe_load(stream):
+ """
+ Parse the first YAML document in a stream
+ and produce the corresponding Python object.
+ Resolve only basic YAML tags.
+ """
+ return load(stream, SafeLoader)
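+# NOTE (editorial illustration, not part of upstream PyYAML):
+#     safe_load("a: 1\nb: [2, 3]")  ->  {'a': 1, 'b': [2, 3]}
+#     safe_load("---\n- x\n- y\n")  ->  ['x', 'y']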
+
+def safe_load_all(stream):
+ """
+ Parse all YAML documents in a stream
+ and produce corresponding Python objects.
+ Resolve only basic YAML tags.
+ """
+ return load_all(stream, SafeLoader)
+
+def emit(events, stream=None, Dumper=Dumper,
+ canonical=None, indent=None, width=None,
+ allow_unicode=None, line_break=None):
+ """
+ Emit YAML parsing events into a stream.
+ If stream is None, return the produced string instead.
+ """
+ getvalue = None
+ if stream is None:
+ stream = io.StringIO()
+ getvalue = stream.getvalue
+ dumper = Dumper(stream, canonical=canonical, indent=indent, width=width,
+ allow_unicode=allow_unicode, line_break=line_break)
+ try:
+ for event in events:
+ dumper.emit(event)
+ finally:
+ dumper.dispose()
+ if getvalue:
+ return getvalue()
+
+def serialize_all(nodes, stream=None, Dumper=Dumper,
+ canonical=None, indent=None, width=None,
+ allow_unicode=None, line_break=None,
+ encoding=None, explicit_start=None, explicit_end=None,
+ version=None, tags=None):
+ """
+ Serialize a sequence of representation trees into a YAML stream.
+ If stream is None, return the produced string instead.
+ """
+ getvalue = None
+ if stream is None:
+ if encoding is None:
+ stream = io.StringIO()
+ else:
+ stream = io.BytesIO()
+ getvalue = stream.getvalue
+ dumper = Dumper(stream, canonical=canonical, indent=indent, width=width,
+ allow_unicode=allow_unicode, line_break=line_break,
+ encoding=encoding, version=version, tags=tags,
+ explicit_start=explicit_start, explicit_end=explicit_end)
+ try:
+ dumper.open()
+ for node in nodes:
+ dumper.serialize(node)
+ dumper.close()
+ finally:
+ dumper.dispose()
+ if getvalue:
+ return getvalue()
+
+def serialize(node, stream=None, Dumper=Dumper, **kwds):
+ """
+ Serialize a representation tree into a YAML stream.
+ If stream is None, return the produced string instead.
+ """
+ return serialize_all([node], stream, Dumper=Dumper, **kwds)
+
+def dump_all(documents, stream=None, Dumper=Dumper,
+ default_style=None, default_flow_style=None,
+ canonical=None, indent=None, width=None,
+ allow_unicode=None, line_break=None,
+ encoding=None, explicit_start=None, explicit_end=None,
+ version=None, tags=None):
+ """
+ Serialize a sequence of Python objects into a YAML stream.
+ If stream is None, return the produced string instead.
+ """
+ getvalue = None
+ if stream is None:
+ if encoding is None:
+ stream = io.StringIO()
+ else:
+ stream = io.BytesIO()
+ getvalue = stream.getvalue
+ dumper = Dumper(stream, default_style=default_style,
+ default_flow_style=default_flow_style,
+ canonical=canonical, indent=indent, width=width,
+ allow_unicode=allow_unicode, line_break=line_break,
+ encoding=encoding, version=version, tags=tags,
+ explicit_start=explicit_start, explicit_end=explicit_end)
+ try:
+ dumper.open()
+ for data in documents:
+ dumper.represent(data)
+ dumper.close()
+ finally:
+ dumper.dispose()
+ if getvalue:
+ return getvalue()
+
+def dump(data, stream=None, Dumper=Dumper, **kwds):
+ """
+ Serialize a Python object into a YAML stream.
+ If stream is None, return the produced string instead.
+ """
+ return dump_all([data], stream, Dumper=Dumper, **kwds)
+
+def safe_dump_all(documents, stream=None, **kwds):
+ """
+ Serialize a sequence of Python objects into a YAML stream.
+ Produce only basic YAML tags.
+ If stream is None, return the produced string instead.
+ """
+ return dump_all(documents, stream, Dumper=SafeDumper, **kwds)
+
+def safe_dump(data, stream=None, **kwds):
+ """
+ Serialize a Python object into a YAML stream.
+ Produce only basic YAML tags.
+ If stream is None, return the produced string instead.
+ """
+ return dump_all([data], stream, Dumper=SafeDumper, **kwds)
+
+def add_implicit_resolver(tag, regexp, first=None,
+ Loader=Loader, Dumper=Dumper):
+ """
+ Add an implicit scalar detector.
+ If an implicit scalar value matches the given regexp,
+ the corresponding tag is assigned to the scalar.
+ first is a sequence of possible initial characters or None.
+ """
+ Loader.add_implicit_resolver(tag, regexp, first)
+ Dumper.add_implicit_resolver(tag, regexp, first)
+
+def add_path_resolver(tag, path, kind=None, Loader=Loader, Dumper=Dumper):
+ """
+ Add a path based resolver for the given tag.
+ A path is a list of keys that forms a path
+ to a node in the representation tree.
+ Keys can be string values, integers, or None.
+ """
+ Loader.add_path_resolver(tag, path, kind)
+ Dumper.add_path_resolver(tag, path, kind)
+
+def add_constructor(tag, constructor, Loader=Loader):
+ """
+ Add a constructor for the given tag.
+ Constructor is a function that accepts a Loader instance
+ and a node object and produces the corresponding Python object.
+ """
+ Loader.add_constructor(tag, constructor)
+
+def add_multi_constructor(tag_prefix, multi_constructor, Loader=Loader):
+ """
+ Add a multi-constructor for the given tag prefix.
+ Multi-constructor is called for a node if its tag starts with tag_prefix.
+ Multi-constructor accepts a Loader instance, a tag suffix,
+ and a node object and produces the corresponding Python object.
+ """
+ Loader.add_multi_constructor(tag_prefix, multi_constructor)
+
+def add_representer(data_type, representer, Dumper=Dumper):
+ """
+ Add a representer for the given type.
+ Representer is a function accepting a Dumper instance
+ and an instance of the given data type
+ and producing the corresponding representation node.
+ """
+ Dumper.add_representer(data_type, representer)
+
+def add_multi_representer(data_type, multi_representer, Dumper=Dumper):
+ """
+ Add a representer for the given type.
+ Multi-representer is a function accepting a Dumper instance
+ and an instance of the given data type or subtype
+ and producing the corresponding representation node.
+ """
+ Dumper.add_multi_representer(data_type, multi_representer)
+
+class YAMLObjectMetaclass(type):
+ """
+ The metaclass for YAMLObject.
+ """
+ def __init__(cls, name, bases, kwds):
+ super(YAMLObjectMetaclass, cls).__init__(name, bases, kwds)
+ if 'yaml_tag' in kwds and kwds['yaml_tag'] is not None:
+ cls.yaml_loader.add_constructor(cls.yaml_tag, cls.from_yaml)
+ cls.yaml_dumper.add_representer(cls, cls.to_yaml)
+
+class YAMLObject(metaclass=YAMLObjectMetaclass):
+ """
+ An object that can dump itself to a YAML stream
+ and load itself from a YAML stream.
+ """
+
+ __slots__ = () # no direct instantiation, so allow immutable subclasses
+
+ yaml_loader = Loader
+ yaml_dumper = Dumper
+
+ yaml_tag = None
+ yaml_flow_style = None
+
+ @classmethod
+ def from_yaml(cls, loader, node):
+ """
+ Convert a representation node to a Python object.
+ """
+ return loader.construct_yaml_object(node, cls)
+
+ @classmethod
+ def to_yaml(cls, dumper, data):
+ """
+ Convert a Python object to a representation node.
+ """
+ return dumper.represent_yaml_object(cls.yaml_tag, data, cls,
+ flow_style=cls.yaml_flow_style)
+
diff --git a/collectors/python.d.plugin/python_modules/pyyaml3/composer.py b/collectors/python.d.plugin/python_modules/pyyaml3/composer.py
new file mode 100644
index 000000000..c418bba91
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/pyyaml3/composer.py
@@ -0,0 +1,140 @@
+# SPDX-License-Identifier: MIT
+
+__all__ = ['Composer', 'ComposerError']
+
+from .error import MarkedYAMLError
+from .events import *
+from .nodes import *
+
+class ComposerError(MarkedYAMLError):
+ pass
+
+class Composer:
+
+ def __init__(self):
+ self.anchors = {}
+
+ def check_node(self):
+ # Drop the STREAM-START event.
+ if self.check_event(StreamStartEvent):
+ self.get_event()
+
+ # Are there more documents available?
+ return not self.check_event(StreamEndEvent)
+
+ def get_node(self):
+ # Get the root node of the next document.
+ if not self.check_event(StreamEndEvent):
+ return self.compose_document()
+
+ def get_single_node(self):
+ # Drop the STREAM-START event.
+ self.get_event()
+
+ # Compose a document if the stream is not empty.
+ document = None
+ if not self.check_event(StreamEndEvent):
+ document = self.compose_document()
+
+ # Ensure that the stream contains no more documents.
+ if not self.check_event(StreamEndEvent):
+ event = self.get_event()
+ raise ComposerError("expected a single document in the stream",
+ document.start_mark, "but found another document",
+ event.start_mark)
+
+ # Drop the STREAM-END event.
+ self.get_event()
+
+ return document
+
+ def compose_document(self):
+ # Drop the DOCUMENT-START event.
+ self.get_event()
+
+ # Compose the root node.
+ node = self.compose_node(None, None)
+
+ # Drop the DOCUMENT-END event.
+ self.get_event()
+
+ self.anchors = {}
+ return node
+
+ def compose_node(self, parent, index):
+ if self.check_event(AliasEvent):
+ event = self.get_event()
+ anchor = event.anchor
+ if anchor not in self.anchors:
+ raise ComposerError(None, None, "found undefined alias %r"
+ % anchor, event.start_mark)
+ return self.anchors[anchor]
+ event = self.peek_event()
+ anchor = event.anchor
+ if anchor is not None:
+ if anchor in self.anchors:
+ raise ComposerError("found duplicate anchor %r; first occurence"
+ % anchor, self.anchors[anchor].start_mark,
+ "second occurence", event.start_mark)
+ self.descend_resolver(parent, index)
+ if self.check_event(ScalarEvent):
+ node = self.compose_scalar_node(anchor)
+ elif self.check_event(SequenceStartEvent):
+ node = self.compose_sequence_node(anchor)
+ elif self.check_event(MappingStartEvent):
+ node = self.compose_mapping_node(anchor)
+ self.ascend_resolver()
+ return node
+
+ def compose_scalar_node(self, anchor):
+ event = self.get_event()
+ tag = event.tag
+ if tag is None or tag == '!':
+ tag = self.resolve(ScalarNode, event.value, event.implicit)
+ node = ScalarNode(tag, event.value,
+ event.start_mark, event.end_mark, style=event.style)
+ if anchor is not None:
+ self.anchors[anchor] = node
+ return node
+
+ def compose_sequence_node(self, anchor):
+ start_event = self.get_event()
+ tag = start_event.tag
+ if tag is None or tag == '!':
+ tag = self.resolve(SequenceNode, None, start_event.implicit)
+ node = SequenceNode(tag, [],
+ start_event.start_mark, None,
+ flow_style=start_event.flow_style)
+ if anchor is not None:
+ self.anchors[anchor] = node
+ index = 0
+ while not self.check_event(SequenceEndEvent):
+ node.value.append(self.compose_node(node, index))
+ index += 1
+ end_event = self.get_event()
+ node.end_mark = end_event.end_mark
+ return node
+
+ def compose_mapping_node(self, anchor):
+ start_event = self.get_event()
+ tag = start_event.tag
+ if tag is None or tag == '!':
+ tag = self.resolve(MappingNode, None, start_event.implicit)
+ node = MappingNode(tag, [],
+ start_event.start_mark, None,
+ flow_style=start_event.flow_style)
+ if anchor is not None:
+ self.anchors[anchor] = node
+ while not self.check_event(MappingEndEvent):
+ #key_event = self.peek_event()
+ item_key = self.compose_node(node, None)
+ #if item_key in node.value:
+ # raise ComposerError("while composing a mapping", start_event.start_mark,
+ # "found duplicate key", key_event.start_mark)
+ item_value = self.compose_node(node, item_key)
+ #node.value[item_key] = item_value
+ node.value.append((item_key, item_value))
+ end_event = self.get_event()
+ node.end_mark = end_event.end_mark
+ return node
+
diff --git a/collectors/python.d.plugin/python_modules/pyyaml3/constructor.py b/collectors/python.d.plugin/python_modules/pyyaml3/constructor.py
new file mode 100644
index 000000000..ee09a7a7e
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/pyyaml3/constructor.py
@@ -0,0 +1,687 @@
+# SPDX-License-Identifier: MIT
+
+__all__ = ['BaseConstructor', 'SafeConstructor', 'Constructor',
+ 'ConstructorError']
+
+from .error import *
+from .nodes import *
+
+import collections, datetime, base64, binascii, re, sys, types
+
+class ConstructorError(MarkedYAMLError):
+ pass
+
+class BaseConstructor:
+
+ yaml_constructors = {}
+ yaml_multi_constructors = {}
+
+ def __init__(self):
+ self.constructed_objects = {}
+ self.recursive_objects = {}
+ self.state_generators = []
+ self.deep_construct = False
+
+ def check_data(self):
+ # Are there more documents available?
+ return self.check_node()
+
+ def get_data(self):
+ # Construct and return the next document.
+ if self.check_node():
+ return self.construct_document(self.get_node())
+
+ def get_single_data(self):
+ # Ensure that the stream contains a single document and construct it.
+ node = self.get_single_node()
+ if node is not None:
+ return self.construct_document(node)
+ return None
+
+ def construct_document(self, node):
+ data = self.construct_object(node)
+ while self.state_generators:
+ state_generators = self.state_generators
+ self.state_generators = []
+ for generator in state_generators:
+ for dummy in generator:
+ pass
+ self.constructed_objects = {}
+ self.recursive_objects = {}
+ self.deep_construct = False
+ return data
+
+ def construct_object(self, node, deep=False):
+ if node in self.constructed_objects:
+ return self.constructed_objects[node]
+ if deep:
+ old_deep = self.deep_construct
+ self.deep_construct = True
+ if node in self.recursive_objects:
+ raise ConstructorError(None, None,
+ "found unconstructable recursive node", node.start_mark)
+ self.recursive_objects[node] = None
+ constructor = None
+ tag_suffix = None
+ if node.tag in self.yaml_constructors:
+ constructor = self.yaml_constructors[node.tag]
+ else:
+ for tag_prefix in self.yaml_multi_constructors:
+ if node.tag.startswith(tag_prefix):
+ tag_suffix = node.tag[len(tag_prefix):]
+ constructor = self.yaml_multi_constructors[tag_prefix]
+ break
+ else:
+ if None in self.yaml_multi_constructors:
+ tag_suffix = node.tag
+ constructor = self.yaml_multi_constructors[None]
+ elif None in self.yaml_constructors:
+ constructor = self.yaml_constructors[None]
+ elif isinstance(node, ScalarNode):
+ constructor = self.__class__.construct_scalar
+ elif isinstance(node, SequenceNode):
+ constructor = self.__class__.construct_sequence
+ elif isinstance(node, MappingNode):
+ constructor = self.__class__.construct_mapping
+ if tag_suffix is None:
+ data = constructor(self, node)
+ else:
+ data = constructor(self, tag_suffix, node)
+ if isinstance(data, types.GeneratorType):
+ generator = data
+ data = next(generator)
+ if self.deep_construct:
+ for dummy in generator:
+ pass
+ else:
+ self.state_generators.append(generator)
+ self.constructed_objects[node] = data
+ del self.recursive_objects[node]
+ if deep:
+ self.deep_construct = old_deep
+ return data
+
+ def construct_scalar(self, node):
+ if not isinstance(node, ScalarNode):
+ raise ConstructorError(None, None,
+ "expected a scalar node, but found %s" % node.id,
+ node.start_mark)
+ return node.value
+
+ def construct_sequence(self, node, deep=False):
+ if not isinstance(node, SequenceNode):
+ raise ConstructorError(None, None,
+ "expected a sequence node, but found %s" % node.id,
+ node.start_mark)
+ return [self.construct_object(child, deep=deep)
+ for child in node.value]
+
+ def construct_mapping(self, node, deep=False):
+ if not isinstance(node, MappingNode):
+ raise ConstructorError(None, None,
+ "expected a mapping node, but found %s" % node.id,
+ node.start_mark)
+ mapping = {}
+ for key_node, value_node in node.value:
+ key = self.construct_object(key_node, deep=deep)
+ if not isinstance(key, collections.Hashable):
+ raise ConstructorError("while constructing a mapping", node.start_mark,
+ "found unhashable key", key_node.start_mark)
+ value = self.construct_object(value_node, deep=deep)
+ mapping[key] = value
+ return mapping
+
+ def construct_pairs(self, node, deep=False):
+ if not isinstance(node, MappingNode):
+ raise ConstructorError(None, None,
+ "expected a mapping node, but found %s" % node.id,
+ node.start_mark)
+ pairs = []
+ for key_node, value_node in node.value:
+ key = self.construct_object(key_node, deep=deep)
+ value = self.construct_object(value_node, deep=deep)
+ pairs.append((key, value))
+ return pairs
+
+ @classmethod
+ def add_constructor(cls, tag, constructor):
+ if not 'yaml_constructors' in cls.__dict__:
+ cls.yaml_constructors = cls.yaml_constructors.copy()
+ cls.yaml_constructors[tag] = constructor
+
+ @classmethod
+ def add_multi_constructor(cls, tag_prefix, multi_constructor):
+ if not 'yaml_multi_constructors' in cls.__dict__:
+ cls.yaml_multi_constructors = cls.yaml_multi_constructors.copy()
+ cls.yaml_multi_constructors[tag_prefix] = multi_constructor
+
+class SafeConstructor(BaseConstructor):
+
+ def construct_scalar(self, node):
+ if isinstance(node, MappingNode):
+ for key_node, value_node in node.value:
+ if key_node.tag == 'tag:yaml.org,2002:value':
+ return self.construct_scalar(value_node)
+ return super().construct_scalar(node)
+
+ def flatten_mapping(self, node):
+ merge = []
+ index = 0
+ while index < len(node.value):
+ key_node, value_node = node.value[index]
+ if key_node.tag == 'tag:yaml.org,2002:merge':
+ del node.value[index]
+ if isinstance(value_node, MappingNode):
+ self.flatten_mapping(value_node)
+ merge.extend(value_node.value)
+ elif isinstance(value_node, SequenceNode):
+ submerge = []
+ for subnode in value_node.value:
+ if not isinstance(subnode, MappingNode):
+ raise ConstructorError("while constructing a mapping",
+ node.start_mark,
+ "expected a mapping for merging, but found %s"
+ % subnode.id, subnode.start_mark)
+ self.flatten_mapping(subnode)
+ submerge.append(subnode.value)
+ submerge.reverse()
+ for value in submerge:
+ merge.extend(value)
+ else:
+ raise ConstructorError("while constructing a mapping", node.start_mark,
+ "expected a mapping or list of mappings for merging, but found %s"
+ % value_node.id, value_node.start_mark)
+ elif key_node.tag == 'tag:yaml.org,2002:value':
+ key_node.tag = 'tag:yaml.org,2002:str'
+ index += 1
+ else:
+ index += 1
+ if merge:
+ node.value = merge + node.value
+
+ def construct_mapping(self, node, deep=False):
+ if isinstance(node, MappingNode):
+ self.flatten_mapping(node)
+ return super().construct_mapping(node, deep=deep)
+
+ def construct_yaml_null(self, node):
+ self.construct_scalar(node)
+ return None
+
+ bool_values = {
+ 'yes': True,
+ 'no': False,
+ 'true': True,
+ 'false': False,
+ 'on': True,
+ 'off': False,
+ }
+
+ def construct_yaml_bool(self, node):
+ value = self.construct_scalar(node)
+ return self.bool_values[value.lower()]
+
+ def construct_yaml_int(self, node):
+ value = self.construct_scalar(node)
+ value = value.replace('_', '')
+ sign = +1
+ if value[0] == '-':
+ sign = -1
+ if value[0] in '+-':
+ value = value[1:]
+ if value == '0':
+ return 0
+ elif value.startswith('0b'):
+ return sign*int(value[2:], 2)
+ elif value.startswith('0x'):
+ return sign*int(value[2:], 16)
+ elif value[0] == '0':
+ return sign*int(value, 8)
+ elif ':' in value:
+ digits = [int(part) for part in value.split(':')]
+ digits.reverse()
+ base = 1
+ value = 0
+ for digit in digits:
+ value += digit*base
+ base *= 60
+ return sign*value
+ else:
+ return sign*int(value)
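+ # NOTE (editorial illustration, not part of upstream PyYAML): '0x1f'
+ # constructs 31, '010' constructs 8 (octal), and the sexagesimal form
+ # '1:30' constructs 90 (1*60 + 30).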
+
+ inf_value = 1e300
+ while inf_value != inf_value*inf_value:
+ inf_value *= inf_value
+ nan_value = -inf_value/inf_value # Trying to make a quiet NaN (like C99).
+
+ def construct_yaml_float(self, node):
+ value = self.construct_scalar(node)
+ value = value.replace('_', '').lower()
+ sign = +1
+ if value[0] == '-':
+ sign = -1
+ if value[0] in '+-':
+ value = value[1:]
+ if value == '.inf':
+ return sign*self.inf_value
+ elif value == '.nan':
+ return self.nan_value
+ elif ':' in value:
+ digits = [float(part) for part in value.split(':')]
+ digits.reverse()
+ base = 1
+ value = 0.0
+ for digit in digits:
+ value += digit*base
+ base *= 60
+ return sign*value
+ else:
+ return sign*float(value)
+
+ def construct_yaml_binary(self, node):
+ try:
+ value = self.construct_scalar(node).encode('ascii')
+ except UnicodeEncodeError as exc:
+ raise ConstructorError(None, None,
+ "failed to convert base64 data into ascii: %s" % exc,
+ node.start_mark)
+ try:
+ if hasattr(base64, 'decodebytes'):
+ return base64.decodebytes(value)
+ else:
+ return base64.decodestring(value)
+ except binascii.Error as exc:
+ raise ConstructorError(None, None,
+ "failed to decode base64 data: %s" % exc, node.start_mark)
+
+ timestamp_regexp = re.compile(
+ r'''^(?P<year>[0-9][0-9][0-9][0-9])
+ -(?P<month>[0-9][0-9]?)
+ -(?P<day>[0-9][0-9]?)
+ (?:(?:[Tt]|[ \t]+)
+ (?P<hour>[0-9][0-9]?)
+ :(?P<minute>[0-9][0-9])
+ :(?P<second>[0-9][0-9])
+ (?:\.(?P<fraction>[0-9]*))?
+ (?:[ \t]*(?P<tz>Z|(?P<tz_sign>[-+])(?P<tz_hour>[0-9][0-9]?)
+ (?::(?P<tz_minute>[0-9][0-9]))?))?)?$''', re.X)
+
+ def construct_yaml_timestamp(self, node):
+ value = self.construct_scalar(node)
+ match = self.timestamp_regexp.match(node.value)
+ values = match.groupdict()
+ year = int(values['year'])
+ month = int(values['month'])
+ day = int(values['day'])
+ if not values['hour']:
+ return datetime.date(year, month, day)
+ hour = int(values['hour'])
+ minute = int(values['minute'])
+ second = int(values['second'])
+ fraction = 0
+ if values['fraction']:
+ fraction = values['fraction'][:6]
+ while len(fraction) < 6:
+ fraction += '0'
+ fraction = int(fraction)
+ delta = None
+ if values['tz_sign']:
+ tz_hour = int(values['tz_hour'])
+ tz_minute = int(values['tz_minute'] or 0)
+ delta = datetime.timedelta(hours=tz_hour, minutes=tz_minute)
+ if values['tz_sign'] == '-':
+ delta = -delta
+ data = datetime.datetime(year, month, day, hour, minute, second, fraction)
+ if delta:
+ data -= delta
+ return data
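+ # NOTE (editorial illustration, not part of upstream PyYAML):
+ # '2001-12-14' constructs datetime.date(2001, 12, 14), while
+ # '2001-12-14 21:59:43.10 -5' constructs the UTC-shifted naive value
+ # datetime.datetime(2001, 12, 15, 2, 59, 43, 100000).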
+
+ def construct_yaml_omap(self, node):
+ # Note: we do not check for duplicate keys, because it's too
+ # CPU-expensive.
+ omap = []
+ yield omap
+ if not isinstance(node, SequenceNode):
+ raise ConstructorError("while constructing an ordered map", node.start_mark,
+ "expected a sequence, but found %s" % node.id, node.start_mark)
+ for subnode in node.value:
+ if not isinstance(subnode, MappingNode):
+ raise ConstructorError("while constructing an ordered map", node.start_mark,
+ "expected a mapping of length 1, but found %s" % subnode.id,
+ subnode.start_mark)
+ if len(subnode.value) != 1:
+ raise ConstructorError("while constructing an ordered map", node.start_mark,
+ "expected a single mapping item, but found %d items" % len(subnode.value),
+ subnode.start_mark)
+ key_node, value_node = subnode.value[0]
+ key = self.construct_object(key_node)
+ value = self.construct_object(value_node)
+ omap.append((key, value))
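+ # E.g. "!!omap [ one: 1, two: 2 ]" is constructed as the ordered list of
+ # pairs [('one', 1), ('two', 2)].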
+
+ def construct_yaml_pairs(self, node):
+ # Note: the same code as `construct_yaml_omap`.
+ pairs = []
+ yield pairs
+ if not isinstance(node, SequenceNode):
+ raise ConstructorError("while constructing pairs", node.start_mark,
+ "expected a sequence, but found %s" % node.id, node.start_mark)
+ for subnode in node.value:
+ if not isinstance(subnode, MappingNode):
+ raise ConstructorError("while constructing pairs", node.start_mark,
+ "expected a mapping of length 1, but found %s" % subnode.id,
+ subnode.start_mark)
+ if len(subnode.value) != 1:
+ raise ConstructorError("while constructing pairs", node.start_mark,
+ "expected a single mapping item, but found %d items" % len(subnode.value),
+ subnode.start_mark)
+ key_node, value_node = subnode.value[0]
+ key = self.construct_object(key_node)
+ value = self.construct_object(value_node)
+ pairs.append((key, value))
+
+ def construct_yaml_set(self, node):
+ data = set()
+ yield data
+ value = self.construct_mapping(node)
+ data.update(value)
+
+ def construct_yaml_str(self, node):
+ return self.construct_scalar(node)
+
+ def construct_yaml_seq(self, node):
+ data = []
+ yield data
+ data.extend(self.construct_sequence(node))
+
+ def construct_yaml_map(self, node):
+ data = {}
+ yield data
+ value = self.construct_mapping(node)
+ data.update(value)
+
+ def construct_yaml_object(self, node, cls):
+ data = cls.__new__(cls)
+ yield data
+ if hasattr(data, '__setstate__'):
+ state = self.construct_mapping(node, deep=True)
+ data.__setstate__(state)
+ else:
+ state = self.construct_mapping(node)
+ data.__dict__.update(state)
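+ # Yielding the bare instance before its state is applied lets recursive
+ # references to this node be resolved first (the same two-step pattern as
+ # the other generator-based constructors above).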
+
+ def construct_undefined(self, node):
+ raise ConstructorError(None, None,
+ "could not determine a constructor for the tag %r" % node.tag,
+ node.start_mark)
+
+SafeConstructor.add_constructor(
+ 'tag:yaml.org,2002:null',
+ SafeConstructor.construct_yaml_null)
+
+SafeConstructor.add_constructor(
+ 'tag:yaml.org,2002:bool',
+ SafeConstructor.construct_yaml_bool)
+
+SafeConstructor.add_constructor(
+ 'tag:yaml.org,2002:int',
+ SafeConstructor.construct_yaml_int)
+
+SafeConstructor.add_constructor(
+ 'tag:yaml.org,2002:float',
+ SafeConstructor.construct_yaml_float)
+
+SafeConstructor.add_constructor(
+ 'tag:yaml.org,2002:binary',
+ SafeConstructor.construct_yaml_binary)
+
+SafeConstructor.add_constructor(
+ 'tag:yaml.org,2002:timestamp',
+ SafeConstructor.construct_yaml_timestamp)
+
+SafeConstructor.add_constructor(
+ 'tag:yaml.org,2002:omap',
+ SafeConstructor.construct_yaml_omap)
+
+SafeConstructor.add_constructor(
+ 'tag:yaml.org,2002:pairs',
+ SafeConstructor.construct_yaml_pairs)
+
+SafeConstructor.add_constructor(
+ 'tag:yaml.org,2002:set',
+ SafeConstructor.construct_yaml_set)
+
+SafeConstructor.add_constructor(
+ 'tag:yaml.org,2002:str',
+ SafeConstructor.construct_yaml_str)
+
+SafeConstructor.add_constructor(
+ 'tag:yaml.org,2002:seq',
+ SafeConstructor.construct_yaml_seq)
+
+SafeConstructor.add_constructor(
+ 'tag:yaml.org,2002:map',
+ SafeConstructor.construct_yaml_map)
+
+SafeConstructor.add_constructor(None,
+ SafeConstructor.construct_undefined)
+
+class Constructor(SafeConstructor):
+
+ def construct_python_str(self, node):
+ return self.construct_scalar(node)
+
+ def construct_python_unicode(self, node):
+ return self.construct_scalar(node)
+
+ def construct_python_bytes(self, node):
+ try:
+ value = self.construct_scalar(node).encode('ascii')
+ except UnicodeEncodeError as exc:
+ raise ConstructorError(None, None,
+ "failed to convert base64 data into ascii: %s" % exc,
+ node.start_mark)
+ try:
+ if hasattr(base64, 'decodebytes'):
+ return base64.decodebytes(value)
+ else:
+ return base64.decodestring(value)
+ except binascii.Error as exc:
+ raise ConstructorError(None, None,
+ "failed to decode base64 data: %s" % exc, node.start_mark)
+
+ def construct_python_long(self, node):
+ return self.construct_yaml_int(node)
+
+ def construct_python_complex(self, node):
+ return complex(self.construct_scalar(node))
+
+ def construct_python_tuple(self, node):
+ return tuple(self.construct_sequence(node))
+
+ def find_python_module(self, name, mark):
+ if not name:
+ raise ConstructorError("while constructing a Python module", mark,
+ "expected non-empty name appended to the tag", mark)
+ try:
+ __import__(name)
+ except ImportError as exc:
+ raise ConstructorError("while constructing a Python module", mark,
+ "cannot find module %r (%s)" % (name, exc), mark)
+ return sys.modules[name]
+
+ def find_python_name(self, name, mark):
+ if not name:
+ raise ConstructorError("while constructing a Python object", mark,
+ "expected non-empty name appended to the tag", mark)
+ if '.' in name:
+ module_name, object_name = name.rsplit('.', 1)
+ else:
+ module_name = 'builtins'
+ object_name = name
+ try:
+ __import__(module_name)
+ except ImportError as exc:
+ raise ConstructorError("while constructing a Python object", mark,
+ "cannot find module %r (%s)" % (module_name, exc), mark)
+ module = sys.modules[module_name]
+ if not hasattr(module, object_name):
+ raise ConstructorError("while constructing a Python object", mark,
+ "cannot find %r in the module %r"
+ % (object_name, module.__name__), mark)
+ return getattr(module, object_name)
+
+ def construct_python_name(self, suffix, node):
+ value = self.construct_scalar(node)
+ if value:
+ raise ConstructorError("while constructing a Python name", node.start_mark,
+ "expected the empty value, but found %r" % value, node.start_mark)
+ return self.find_python_name(suffix, node.start_mark)
+
+ def construct_python_module(self, suffix, node):
+ value = self.construct_scalar(node)
+ if value:
+ raise ConstructorError("while constructing a Python module", node.start_mark,
+ "expected the empty value, but found %r" % value, node.start_mark)
+ return self.find_python_module(suffix, node.start_mark)
+
+ def make_python_instance(self, suffix, node,
+ args=None, kwds=None, newobj=False):
+ if not args:
+ args = []
+ if not kwds:
+ kwds = {}
+ cls = self.find_python_name(suffix, node.start_mark)
+ if newobj and isinstance(cls, type):
+ return cls.__new__(cls, *args, **kwds)
+ else:
+ return cls(*args, **kwds)
+
+ def set_python_instance_state(self, instance, state):
+ if hasattr(instance, '__setstate__'):
+ instance.__setstate__(state)
+ else:
+ slotstate = {}
+ if isinstance(state, tuple) and len(state) == 2:
+ state, slotstate = state
+ if hasattr(instance, '__dict__'):
+ instance.__dict__.update(state)
+ elif state:
+ slotstate.update(state)
+ for key, value in slotstate.items():
+ setattr(instance, key, value)
+
+ def construct_python_object(self, suffix, node):
+ # Format:
+ # !!python/object:module.name { ... state ... }
+ instance = self.make_python_instance(suffix, node, newobj=True)
+ yield instance
+ deep = hasattr(instance, '__setstate__')
+ state = self.construct_mapping(node, deep=deep)
+ self.set_python_instance_state(instance, state)
+
+ def construct_python_object_apply(self, suffix, node, newobj=False):
+ # Format:
+ # !!python/object/apply # (or !!python/object/new)
+ # args: [ ... arguments ... ]
+ # kwds: { ... keywords ... }
+ # state: ... state ...
+ # listitems: [ ... listitems ... ]
+ # dictitems: { ... dictitems ... }
+ # or short format:
+ # !!python/object/apply [ ... arguments ... ]
+ # The difference between !!python/object/apply and !!python/object/new
+ # is how an object is created, check make_python_instance for details.
+ if isinstance(node, SequenceNode):
+ args = self.construct_sequence(node, deep=True)
+ kwds = {}
+ state = {}
+ listitems = []
+ dictitems = {}
+ else:
+ value = self.construct_mapping(node, deep=True)
+ args = value.get('args', [])
+ kwds = value.get('kwds', {})
+ state = value.get('state', {})
+ listitems = value.get('listitems', [])
+ dictitems = value.get('dictitems', {})
+ instance = self.make_python_instance(suffix, node, args, kwds, newobj)
+ if state:
+ self.set_python_instance_state(instance, state)
+ if listitems:
+ instance.extend(listitems)
+ if dictitems:
+ for key in dictitems:
+ instance[key] = dictitems[key]
+ return instance
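+ # E.g. the short form "!!python/object/apply:complex [1, 2]" constructs
+ # complex(1, 2); these python/* tags are registered only on the full
+ # Constructor below, not on SafeConstructor.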
+
+ def construct_python_object_new(self, suffix, node):
+ return self.construct_python_object_apply(suffix, node, newobj=True)
+
+Constructor.add_constructor(
+ 'tag:yaml.org,2002:python/none',
+ Constructor.construct_yaml_null)
+
+Constructor.add_constructor(
+ 'tag:yaml.org,2002:python/bool',
+ Constructor.construct_yaml_bool)
+
+Constructor.add_constructor(
+ 'tag:yaml.org,2002:python/str',
+ Constructor.construct_python_str)
+
+Constructor.add_constructor(
+ 'tag:yaml.org,2002:python/unicode',
+ Constructor.construct_python_unicode)
+
+Constructor.add_constructor(
+ 'tag:yaml.org,2002:python/bytes',
+ Constructor.construct_python_bytes)
+
+Constructor.add_constructor(
+ 'tag:yaml.org,2002:python/int',
+ Constructor.construct_yaml_int)
+
+Constructor.add_constructor(
+ 'tag:yaml.org,2002:python/long',
+ Constructor.construct_python_long)
+
+Constructor.add_constructor(
+ 'tag:yaml.org,2002:python/float',
+ Constructor.construct_yaml_float)
+
+Constructor.add_constructor(
+ 'tag:yaml.org,2002:python/complex',
+ Constructor.construct_python_complex)
+
+Constructor.add_constructor(
+ 'tag:yaml.org,2002:python/list',
+ Constructor.construct_yaml_seq)
+
+Constructor.add_constructor(
+ 'tag:yaml.org,2002:python/tuple',
+ Constructor.construct_python_tuple)
+
+Constructor.add_constructor(
+ 'tag:yaml.org,2002:python/dict',
+ Constructor.construct_yaml_map)
+
+Constructor.add_multi_constructor(
+ 'tag:yaml.org,2002:python/name:',
+ Constructor.construct_python_name)
+
+Constructor.add_multi_constructor(
+ 'tag:yaml.org,2002:python/module:',
+ Constructor.construct_python_module)
+
+Constructor.add_multi_constructor(
+ 'tag:yaml.org,2002:python/object:',
+ Constructor.construct_python_object)
+
+Constructor.add_multi_constructor(
+ 'tag:yaml.org,2002:python/object/apply:',
+ Constructor.construct_python_object_apply)
+
+Constructor.add_multi_constructor(
+ 'tag:yaml.org,2002:python/object/new:',
+ Constructor.construct_python_object_new)
+
diff --git a/collectors/python.d.plugin/python_modules/pyyaml3/cyaml.py b/collectors/python.d.plugin/python_modules/pyyaml3/cyaml.py
new file mode 100644
index 000000000..e6c16d894
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/pyyaml3/cyaml.py
@@ -0,0 +1,86 @@
+# SPDX-License-Identifier: MIT
+
+__all__ = ['CBaseLoader', 'CSafeLoader', 'CLoader',
+ 'CBaseDumper', 'CSafeDumper', 'CDumper']
+
+from _yaml import CParser, CEmitter
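+# _yaml is PyYAML's C extension (the libyaml bindings); this module is only
+# importable when that extension has been built.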
+
+from .constructor import *
+
+from .serializer import *
+from .representer import *
+
+from .resolver import *
+
+class CBaseLoader(CParser, BaseConstructor, BaseResolver):
+
+ def __init__(self, stream):
+ CParser.__init__(self, stream)
+ BaseConstructor.__init__(self)
+ BaseResolver.__init__(self)
+
+class CSafeLoader(CParser, SafeConstructor, Resolver):
+
+ def __init__(self, stream):
+ CParser.__init__(self, stream)
+ SafeConstructor.__init__(self)
+ Resolver.__init__(self)
+
+class CLoader(CParser, Constructor, Resolver):
+
+ def __init__(self, stream):
+ CParser.__init__(self, stream)
+ Constructor.__init__(self)
+ Resolver.__init__(self)
+
+class CBaseDumper(CEmitter, BaseRepresenter, BaseResolver):
+
+ def __init__(self, stream,
+ default_style=None, default_flow_style=None,
+ canonical=None, indent=None, width=None,
+ allow_unicode=None, line_break=None,
+ encoding=None, explicit_start=None, explicit_end=None,
+ version=None, tags=None):
+ CEmitter.__init__(self, stream, canonical=canonical,
+ indent=indent, width=width, encoding=encoding,
+ allow_unicode=allow_unicode, line_break=line_break,
+ explicit_start=explicit_start, explicit_end=explicit_end,
+ version=version, tags=tags)
+ Representer.__init__(self, default_style=default_style,
+ default_flow_style=default_flow_style)
+ Resolver.__init__(self)
+
+class CSafeDumper(CEmitter, SafeRepresenter, Resolver):
+
+ def __init__(self, stream,
+ default_style=None, default_flow_style=None,
+ canonical=None, indent=None, width=None,
+ allow_unicode=None, line_break=None,
+ encoding=None, explicit_start=None, explicit_end=None,
+ version=None, tags=None):
+ CEmitter.__init__(self, stream, canonical=canonical,
+ indent=indent, width=width, encoding=encoding,
+ allow_unicode=allow_unicode, line_break=line_break,
+ explicit_start=explicit_start, explicit_end=explicit_end,
+ version=version, tags=tags)
+ SafeRepresenter.__init__(self, default_style=default_style,
+ default_flow_style=default_flow_style)
+ Resolver.__init__(self)
+
+class CDumper(CEmitter, Serializer, Representer, Resolver):
+
+ def __init__(self, stream,
+ default_style=None, default_flow_style=None,
+ canonical=None, indent=None, width=None,
+ allow_unicode=None, line_break=None,
+ encoding=None, explicit_start=None, explicit_end=None,
+ version=None, tags=None):
+ CEmitter.__init__(self, stream, canonical=canonical,
+ indent=indent, width=width, encoding=encoding,
+ allow_unicode=allow_unicode, line_break=line_break,
+ explicit_start=explicit_start, explicit_end=explicit_end,
+ version=version, tags=tags)
+ Representer.__init__(self, default_style=default_style,
+ default_flow_style=default_flow_style)
+ Resolver.__init__(self)
+
diff --git a/collectors/python.d.plugin/python_modules/pyyaml3/dumper.py b/collectors/python.d.plugin/python_modules/pyyaml3/dumper.py
new file mode 100644
index 000000000..ba590c6e6
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/pyyaml3/dumper.py
@@ -0,0 +1,63 @@
+# SPDX-License-Identifier: MIT
+
+__all__ = ['BaseDumper', 'SafeDumper', 'Dumper']
+
+from .emitter import *
+from .serializer import *
+from .representer import *
+from .resolver import *
+
+class BaseDumper(Emitter, Serializer, BaseRepresenter, BaseResolver):
+
+ def __init__(self, stream,
+ default_style=None, default_flow_style=None,
+ canonical=None, indent=None, width=None,
+ allow_unicode=None, line_break=None,
+ encoding=None, explicit_start=None, explicit_end=None,
+ version=None, tags=None):
+ Emitter.__init__(self, stream, canonical=canonical,
+ indent=indent, width=width,
+ allow_unicode=allow_unicode, line_break=line_break)
+ Serializer.__init__(self, encoding=encoding,
+ explicit_start=explicit_start, explicit_end=explicit_end,
+ version=version, tags=tags)
+ Representer.__init__(self, default_style=default_style,
+ default_flow_style=default_flow_style)
+ Resolver.__init__(self)
+
+class SafeDumper(Emitter, Serializer, SafeRepresenter, Resolver):
+
+ def __init__(self, stream,
+ default_style=None, default_flow_style=None,
+ canonical=None, indent=None, width=None,
+ allow_unicode=None, line_break=None,
+ encoding=None, explicit_start=None, explicit_end=None,
+ version=None, tags=None):
+ Emitter.__init__(self, stream, canonical=canonical,
+ indent=indent, width=width,
+ allow_unicode=allow_unicode, line_break=line_break)
+ Serializer.__init__(self, encoding=encoding,
+ explicit_start=explicit_start, explicit_end=explicit_end,
+ version=version, tags=tags)
+ SafeRepresenter.__init__(self, default_style=default_style,
+ default_flow_style=default_flow_style)
+ Resolver.__init__(self)
+
+class Dumper(Emitter, Serializer, Representer, Resolver):
+
+ def __init__(self, stream,
+ default_style=None, default_flow_style=None,
+ canonical=None, indent=None, width=None,
+ allow_unicode=None, line_break=None,
+ encoding=None, explicit_start=None, explicit_end=None,
+ version=None, tags=None):
+ Emitter.__init__(self, stream, canonical=canonical,
+ indent=indent, width=width,
+ allow_unicode=allow_unicode, line_break=line_break)
+ Serializer.__init__(self, encoding=encoding,
+ explicit_start=explicit_start, explicit_end=explicit_end,
+ version=version, tags=tags)
+ Representer.__init__(self, default_style=default_style,
+ default_flow_style=default_flow_style)
+ Resolver.__init__(self)
+
diff --git a/collectors/python.d.plugin/python_modules/pyyaml3/emitter.py b/collectors/python.d.plugin/python_modules/pyyaml3/emitter.py
new file mode 100644
index 000000000..d4be65a8e
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/pyyaml3/emitter.py
@@ -0,0 +1,1138 @@
+# SPDX-License-Identifier: MIT
+
+# Emitter expects events obeying the following grammar:
+# stream ::= STREAM-START document* STREAM-END
+# document ::= DOCUMENT-START node DOCUMENT-END
+# node ::= SCALAR | sequence | mapping
+# sequence ::= SEQUENCE-START node* SEQUENCE-END
+# mapping ::= MAPPING-START (node node)* MAPPING-END
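+#
+# For example, emitting the document "- 1" corresponds to the event sequence
+# StreamStart DocumentStart SequenceStart Scalar('1') SequenceEnd DocumentEnd
+# StreamEnd.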
+
+__all__ = ['Emitter', 'EmitterError']
+
+from .error import YAMLError
+from .events import *
+
+class EmitterError(YAMLError):
+ pass
+
+class ScalarAnalysis:
+ def __init__(self, scalar, empty, multiline,
+ allow_flow_plain, allow_block_plain,
+ allow_single_quoted, allow_double_quoted,
+ allow_block):
+ self.scalar = scalar
+ self.empty = empty
+ self.multiline = multiline
+ self.allow_flow_plain = allow_flow_plain
+ self.allow_block_plain = allow_block_plain
+ self.allow_single_quoted = allow_single_quoted
+ self.allow_double_quoted = allow_double_quoted
+ self.allow_block = allow_block
+
+class Emitter:
+
+ DEFAULT_TAG_PREFIXES = {
+ '!' : '!',
+ 'tag:yaml.org,2002:' : '!!',
+ }
+
+ def __init__(self, stream, canonical=None, indent=None, width=None,
+ allow_unicode=None, line_break=None):
+
+ # The stream should have the methods `write` and possibly `flush`.
+ self.stream = stream
+
+ # Encoding can be overridden by STREAM-START.
+ self.encoding = None
+
+ # Emitter is a state machine with a stack of states to handle nested
+ # structures.
+ self.states = []
+ self.state = self.expect_stream_start
+
+ # Current event and the event queue.
+ self.events = []
+ self.event = None
+
+ # The current indentation level and the stack of previous indents.
+ self.indents = []
+ self.indent = None
+
+ # Flow level.
+ self.flow_level = 0
+
+ # Contexts.
+ self.root_context = False
+ self.sequence_context = False
+ self.mapping_context = False
+ self.simple_key_context = False
+
+ # Characteristics of the last emitted character:
+ # - current position.
+ # - is it a whitespace?
+ # - is it an indention character
+ # (indentation space, '-', '?', or ':')?
+ self.line = 0
+ self.column = 0
+ self.whitespace = True
+ self.indention = True
+
+ # Whether the document requires an explicit document indicator
+ self.open_ended = False
+
+ # Formatting details.
+ self.canonical = canonical
+ self.allow_unicode = allow_unicode
+ self.best_indent = 2
+ if indent and 1 < indent < 10:
+ self.best_indent = indent
+ self.best_width = 80
+ if width and width > self.best_indent*2:
+ self.best_width = width
+ self.best_line_break = '\n'
+ if line_break in ['\r', '\n', '\r\n']:
+ self.best_line_break = line_break
+
+ # Tag prefixes.
+ self.tag_prefixes = None
+
+ # Prepared anchor and tag.
+ self.prepared_anchor = None
+ self.prepared_tag = None
+
+ # Scalar analysis and style.
+ self.analysis = None
+ self.style = None
+
+ def dispose(self):
+ # Reset the state attributes (to clear self-references)
+ self.states = []
+ self.state = None
+
+ def emit(self, event):
+ self.events.append(event)
+ while not self.need_more_events():
+ self.event = self.events.pop(0)
+ self.state()
+ self.event = None
+
+ # In some cases, we wait for a few more events before emitting.
+
+ def need_more_events(self):
+ if not self.events:
+ return True
+ event = self.events[0]
+ if isinstance(event, DocumentStartEvent):
+ return self.need_events(1)
+ elif isinstance(event, SequenceStartEvent):
+ return self.need_events(2)
+ elif isinstance(event, MappingStartEvent):
+ return self.need_events(3)
+ else:
+ return False
+
+ def need_events(self, count):
+ level = 0
+ for event in self.events[1:]:
+ if isinstance(event, (DocumentStartEvent, CollectionStartEvent)):
+ level += 1
+ elif isinstance(event, (DocumentEndEvent, CollectionEndEvent)):
+ level -= 1
+ elif isinstance(event, StreamEndEvent):
+ level = -1
+ if level < 0:
+ return False
+ return (len(self.events) < count+1)
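+ # E.g. a MappingStartEvent is held back until up to three more events have
+ # arrived, so check_empty_mapping() and check_simple_key() can look ahead.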
+
+ def increase_indent(self, flow=False, indentless=False):
+ self.indents.append(self.indent)
+ if self.indent is None:
+ if flow:
+ self.indent = self.best_indent
+ else:
+ self.indent = 0
+ elif not indentless:
+ self.indent += self.best_indent
+
+ # States.
+
+ # Stream handlers.
+
+ def expect_stream_start(self):
+ if isinstance(self.event, StreamStartEvent):
+ if self.event.encoding and not hasattr(self.stream, 'encoding'):
+ self.encoding = self.event.encoding
+ self.write_stream_start()
+ self.state = self.expect_first_document_start
+ else:
+ raise EmitterError("expected StreamStartEvent, but got %s"
+ % self.event)
+
+ def expect_nothing(self):
+ raise EmitterError("expected nothing, but got %s" % self.event)
+
+ # Document handlers.
+
+ def expect_first_document_start(self):
+ return self.expect_document_start(first=True)
+
+ def expect_document_start(self, first=False):
+ if isinstance(self.event, DocumentStartEvent):
+ if (self.event.version or self.event.tags) and self.open_ended:
+ self.write_indicator('...', True)
+ self.write_indent()
+ if self.event.version:
+ version_text = self.prepare_version(self.event.version)
+ self.write_version_directive(version_text)
+ self.tag_prefixes = self.DEFAULT_TAG_PREFIXES.copy()
+ if self.event.tags:
+ handles = sorted(self.event.tags.keys())
+ for handle in handles:
+ prefix = self.event.tags[handle]
+ self.tag_prefixes[prefix] = handle
+ handle_text = self.prepare_tag_handle(handle)
+ prefix_text = self.prepare_tag_prefix(prefix)
+ self.write_tag_directive(handle_text, prefix_text)
+ implicit = (first and not self.event.explicit and not self.canonical
+ and not self.event.version and not self.event.tags
+ and not self.check_empty_document())
+ if not implicit:
+ self.write_indent()
+ self.write_indicator('---', True)
+ if self.canonical:
+ self.write_indent()
+ self.state = self.expect_document_root
+ elif isinstance(self.event, StreamEndEvent):
+ if self.open_ended:
+ self.write_indicator('...', True)
+ self.write_indent()
+ self.write_stream_end()
+ self.state = self.expect_nothing
+ else:
+ raise EmitterError("expected DocumentStartEvent, but got %s"
+ % self.event)
+
+ def expect_document_end(self):
+ if isinstance(self.event, DocumentEndEvent):
+ self.write_indent()
+ if self.event.explicit:
+ self.write_indicator('...', True)
+ self.write_indent()
+ self.flush_stream()
+ self.state = self.expect_document_start
+ else:
+ raise EmitterError("expected DocumentEndEvent, but got %s"
+ % self.event)
+
+ def expect_document_root(self):
+ self.states.append(self.expect_document_end)
+ self.expect_node(root=True)
+
+ # Node handlers.
+
+ def expect_node(self, root=False, sequence=False, mapping=False,
+ simple_key=False):
+ self.root_context = root
+ self.sequence_context = sequence
+ self.mapping_context = mapping
+ self.simple_key_context = simple_key
+ if isinstance(self.event, AliasEvent):
+ self.expect_alias()
+ elif isinstance(self.event, (ScalarEvent, CollectionStartEvent)):
+ self.process_anchor('&')
+ self.process_tag()
+ if isinstance(self.event, ScalarEvent):
+ self.expect_scalar()
+ elif isinstance(self.event, SequenceStartEvent):
+ if self.flow_level or self.canonical or self.event.flow_style \
+ or self.check_empty_sequence():
+ self.expect_flow_sequence()
+ else:
+ self.expect_block_sequence()
+ elif isinstance(self.event, MappingStartEvent):
+ if self.flow_level or self.canonical or self.event.flow_style \
+ or self.check_empty_mapping():
+ self.expect_flow_mapping()
+ else:
+ self.expect_block_mapping()
+ else:
+ raise EmitterError("expected NodeEvent, but got %s" % self.event)
+
+ def expect_alias(self):
+ if self.event.anchor is None:
+ raise EmitterError("anchor is not specified for alias")
+ self.process_anchor('*')
+ self.state = self.states.pop()
+
+ def expect_scalar(self):
+ self.increase_indent(flow=True)
+ self.process_scalar()
+ self.indent = self.indents.pop()
+ self.state = self.states.pop()
+
+ # Flow sequence handlers.
+
+ def expect_flow_sequence(self):
+ self.write_indicator('[', True, whitespace=True)
+ self.flow_level += 1
+ self.increase_indent(flow=True)
+ self.state = self.expect_first_flow_sequence_item
+
+ def expect_first_flow_sequence_item(self):
+ if isinstance(self.event, SequenceEndEvent):
+ self.indent = self.indents.pop()
+ self.flow_level -= 1
+ self.write_indicator(']', False)
+ self.state = self.states.pop()
+ else:
+ if self.canonical or self.column > self.best_width:
+ self.write_indent()
+ self.states.append(self.expect_flow_sequence_item)
+ self.expect_node(sequence=True)
+
+ def expect_flow_sequence_item(self):
+ if isinstance(self.event, SequenceEndEvent):
+ self.indent = self.indents.pop()
+ self.flow_level -= 1
+ if self.canonical:
+ self.write_indicator(',', False)
+ self.write_indent()
+ self.write_indicator(']', False)
+ self.state = self.states.pop()
+ else:
+ self.write_indicator(',', False)
+ if self.canonical or self.column > self.best_width:
+ self.write_indent()
+ self.states.append(self.expect_flow_sequence_item)
+ self.expect_node(sequence=True)
+
+ # Flow mapping handlers.
+
+ def expect_flow_mapping(self):
+ self.write_indicator('{', True, whitespace=True)
+ self.flow_level += 1
+ self.increase_indent(flow=True)
+ self.state = self.expect_first_flow_mapping_key
+
+ def expect_first_flow_mapping_key(self):
+ if isinstance(self.event, MappingEndEvent):
+ self.indent = self.indents.pop()
+ self.flow_level -= 1
+ self.write_indicator('}', False)
+ self.state = self.states.pop()
+ else:
+ if self.canonical or self.column > self.best_width:
+ self.write_indent()
+ if not self.canonical and self.check_simple_key():
+ self.states.append(self.expect_flow_mapping_simple_value)
+ self.expect_node(mapping=True, simple_key=True)
+ else:
+ self.write_indicator('?', True)
+ self.states.append(self.expect_flow_mapping_value)
+ self.expect_node(mapping=True)
+
+ def expect_flow_mapping_key(self):
+ if isinstance(self.event, MappingEndEvent):
+ self.indent = self.indents.pop()
+ self.flow_level -= 1
+ if self.canonical:
+ self.write_indicator(',', False)
+ self.write_indent()
+ self.write_indicator('}', False)
+ self.state = self.states.pop()
+ else:
+ self.write_indicator(',', False)
+ if self.canonical or self.column > self.best_width:
+ self.write_indent()
+ if not self.canonical and self.check_simple_key():
+ self.states.append(self.expect_flow_mapping_simple_value)
+ self.expect_node(mapping=True, simple_key=True)
+ else:
+ self.write_indicator('?', True)
+ self.states.append(self.expect_flow_mapping_value)
+ self.expect_node(mapping=True)
+
+ def expect_flow_mapping_simple_value(self):
+ self.write_indicator(':', False)
+ self.states.append(self.expect_flow_mapping_key)
+ self.expect_node(mapping=True)
+
+ def expect_flow_mapping_value(self):
+ if self.canonical or self.column > self.best_width:
+ self.write_indent()
+ self.write_indicator(':', True)
+ self.states.append(self.expect_flow_mapping_key)
+ self.expect_node(mapping=True)
+
+ # Block sequence handlers.
+
+ def expect_block_sequence(self):
+ indentless = (self.mapping_context and not self.indention)
+ self.increase_indent(flow=False, indentless=indentless)
+ self.state = self.expect_first_block_sequence_item
+
+ def expect_first_block_sequence_item(self):
+ return self.expect_block_sequence_item(first=True)
+
+ def expect_block_sequence_item(self, first=False):
+ if not first and isinstance(self.event, SequenceEndEvent):
+ self.indent = self.indents.pop()
+ self.state = self.states.pop()
+ else:
+ self.write_indent()
+ self.write_indicator('-', True, indention=True)
+ self.states.append(self.expect_block_sequence_item)
+ self.expect_node(sequence=True)
+
+ # Block mapping handlers.
+
+ def expect_block_mapping(self):
+ self.increase_indent(flow=False)
+ self.state = self.expect_first_block_mapping_key
+
+ def expect_first_block_mapping_key(self):
+ return self.expect_block_mapping_key(first=True)
+
+ def expect_block_mapping_key(self, first=False):
+ if not first and isinstance(self.event, MappingEndEvent):
+ self.indent = self.indents.pop()
+ self.state = self.states.pop()
+ else:
+ self.write_indent()
+ if self.check_simple_key():
+ self.states.append(self.expect_block_mapping_simple_value)
+ self.expect_node(mapping=True, simple_key=True)
+ else:
+ self.write_indicator('?', True, indention=True)
+ self.states.append(self.expect_block_mapping_value)
+ self.expect_node(mapping=True)
+
+ def expect_block_mapping_simple_value(self):
+ self.write_indicator(':', False)
+ self.states.append(self.expect_block_mapping_key)
+ self.expect_node(mapping=True)
+
+ def expect_block_mapping_value(self):
+ self.write_indent()
+ self.write_indicator(':', True, indention=True)
+ self.states.append(self.expect_block_mapping_key)
+ self.expect_node(mapping=True)
+
+ # Checkers.
+
+ def check_empty_sequence(self):
+ return (isinstance(self.event, SequenceStartEvent) and self.events
+ and isinstance(self.events[0], SequenceEndEvent))
+
+ def check_empty_mapping(self):
+ return (isinstance(self.event, MappingStartEvent) and self.events
+ and isinstance(self.events[0], MappingEndEvent))
+
+ def check_empty_document(self):
+ if not isinstance(self.event, DocumentStartEvent) or not self.events:
+ return False
+ event = self.events[0]
+ return (isinstance(event, ScalarEvent) and event.anchor is None
+ and event.tag is None and event.implicit and event.value == '')
+
+ def check_simple_key(self):
+ length = 0
+ if isinstance(self.event, NodeEvent) and self.event.anchor is not None:
+ if self.prepared_anchor is None:
+ self.prepared_anchor = self.prepare_anchor(self.event.anchor)
+ length += len(self.prepared_anchor)
+ if isinstance(self.event, (ScalarEvent, CollectionStartEvent)) \
+ and self.event.tag is not None:
+ if self.prepared_tag is None:
+ self.prepared_tag = self.prepare_tag(self.event.tag)
+ length += len(self.prepared_tag)
+ if isinstance(self.event, ScalarEvent):
+ if self.analysis is None:
+ self.analysis = self.analyze_scalar(self.event.value)
+ length += len(self.analysis.scalar)
+ return (length < 128 and (isinstance(self.event, AliasEvent)
+ or (isinstance(self.event, ScalarEvent)
+ and not self.analysis.empty and not self.analysis.multiline)
+ or self.check_empty_sequence() or self.check_empty_mapping()))
+
+ # Anchor, Tag, and Scalar processors.
+
+ def process_anchor(self, indicator):
+ if self.event.anchor is None:
+ self.prepared_anchor = None
+ return
+ if self.prepared_anchor is None:
+ self.prepared_anchor = self.prepare_anchor(self.event.anchor)
+ if self.prepared_anchor:
+ self.write_indicator(indicator+self.prepared_anchor, True)
+ self.prepared_anchor = None
+
+ def process_tag(self):
+ tag = self.event.tag
+ if isinstance(self.event, ScalarEvent):
+ if self.style is None:
+ self.style = self.choose_scalar_style()
+ if ((not self.canonical or tag is None) and
+ ((self.style == '' and self.event.implicit[0])
+ or (self.style != '' and self.event.implicit[1]))):
+ self.prepared_tag = None
+ return
+ if self.event.implicit[0] and tag is None:
+ tag = '!'
+ self.prepared_tag = None
+ else:
+ if (not self.canonical or tag is None) and self.event.implicit:
+ self.prepared_tag = None
+ return
+ if tag is None:
+ raise EmitterError("tag is not specified")
+ if self.prepared_tag is None:
+ self.prepared_tag = self.prepare_tag(tag)
+ if self.prepared_tag:
+ self.write_indicator(self.prepared_tag, True)
+ self.prepared_tag = None
+
+ def choose_scalar_style(self):
+ if self.analysis is None:
+ self.analysis = self.analyze_scalar(self.event.value)
+ if self.event.style == '"' or self.canonical:
+ return '"'
+ if not self.event.style and self.event.implicit[0]:
+ if (not (self.simple_key_context and
+ (self.analysis.empty or self.analysis.multiline))
+ and (self.flow_level and self.analysis.allow_flow_plain
+ or (not self.flow_level and self.analysis.allow_block_plain))):
+ return ''
+ if self.event.style and self.event.style in '|>':
+ if (not self.flow_level and not self.simple_key_context
+ and self.analysis.allow_block):
+ return self.event.style
+ if not self.event.style or self.event.style == '\'':
+ if (self.analysis.allow_single_quoted and
+ not (self.simple_key_context and self.analysis.multiline)):
+ return '\''
+ return '"'
+
+ def process_scalar(self):
+ if self.analysis is None:
+ self.analysis = self.analyze_scalar(self.event.value)
+ if self.style is None:
+ self.style = self.choose_scalar_style()
+ split = (not self.simple_key_context)
+ #if self.analysis.multiline and split \
+ # and (not self.style or self.style in '\'\"'):
+ # self.write_indent()
+ if self.style == '"':
+ self.write_double_quoted(self.analysis.scalar, split)
+ elif self.style == '\'':
+ self.write_single_quoted(self.analysis.scalar, split)
+ elif self.style == '>':
+ self.write_folded(self.analysis.scalar)
+ elif self.style == '|':
+ self.write_literal(self.analysis.scalar)
+ else:
+ self.write_plain(self.analysis.scalar, split)
+ self.analysis = None
+ self.style = None
+
+ # Analyzers.
+
+ def prepare_version(self, version):
+ major, minor = version
+ if major != 1:
+ raise EmitterError("unsupported YAML version: %d.%d" % (major, minor))
+ return '%d.%d' % (major, minor)
+
+ def prepare_tag_handle(self, handle):
+ if not handle:
+ raise EmitterError("tag handle must not be empty")
+ if handle[0] != '!' or handle[-1] != '!':
+ raise EmitterError("tag handle must start and end with '!': %r" % handle)
+ for ch in handle[1:-1]:
+ if not ('0' <= ch <= '9' or 'A' <= ch <= 'Z' or 'a' <= ch <= 'z' \
+ or ch in '-_'):
+ raise EmitterError("invalid character %r in the tag handle: %r"
+ % (ch, handle))
+ return handle
+
+ def prepare_tag_prefix(self, prefix):
+ if not prefix:
+ raise EmitterError("tag prefix must not be empty")
+ chunks = []
+ start = end = 0
+ if prefix[0] == '!':
+ end = 1
+ while end < len(prefix):
+ ch = prefix[end]
+ if '0' <= ch <= '9' or 'A' <= ch <= 'Z' or 'a' <= ch <= 'z' \
+ or ch in '-;/?!:@&=+$,_.~*\'()[]':
+ end += 1
+ else:
+ if start < end:
+ chunks.append(prefix[start:end])
+ start = end = end+1
+ data = ch.encode('utf-8')
+ for ch in data:
+ chunks.append('%%%02X' % ord(ch))
+ if start < end:
+ chunks.append(prefix[start:end])
+ return ''.join(chunks)
+
+ def prepare_tag(self, tag):
+ if not tag:
+ raise EmitterError("tag must not be empty")
+ if tag == '!':
+ return tag
+ handle = None
+ suffix = tag
+ prefixes = sorted(self.tag_prefixes.keys())
+ for prefix in prefixes:
+ if tag.startswith(prefix) \
+ and (prefix == '!' or len(prefix) < len(tag)):
+ handle = self.tag_prefixes[prefix]
+ suffix = tag[len(prefix):]
+ chunks = []
+ start = end = 0
+ while end < len(suffix):
+ ch = suffix[end]
+ if '0' <= ch <= '9' or 'A' <= ch <= 'Z' or 'a' <= ch <= 'z' \
+ or ch in '-;/?:@&=+$,_.~*\'()[]' \
+ or (ch == '!' and handle != '!'):
+ end += 1
+ else:
+ if start < end:
+ chunks.append(suffix[start:end])
+ start = end = end+1
+ data = ch.encode('utf-8')
+ for ch in data:
+ chunks.append('%%%02X' % ord(ch))
+ if start < end:
+ chunks.append(suffix[start:end])
+ suffix_text = ''.join(chunks)
+ if handle:
+ return '%s%s' % (handle, suffix_text)
+ else:
+ return '!<%s>' % suffix_text
+
+ def prepare_anchor(self, anchor):
+ if not anchor:
+ raise EmitterError("anchor must not be empty")
+ for ch in anchor:
+ if not ('0' <= ch <= '9' or 'A' <= ch <= 'Z' or 'a' <= ch <= 'z' \
+ or ch in '-_'):
+ raise EmitterError("invalid character %r in the anchor: %r"
+ % (ch, anchor))
+ return anchor
+
+ def analyze_scalar(self, scalar):
+
+ # Empty scalar is a special case.
+ if not scalar:
+ return ScalarAnalysis(scalar=scalar, empty=True, multiline=False,
+ allow_flow_plain=False, allow_block_plain=True,
+ allow_single_quoted=True, allow_double_quoted=True,
+ allow_block=False)
+
+ # Indicators and special characters.
+ block_indicators = False
+ flow_indicators = False
+ line_breaks = False
+ special_characters = False
+
+ # Important whitespace combinations.
+ leading_space = False
+ leading_break = False
+ trailing_space = False
+ trailing_break = False
+ break_space = False
+ space_break = False
+
+ # Check document indicators.
+ if scalar.startswith('---') or scalar.startswith('...'):
+ block_indicators = True
+ flow_indicators = True
+
+ # First character or preceded by a whitespace.
+ preceeded_by_whitespace = True
+
+ # Last character or followed by a whitespace.
+ followed_by_whitespace = (len(scalar) == 1 or
+ scalar[1] in '\0 \t\r\n\x85\u2028\u2029')
+
+ # The previous character is a space.
+ previous_space = False
+
+ # The previous character is a break.
+ previous_break = False
+
+ index = 0
+ while index < len(scalar):
+ ch = scalar[index]
+
+ # Check for indicators.
+ if index == 0:
+ # Leading indicators are special characters.
+ if ch in '#,[]{}&*!|>\'\"%@`':
+ flow_indicators = True
+ block_indicators = True
+ if ch in '?:':
+ flow_indicators = True
+ if followed_by_whitespace:
+ block_indicators = True
+ if ch == '-' and followed_by_whitespace:
+ flow_indicators = True
+ block_indicators = True
+ else:
+ # Some indicators are also special when they appear inside a scalar.
+ if ch in ',?[]{}':
+ flow_indicators = True
+ if ch == ':':
+ flow_indicators = True
+ if followed_by_whitespace:
+ block_indicators = True
+ if ch == '#' and preceeded_by_whitespace:
+ flow_indicators = True
+ block_indicators = True
+
+ # Check for line breaks, special, and unicode characters.
+ if ch in '\n\x85\u2028\u2029':
+ line_breaks = True
+ if not (ch == '\n' or '\x20' <= ch <= '\x7E'):
+ if (ch == '\x85' or '\xA0' <= ch <= '\uD7FF'
+ or '\uE000' <= ch <= '\uFFFD') and ch != '\uFEFF':
+ unicode_characters = True
+ if not self.allow_unicode:
+ special_characters = True
+ else:
+ special_characters = True
+
+ # Detect important whitespace combinations.
+ if ch == ' ':
+ if index == 0:
+ leading_space = True
+ if index == len(scalar)-1:
+ trailing_space = True
+ if previous_break:
+ break_space = True
+ previous_space = True
+ previous_break = False
+ elif ch in '\n\x85\u2028\u2029':
+ if index == 0:
+ leading_break = True
+ if index == len(scalar)-1:
+ trailing_break = True
+ if previous_space:
+ space_break = True
+ previous_space = False
+ previous_break = True
+ else:
+ previous_space = False
+ previous_break = False
+
+ # Prepare for the next character.
+ index += 1
+ preceeded_by_whitespace = (ch in '\0 \t\r\n\x85\u2028\u2029')
+ followed_by_whitespace = (index+1 >= len(scalar) or
+ scalar[index+1] in '\0 \t\r\n\x85\u2028\u2029')
+
+ # Let's decide what styles are allowed.
+ allow_flow_plain = True
+ allow_block_plain = True
+ allow_single_quoted = True
+ allow_double_quoted = True
+ allow_block = True
+
+ # Leading and trailing whitespaces are bad for plain scalars.
+ if (leading_space or leading_break
+ or trailing_space or trailing_break):
+ allow_flow_plain = allow_block_plain = False
+
+ # We do not permit trailing spaces for block scalars.
+ if trailing_space:
+ allow_block = False
+
+ # Spaces at the beginning of a new line are only acceptable for block
+ # scalars.
+ if break_space:
+ allow_flow_plain = allow_block_plain = allow_single_quoted = False
+
+ # Spaces followed by breaks, as well as special characters, are only
+ # allowed for double quoted scalars.
+ if space_break or special_characters:
+ allow_flow_plain = allow_block_plain = \
+ allow_single_quoted = allow_block = False
+
+ # Although the plain scalar writer supports breaks, we never emit
+ # multiline plain scalars.
+ if line_breaks:
+ allow_flow_plain = allow_block_plain = False
+
+ # Flow indicators are forbidden for flow plain scalars.
+ if flow_indicators:
+ allow_flow_plain = False
+
+ # Block indicators are forbidden for block plain scalars.
+ if block_indicators:
+ allow_block_plain = False
+
+ return ScalarAnalysis(scalar=scalar,
+ empty=False, multiline=line_breaks,
+ allow_flow_plain=allow_flow_plain,
+ allow_block_plain=allow_block_plain,
+ allow_single_quoted=allow_single_quoted,
+ allow_double_quoted=allow_double_quoted,
+ allow_block=allow_block)
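+ # E.g. a value containing ': ' sets both indicator flags above, so the
+ # plain styles are disallowed and a quoted style is chosen instead.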
+
+ # Writers.
+
+ def flush_stream(self):
+ if hasattr(self.stream, 'flush'):
+ self.stream.flush()
+
+ def write_stream_start(self):
+ # Write BOM if needed.
+ if self.encoding and self.encoding.startswith('utf-16'):
+ self.stream.write('\uFEFF'.encode(self.encoding))
+
+ def write_stream_end(self):
+ self.flush_stream()
+
+ def write_indicator(self, indicator, need_whitespace,
+ whitespace=False, indention=False):
+ if self.whitespace or not need_whitespace:
+ data = indicator
+ else:
+ data = ' '+indicator
+ self.whitespace = whitespace
+ self.indention = self.indention and indention
+ self.column += len(data)
+ self.open_ended = False
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+
+ def write_indent(self):
+ indent = self.indent or 0
+ if not self.indention or self.column > indent \
+ or (self.column == indent and not self.whitespace):
+ self.write_line_break()
+ if self.column < indent:
+ self.whitespace = True
+ data = ' '*(indent-self.column)
+ self.column = indent
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+
+ def write_line_break(self, data=None):
+ if data is None:
+ data = self.best_line_break
+ self.whitespace = True
+ self.indention = True
+ self.line += 1
+ self.column = 0
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+
+ def write_version_directive(self, version_text):
+ data = '%%YAML %s' % version_text
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ self.write_line_break()
+
+ def write_tag_directive(self, handle_text, prefix_text):
+ data = '%%TAG %s %s' % (handle_text, prefix_text)
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ self.write_line_break()
+
+ # Scalar streams.
+
+ def write_single_quoted(self, text, split=True):
+ self.write_indicator('\'', True)
+ spaces = False
+ breaks = False
+ start = end = 0
+ while end <= len(text):
+ ch = None
+ if end < len(text):
+ ch = text[end]
+ if spaces:
+ if ch is None or ch != ' ':
+ if start+1 == end and self.column > self.best_width and split \
+ and start != 0 and end != len(text):
+ self.write_indent()
+ else:
+ data = text[start:end]
+ self.column += len(data)
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ start = end
+ elif breaks:
+ if ch is None or ch not in '\n\x85\u2028\u2029':
+ if text[start] == '\n':
+ self.write_line_break()
+ for br in text[start:end]:
+ if br == '\n':
+ self.write_line_break()
+ else:
+ self.write_line_break(br)
+ self.write_indent()
+ start = end
+ else:
+ if ch is None or ch in ' \n\x85\u2028\u2029' or ch == '\'':
+ if start < end:
+ data = text[start:end]
+ self.column += len(data)
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ start = end
+ if ch == '\'':
+ data = '\'\''
+ self.column += 2
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ start = end + 1
+ if ch is not None:
+ spaces = (ch == ' ')
+ breaks = (ch in '\n\x85\u2028\u2029')
+ end += 1
+ self.write_indicator('\'', False)
+
+ ESCAPE_REPLACEMENTS = {
+ '\0': '0',
+ '\x07': 'a',
+ '\x08': 'b',
+ '\x09': 't',
+ '\x0A': 'n',
+ '\x0B': 'v',
+ '\x0C': 'f',
+ '\x0D': 'r',
+ '\x1B': 'e',
+ '\"': '\"',
+ '\\': '\\',
+ '\x85': 'N',
+ '\xA0': '_',
+ '\u2028': 'L',
+ '\u2029': 'P',
+ }
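+ # Characters listed above get their short escapes in double-quoted scalars;
+ # any other unprintable character falls back to \xXX, \uXXXX or \UXXXXXXXX
+ # in write_double_quoted() below.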
+
+ def write_double_quoted(self, text, split=True):
+ self.write_indicator('"', True)
+ start = end = 0
+ while end <= len(text):
+ ch = None
+ if end < len(text):
+ ch = text[end]
+ if ch is None or ch in '"\\\x85\u2028\u2029\uFEFF' \
+ or not ('\x20' <= ch <= '\x7E'
+ or (self.allow_unicode
+ and ('\xA0' <= ch <= '\uD7FF'
+ or '\uE000' <= ch <= '\uFFFD'))):
+ if start < end:
+ data = text[start:end]
+ self.column += len(data)
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ start = end
+ if ch is not None:
+ if ch in self.ESCAPE_REPLACEMENTS:
+ data = '\\'+self.ESCAPE_REPLACEMENTS[ch]
+ elif ch <= '\xFF':
+ data = '\\x%02X' % ord(ch)
+ elif ch <= '\uFFFF':
+ data = '\\u%04X' % ord(ch)
+ else:
+ data = '\\U%08X' % ord(ch)
+ self.column += len(data)
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ start = end+1
+ if 0 < end < len(text)-1 and (ch == ' ' or start >= end) \
+ and self.column+(end-start) > self.best_width and split:
+ data = text[start:end]+'\\'
+ if start < end:
+ start = end
+ self.column += len(data)
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ self.write_indent()
+ self.whitespace = False
+ self.indention = False
+ if text[start] == ' ':
+ data = '\\'
+ self.column += len(data)
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ end += 1
+ self.write_indicator('"', False)
+
+ def determine_block_hints(self, text):
+ hints = ''
+ if text:
+ if text[0] in ' \n\x85\u2028\u2029':
+ hints += str(self.best_indent)
+ if text[-1] not in '\n\x85\u2028\u2029':
+ hints += '-'
+ elif len(text) == 1 or text[-2] in '\n\x85\u2028\u2029':
+ hints += '+'
+ return hints
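+ # E.g. write_literal("abc") emits "|-" (the missing final newline is
+ # stripped), "abc\n\n" produces "|+" (trailing newlines are kept), and text
+ # starting with a space or newline also gets an explicit indentation hint
+ # such as "|2".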
+
+ def write_folded(self, text):
+ hints = self.determine_block_hints(text)
+ self.write_indicator('>'+hints, True)
+ if hints[-1:] == '+':
+ self.open_ended = True
+ self.write_line_break()
+ leading_space = True
+ spaces = False
+ breaks = True
+ start = end = 0
+ while end <= len(text):
+ ch = None
+ if end < len(text):
+ ch = text[end]
+ if breaks:
+ if ch is None or ch not in '\n\x85\u2028\u2029':
+ if not leading_space and ch is not None and ch != ' ' \
+ and text[start] == '\n':
+ self.write_line_break()
+ leading_space = (ch == ' ')
+ for br in text[start:end]:
+ if br == '\n':
+ self.write_line_break()
+ else:
+ self.write_line_break(br)
+ if ch is not None:
+ self.write_indent()
+ start = end
+ elif spaces:
+ if ch != ' ':
+ if start+1 == end and self.column > self.best_width:
+ self.write_indent()
+ else:
+ data = text[start:end]
+ self.column += len(data)
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ start = end
+ else:
+ if ch is None or ch in ' \n\x85\u2028\u2029':
+ data = text[start:end]
+ self.column += len(data)
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ if ch is None:
+ self.write_line_break()
+ start = end
+ if ch is not None:
+ breaks = (ch in '\n\x85\u2028\u2029')
+ spaces = (ch == ' ')
+ end += 1
+
+ def write_literal(self, text):
+ hints = self.determine_block_hints(text)
+ self.write_indicator('|'+hints, True)
+ if hints[-1:] == '+':
+ self.open_ended = True
+ self.write_line_break()
+ breaks = True
+ start = end = 0
+ while end <= len(text):
+ ch = None
+ if end < len(text):
+ ch = text[end]
+ if breaks:
+ if ch is None or ch not in '\n\x85\u2028\u2029':
+ for br in text[start:end]:
+ if br == '\n':
+ self.write_line_break()
+ else:
+ self.write_line_break(br)
+ if ch is not None:
+ self.write_indent()
+ start = end
+ else:
+ if ch is None or ch in '\n\x85\u2028\u2029':
+ data = text[start:end]
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ if ch is None:
+ self.write_line_break()
+ start = end
+ if ch is not None:
+ breaks = (ch in '\n\x85\u2028\u2029')
+ end += 1
+
+ def write_plain(self, text, split=True):
+ if self.root_context:
+ self.open_ended = True
+ if not text:
+ return
+ if not self.whitespace:
+ data = ' '
+ self.column += len(data)
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ self.whitespace = False
+ self.indention = False
+ spaces = False
+ breaks = False
+ start = end = 0
+ while end <= len(text):
+ ch = None
+ if end < len(text):
+ ch = text[end]
+ if spaces:
+ if ch != ' ':
+ if start+1 == end and self.column > self.best_width and split:
+ self.write_indent()
+ self.whitespace = False
+ self.indention = False
+ else:
+ data = text[start:end]
+ self.column += len(data)
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ start = end
+ elif breaks:
+ if ch not in '\n\x85\u2028\u2029':
+ if text[start] == '\n':
+ self.write_line_break()
+ for br in text[start:end]:
+ if br == '\n':
+ self.write_line_break()
+ else:
+ self.write_line_break(br)
+ self.write_indent()
+ self.whitespace = False
+ self.indention = False
+ start = end
+ else:
+ if ch is None or ch in ' \n\x85\u2028\u2029':
+ data = text[start:end]
+ self.column += len(data)
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ start = end
+ if ch is not None:
+ spaces = (ch == ' ')
+ breaks = (ch in '\n\x85\u2028\u2029')
+ end += 1
+
diff --git a/collectors/python.d.plugin/python_modules/pyyaml3/error.py b/collectors/python.d.plugin/python_modules/pyyaml3/error.py
new file mode 100644
index 000000000..5fec7d449
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/pyyaml3/error.py
@@ -0,0 +1,76 @@
+# SPDX-License-Identifier: MIT
+
+__all__ = ['Mark', 'YAMLError', 'MarkedYAMLError']
+
+class Mark:
+
+ def __init__(self, name, index, line, column, buffer, pointer):
+ self.name = name
+ self.index = index
+ self.line = line
+ self.column = column
+ self.buffer = buffer
+ self.pointer = pointer
+
+ def get_snippet(self, indent=4, max_length=75):
+ if self.buffer is None:
+ return None
+ head = ''
+ start = self.pointer
+ while start > 0 and self.buffer[start-1] not in '\0\r\n\x85\u2028\u2029':
+ start -= 1
+ if self.pointer-start > max_length/2-1:
+ head = ' ... '
+ start += 5
+ break
+ tail = ''
+ end = self.pointer
+ while end < len(self.buffer) and self.buffer[end] not in '\0\r\n\x85\u2028\u2029':
+ end += 1
+ if end-self.pointer > max_length/2-1:
+ tail = ' ... '
+ end -= 5
+ break
+ snippet = self.buffer[start:end]
+ return ' '*indent + head + snippet + tail + '\n' \
+ + ' '*(indent+self.pointer-start+len(head)) + '^'
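+ # The snippet is the offending line (truncated with ' ... ' when it
+ # exceeds max_length) with a '^' caret aligned under the problem column.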
+
+ def __str__(self):
+ snippet = self.get_snippet()
+ where = " in \"%s\", line %d, column %d" \
+ % (self.name, self.line+1, self.column+1)
+ if snippet is not None:
+ where += ":\n"+snippet
+ return where
+
+class YAMLError(Exception):
+ pass
+
+class MarkedYAMLError(YAMLError):
+
+ def __init__(self, context=None, context_mark=None,
+ problem=None, problem_mark=None, note=None):
+ self.context = context
+ self.context_mark = context_mark
+ self.problem = problem
+ self.problem_mark = problem_mark
+ self.note = note
+
+ def __str__(self):
+ lines = []
+ if self.context is not None:
+ lines.append(self.context)
+ if self.context_mark is not None \
+ and (self.problem is None or self.problem_mark is None
+ or self.context_mark.name != self.problem_mark.name
+ or self.context_mark.line != self.problem_mark.line
+ or self.context_mark.column != self.problem_mark.column):
+ lines.append(str(self.context_mark))
+ if self.problem is not None:
+ lines.append(self.problem)
+ if self.problem_mark is not None:
+ lines.append(str(self.problem_mark))
+ if self.note is not None:
+ lines.append(self.note)
+ return '\n'.join(lines)
+
diff --git a/collectors/python.d.plugin/python_modules/pyyaml3/events.py b/collectors/python.d.plugin/python_modules/pyyaml3/events.py
new file mode 100644
index 000000000..283452add
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/pyyaml3/events.py
@@ -0,0 +1,87 @@
+# SPDX-License-Identifier: MIT
+
+# Abstract classes.
+
+class Event(object):
+ def __init__(self, start_mark=None, end_mark=None):
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+ def __repr__(self):
+ attributes = [key for key in ['anchor', 'tag', 'implicit', 'value']
+ if hasattr(self, key)]
+ arguments = ', '.join(['%s=%r' % (key, getattr(self, key))
+ for key in attributes])
+ return '%s(%s)' % (self.__class__.__name__, arguments)
+
+class NodeEvent(Event):
+ def __init__(self, anchor, start_mark=None, end_mark=None):
+ self.anchor = anchor
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+
+class CollectionStartEvent(NodeEvent):
+ def __init__(self, anchor, tag, implicit, start_mark=None, end_mark=None,
+ flow_style=None):
+ self.anchor = anchor
+ self.tag = tag
+ self.implicit = implicit
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+ self.flow_style = flow_style
+
+class CollectionEndEvent(Event):
+ pass
+
+# Implementations.
+
+class StreamStartEvent(Event):
+ def __init__(self, start_mark=None, end_mark=None, encoding=None):
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+ self.encoding = encoding
+
+class StreamEndEvent(Event):
+ pass
+
+class DocumentStartEvent(Event):
+ def __init__(self, start_mark=None, end_mark=None,
+ explicit=None, version=None, tags=None):
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+ self.explicit = explicit
+ self.version = version
+ self.tags = tags
+
+class DocumentEndEvent(Event):
+ def __init__(self, start_mark=None, end_mark=None,
+ explicit=None):
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+ self.explicit = explicit
+
+class AliasEvent(NodeEvent):
+ pass
+
+class ScalarEvent(NodeEvent):
+ def __init__(self, anchor, tag, implicit, value,
+ start_mark=None, end_mark=None, style=None):
+ self.anchor = anchor
+ self.tag = tag
+ self.implicit = implicit
+ self.value = value
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+ self.style = style
+
+class SequenceStartEvent(CollectionStartEvent):
+ pass
+
+class SequenceEndEvent(CollectionEndEvent):
+ pass
+
+class MappingStartEvent(CollectionStartEvent):
+ pass
+
+class MappingEndEvent(CollectionEndEvent):
+ pass
+
diff --git a/collectors/python.d.plugin/python_modules/pyyaml3/loader.py b/collectors/python.d.plugin/python_modules/pyyaml3/loader.py
new file mode 100644
index 000000000..7ef6cf815
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/pyyaml3/loader.py
@@ -0,0 +1,41 @@
+# SPDX-License-Identifier: MIT
+
+__all__ = ['BaseLoader', 'SafeLoader', 'Loader']
+
+from .reader import *
+from .scanner import *
+from .parser import *
+from .composer import *
+from .constructor import *
+from .resolver import *
+
+class BaseLoader(Reader, Scanner, Parser, Composer, BaseConstructor, BaseResolver):
+
+ def __init__(self, stream):
+ Reader.__init__(self, stream)
+ Scanner.__init__(self)
+ Parser.__init__(self)
+ Composer.__init__(self)
+ BaseConstructor.__init__(self)
+ BaseResolver.__init__(self)
+
+class SafeLoader(Reader, Scanner, Parser, Composer, SafeConstructor, Resolver):
+
+ def __init__(self, stream):
+ Reader.__init__(self, stream)
+ Scanner.__init__(self)
+ Parser.__init__(self)
+ Composer.__init__(self)
+ SafeConstructor.__init__(self)
+ Resolver.__init__(self)
+
+class Loader(Reader, Scanner, Parser, Composer, Constructor, Resolver):
+
+ def __init__(self, stream):
+ Reader.__init__(self, stream)
+ Scanner.__init__(self)
+ Parser.__init__(self)
+ Composer.__init__(self)
+ Constructor.__init__(self)
+ Resolver.__init__(self)
+
diff --git a/collectors/python.d.plugin/python_modules/pyyaml3/nodes.py b/collectors/python.d.plugin/python_modules/pyyaml3/nodes.py
new file mode 100644
index 000000000..ed2a1b43e
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/pyyaml3/nodes.py
@@ -0,0 +1,50 @@
+# SPDX-License-Identifier: MIT
+
+class Node(object):
+ def __init__(self, tag, value, start_mark, end_mark):
+ self.tag = tag
+ self.value = value
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+ def __repr__(self):
+ value = self.value
+ #if isinstance(value, list):
+ # if len(value) == 0:
+ # value = '<empty>'
+ # elif len(value) == 1:
+ # value = '<1 item>'
+ # else:
+ # value = '<%d items>' % len(value)
+ #else:
+ # if len(value) > 75:
+ # value = repr(value[:70]+u' ... ')
+ # else:
+ # value = repr(value)
+ value = repr(value)
+ return '%s(tag=%r, value=%s)' % (self.__class__.__name__, self.tag, value)
+
+class ScalarNode(Node):
+ id = 'scalar'
+ def __init__(self, tag, value,
+ start_mark=None, end_mark=None, style=None):
+ self.tag = tag
+ self.value = value
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+ self.style = style
+
+class CollectionNode(Node):
+ def __init__(self, tag, value,
+ start_mark=None, end_mark=None, flow_style=None):
+ self.tag = tag
+ self.value = value
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+ self.flow_style = flow_style
+
+class SequenceNode(CollectionNode):
+ id = 'sequence'
+
+class MappingNode(CollectionNode):
+ id = 'mapping'
+
diff --git a/collectors/python.d.plugin/python_modules/pyyaml3/parser.py b/collectors/python.d.plugin/python_modules/pyyaml3/parser.py
new file mode 100644
index 000000000..bcec7f994
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/pyyaml3/parser.py
@@ -0,0 +1,590 @@
+# SPDX-License-Identifier: MIT
+
+# The following YAML grammar is LL(1) and is parsed by a recursive descent
+# parser.
+#
+# stream ::= STREAM-START implicit_document? explicit_document* STREAM-END
+# implicit_document ::= block_node DOCUMENT-END*
+# explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
+# block_node_or_indentless_sequence ::=
+# ALIAS
+# | properties (block_content | indentless_block_sequence)?
+# | block_content
+# | indentless_block_sequence
+# block_node ::= ALIAS
+# | properties block_content?
+# | block_content
+# flow_node ::= ALIAS
+# | properties flow_content?
+# | flow_content
+# properties ::= TAG ANCHOR? | ANCHOR TAG?
+# block_content ::= block_collection | flow_collection | SCALAR
+# flow_content ::= flow_collection | SCALAR
+# block_collection ::= block_sequence | block_mapping
+# flow_collection ::= flow_sequence | flow_mapping
+# block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END
+# indentless_sequence ::= (BLOCK-ENTRY block_node?)+
+# block_mapping ::= BLOCK-MAPPING_START
+# ((KEY block_node_or_indentless_sequence?)?
+# (VALUE block_node_or_indentless_sequence?)?)*
+# BLOCK-END
+# flow_sequence ::= FLOW-SEQUENCE-START
+# (flow_sequence_entry FLOW-ENTRY)*
+# flow_sequence_entry?
+# FLOW-SEQUENCE-END
+# flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+# flow_mapping ::= FLOW-MAPPING-START
+# (flow_mapping_entry FLOW-ENTRY)*
+# flow_mapping_entry?
+# FLOW-MAPPING-END
+# flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+#
+# FIRST sets:
+#
+# stream: { STREAM-START }
+# explicit_document: { DIRECTIVE DOCUMENT-START }
+# implicit_document: FIRST(block_node)
+# block_node: { ALIAS TAG ANCHOR SCALAR BLOCK-SEQUENCE-START BLOCK-MAPPING-START FLOW-SEQUENCE-START FLOW-MAPPING-START }
+# flow_node: { ALIAS ANCHOR TAG SCALAR FLOW-SEQUENCE-START FLOW-MAPPING-START }
+# block_content: { BLOCK-SEQUENCE-START BLOCK-MAPPING-START FLOW-SEQUENCE-START FLOW-MAPPING-START SCALAR }
+# flow_content: { FLOW-SEQUENCE-START FLOW-MAPPING-START SCALAR }
+# block_collection: { BLOCK-SEQUENCE-START BLOCK-MAPPING-START }
+# flow_collection: { FLOW-SEQUENCE-START FLOW-MAPPING-START }
+# block_sequence: { BLOCK-SEQUENCE-START }
+# block_mapping: { BLOCK-MAPPING-START }
+# block_node_or_indentless_sequence: { ALIAS ANCHOR TAG SCALAR BLOCK-SEQUENCE-START BLOCK-MAPPING-START FLOW-SEQUENCE-START FLOW-MAPPING-START BLOCK-ENTRY }
+# indentless_sequence: { ENTRY }
+# flow_collection: { FLOW-SEQUENCE-START FLOW-MAPPING-START }
+# flow_sequence: { FLOW-SEQUENCE-START }
+# flow_mapping: { FLOW-MAPPING-START }
+# flow_sequence_entry: { ALIAS ANCHOR TAG SCALAR FLOW-SEQUENCE-START FLOW-MAPPING-START KEY }
+# flow_mapping_entry: { ALIAS ANCHOR TAG SCALAR FLOW-SEQUENCE-START FLOW-MAPPING-START KEY }
+
+__all__ = ['Parser', 'ParserError']
+
+from .error import MarkedYAMLError
+from .tokens import *
+from .events import *
+from .scanner import *
+
+class ParserError(MarkedYAMLError):
+ pass
+
+class Parser:
+ # Since writing a recursive-descent parser is a straightforward task, we
+ # do not give many comments here.
+
+ DEFAULT_TAGS = {
+ '!': '!',
+ '!!': 'tag:yaml.org,2002:',
+ }
+
+ def __init__(self):
+ self.current_event = None
+ self.yaml_version = None
+ self.tag_handles = {}
+ self.states = []
+ self.marks = []
+ self.state = self.parse_stream_start
+
+ def dispose(self):
+ # Reset the state attributes (to clear self-references)
+ self.states = []
+ self.state = None
+
+ def check_event(self, *choices):
+ # Check the type of the next event.
+ if self.current_event is None:
+ if self.state:
+ self.current_event = self.state()
+ if self.current_event is not None:
+ if not choices:
+ return True
+ for choice in choices:
+ if isinstance(self.current_event, choice):
+ return True
+ return False
+
+ def peek_event(self):
+ # Get the next event.
+ if self.current_event is None:
+ if self.state:
+ self.current_event = self.state()
+ return self.current_event
+
+ def get_event(self):
+ # Get the next event and proceed further.
+ if self.current_event is None:
+ if self.state:
+ self.current_event = self.state()
+ value = self.current_event
+ self.current_event = None
+ return value
+
+ # stream ::= STREAM-START implicit_document? explicit_document* STREAM-END
+ # implicit_document ::= block_node DOCUMENT-END*
+ # explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
+
+ def parse_stream_start(self):
+
+ # Parse the stream start.
+ token = self.get_token()
+ event = StreamStartEvent(token.start_mark, token.end_mark,
+ encoding=token.encoding)
+
+ # Prepare the next state.
+ self.state = self.parse_implicit_document_start
+
+ return event
+
+ def parse_implicit_document_start(self):
+
+ # Parse an implicit document.
+ if not self.check_token(DirectiveToken, DocumentStartToken,
+ StreamEndToken):
+ self.tag_handles = self.DEFAULT_TAGS
+ token = self.peek_token()
+ start_mark = end_mark = token.start_mark
+ event = DocumentStartEvent(start_mark, end_mark,
+ explicit=False)
+
+ # Prepare the next state.
+ self.states.append(self.parse_document_end)
+ self.state = self.parse_block_node
+
+ return event
+
+ else:
+ return self.parse_document_start()
+
+ def parse_document_start(self):
+
+ # Parse any extra document end indicators.
+ while self.check_token(DocumentEndToken):
+ self.get_token()
+
+ # Parse an explicit document.
+ if not self.check_token(StreamEndToken):
+ token = self.peek_token()
+ start_mark = token.start_mark
+ version, tags = self.process_directives()
+ if not self.check_token(DocumentStartToken):
+ raise ParserError(None, None,
+ "expected '<document start>', but found %r"
+ % self.peek_token().id,
+ self.peek_token().start_mark)
+ token = self.get_token()
+ end_mark = token.end_mark
+ event = DocumentStartEvent(start_mark, end_mark,
+ explicit=True, version=version, tags=tags)
+ self.states.append(self.parse_document_end)
+ self.state = self.parse_document_content
+ else:
+ # Parse the end of the stream.
+ token = self.get_token()
+ event = StreamEndEvent(token.start_mark, token.end_mark)
+ assert not self.states
+ assert not self.marks
+ self.state = None
+ return event
+
+ def parse_document_end(self):
+
+ # Parse the document end.
+ token = self.peek_token()
+ start_mark = end_mark = token.start_mark
+ explicit = False
+ if self.check_token(DocumentEndToken):
+ token = self.get_token()
+ end_mark = token.end_mark
+ explicit = True
+ event = DocumentEndEvent(start_mark, end_mark,
+ explicit=explicit)
+
+ # Prepare the next state.
+ self.state = self.parse_document_start
+
+ return event
+
+ def parse_document_content(self):
+ if self.check_token(DirectiveToken,
+ DocumentStartToken, DocumentEndToken, StreamEndToken):
+ event = self.process_empty_scalar(self.peek_token().start_mark)
+ self.state = self.states.pop()
+ return event
+ else:
+ return self.parse_block_node()
+
+ def process_directives(self):
+ self.yaml_version = None
+ self.tag_handles = {}
+ while self.check_token(DirectiveToken):
+ token = self.get_token()
+ if token.name == 'YAML':
+ if self.yaml_version is not None:
+ raise ParserError(None, None,
+ "found duplicate YAML directive", token.start_mark)
+ major, minor = token.value
+ if major != 1:
+ raise ParserError(None, None,
+ "found incompatible YAML document (version 1.* is required)",
+ token.start_mark)
+ self.yaml_version = token.value
+ elif token.name == 'TAG':
+ handle, prefix = token.value
+ if handle in self.tag_handles:
+ raise ParserError(None, None,
+ "duplicate tag handle %r" % handle,
+ token.start_mark)
+ self.tag_handles[handle] = prefix
+ if self.tag_handles:
+ value = self.yaml_version, self.tag_handles.copy()
+ else:
+ value = self.yaml_version, None
+ for key in self.DEFAULT_TAGS:
+ if key not in self.tag_handles:
+ self.tag_handles[key] = self.DEFAULT_TAGS[key]
+ return value
+
+ # block_node_or_indentless_sequence ::= ALIAS
+ # | properties (block_content | indentless_block_sequence)?
+ # | block_content
+ # | indentless_block_sequence
+ # block_node ::= ALIAS
+ # | properties block_content?
+ # | block_content
+ # flow_node ::= ALIAS
+ # | properties flow_content?
+ # | flow_content
+ # properties ::= TAG ANCHOR? | ANCHOR TAG?
+ # block_content ::= block_collection | flow_collection | SCALAR
+ # flow_content ::= flow_collection | SCALAR
+ # block_collection ::= block_sequence | block_mapping
+ # flow_collection ::= flow_sequence | flow_mapping
+
+ def parse_block_node(self):
+ return self.parse_node(block=True)
+
+ def parse_flow_node(self):
+ return self.parse_node()
+
+ def parse_block_node_or_indentless_sequence(self):
+ return self.parse_node(block=True, indentless_sequence=True)
+
+ def parse_node(self, block=False, indentless_sequence=False):
+ if self.check_token(AliasToken):
+ token = self.get_token()
+ event = AliasEvent(token.value, token.start_mark, token.end_mark)
+ self.state = self.states.pop()
+ else:
+ anchor = None
+ tag = None
+ start_mark = end_mark = tag_mark = None
+ if self.check_token(AnchorToken):
+ token = self.get_token()
+ start_mark = token.start_mark
+ end_mark = token.end_mark
+ anchor = token.value
+ if self.check_token(TagToken):
+ token = self.get_token()
+ tag_mark = token.start_mark
+ end_mark = token.end_mark
+ tag = token.value
+ elif self.check_token(TagToken):
+ token = self.get_token()
+ start_mark = tag_mark = token.start_mark
+ end_mark = token.end_mark
+ tag = token.value
+ if self.check_token(AnchorToken):
+ token = self.get_token()
+ end_mark = token.end_mark
+ anchor = token.value
+ if tag is not None:
+ handle, suffix = tag
+ if handle is not None:
+ if handle not in self.tag_handles:
+ raise ParserError("while parsing a node", start_mark,
+ "found undefined tag handle %r" % handle,
+ tag_mark)
+ tag = self.tag_handles[handle]+suffix
+ else:
+ tag = suffix
+ #if tag == '!':
+ # raise ParserError("while parsing a node", start_mark,
+ # "found non-specific tag '!'", tag_mark,
+ # "Please check 'http://pyyaml.org/wiki/YAMLNonSpecificTag' and share your opinion.")
+ if start_mark is None:
+ start_mark = end_mark = self.peek_token().start_mark
+ event = None
+ implicit = (tag is None or tag == '!')
+ if indentless_sequence and self.check_token(BlockEntryToken):
+ end_mark = self.peek_token().end_mark
+ event = SequenceStartEvent(anchor, tag, implicit,
+ start_mark, end_mark)
+ self.state = self.parse_indentless_sequence_entry
+ else:
+ if self.check_token(ScalarToken):
+ token = self.get_token()
+ end_mark = token.end_mark
+ if (token.plain and tag is None) or tag == '!':
+ implicit = (True, False)
+ elif tag is None:
+ implicit = (False, True)
+ else:
+ implicit = (False, False)
+ event = ScalarEvent(anchor, tag, implicit, token.value,
+ start_mark, end_mark, style=token.style)
+ self.state = self.states.pop()
+ elif self.check_token(FlowSequenceStartToken):
+ end_mark = self.peek_token().end_mark
+ event = SequenceStartEvent(anchor, tag, implicit,
+ start_mark, end_mark, flow_style=True)
+ self.state = self.parse_flow_sequence_first_entry
+ elif self.check_token(FlowMappingStartToken):
+ end_mark = self.peek_token().end_mark
+ event = MappingStartEvent(anchor, tag, implicit,
+ start_mark, end_mark, flow_style=True)
+ self.state = self.parse_flow_mapping_first_key
+ elif block and self.check_token(BlockSequenceStartToken):
+ end_mark = self.peek_token().start_mark
+ event = SequenceStartEvent(anchor, tag, implicit,
+ start_mark, end_mark, flow_style=False)
+ self.state = self.parse_block_sequence_first_entry
+ elif block and self.check_token(BlockMappingStartToken):
+ end_mark = self.peek_token().start_mark
+ event = MappingStartEvent(anchor, tag, implicit,
+ start_mark, end_mark, flow_style=False)
+ self.state = self.parse_block_mapping_first_key
+ elif anchor is not None or tag is not None:
+ # Empty scalars are allowed even if a tag or an anchor is
+ # specified.
+ event = ScalarEvent(anchor, tag, (implicit, False), '',
+ start_mark, end_mark)
+ self.state = self.states.pop()
+ else:
+ if block:
+ node = 'block'
+ else:
+ node = 'flow'
+ token = self.peek_token()
+ raise ParserError("while parsing a %s node" % node, start_mark,
+ "expected the node content, but found %r" % token.id,
+ token.start_mark)
+ return event
+
+ # block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END
+
+ def parse_block_sequence_first_entry(self):
+ token = self.get_token()
+ self.marks.append(token.start_mark)
+ return self.parse_block_sequence_entry()
+
+ def parse_block_sequence_entry(self):
+ if self.check_token(BlockEntryToken):
+ token = self.get_token()
+ if not self.check_token(BlockEntryToken, BlockEndToken):
+ self.states.append(self.parse_block_sequence_entry)
+ return self.parse_block_node()
+ else:
+ self.state = self.parse_block_sequence_entry
+ return self.process_empty_scalar(token.end_mark)
+ if not self.check_token(BlockEndToken):
+ token = self.peek_token()
+ raise ParserError("while parsing a block collection", self.marks[-1],
+ "expected <block end>, but found %r" % token.id, token.start_mark)
+ token = self.get_token()
+ event = SequenceEndEvent(token.start_mark, token.end_mark)
+ self.state = self.states.pop()
+ self.marks.pop()
+ return event
+
+ # indentless_sequence ::= (BLOCK-ENTRY block_node?)+
+
+ def parse_indentless_sequence_entry(self):
+ if self.check_token(BlockEntryToken):
+ token = self.get_token()
+ if not self.check_token(BlockEntryToken,
+ KeyToken, ValueToken, BlockEndToken):
+ self.states.append(self.parse_indentless_sequence_entry)
+ return self.parse_block_node()
+ else:
+ self.state = self.parse_indentless_sequence_entry
+ return self.process_empty_scalar(token.end_mark)
+ token = self.peek_token()
+ event = SequenceEndEvent(token.start_mark, token.start_mark)
+ self.state = self.states.pop()
+ return event
+
+ # block_mapping ::= BLOCK-MAPPING_START
+ # ((KEY block_node_or_indentless_sequence?)?
+ # (VALUE block_node_or_indentless_sequence?)?)*
+ # BLOCK-END
+
+ def parse_block_mapping_first_key(self):
+ token = self.get_token()
+ self.marks.append(token.start_mark)
+ return self.parse_block_mapping_key()
+
+ def parse_block_mapping_key(self):
+ if self.check_token(KeyToken):
+ token = self.get_token()
+ if not self.check_token(KeyToken, ValueToken, BlockEndToken):
+ self.states.append(self.parse_block_mapping_value)
+ return self.parse_block_node_or_indentless_sequence()
+ else:
+ self.state = self.parse_block_mapping_value
+ return self.process_empty_scalar(token.end_mark)
+ if not self.check_token(BlockEndToken):
+ token = self.peek_token()
+ raise ParserError("while parsing a block mapping", self.marks[-1],
+ "expected <block end>, but found %r" % token.id, token.start_mark)
+ token = self.get_token()
+ event = MappingEndEvent(token.start_mark, token.end_mark)
+ self.state = self.states.pop()
+ self.marks.pop()
+ return event
+
+ def parse_block_mapping_value(self):
+ if self.check_token(ValueToken):
+ token = self.get_token()
+ if not self.check_token(KeyToken, ValueToken, BlockEndToken):
+ self.states.append(self.parse_block_mapping_key)
+ return self.parse_block_node_or_indentless_sequence()
+ else:
+ self.state = self.parse_block_mapping_key
+ return self.process_empty_scalar(token.end_mark)
+ else:
+ self.state = self.parse_block_mapping_key
+ token = self.peek_token()
+ return self.process_empty_scalar(token.start_mark)
+
+ # flow_sequence ::= FLOW-SEQUENCE-START
+ # (flow_sequence_entry FLOW-ENTRY)*
+ # flow_sequence_entry?
+ # FLOW-SEQUENCE-END
+ # flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+ #
+ # Note that while production rules for both flow_sequence_entry and
+ # flow_mapping_entry are equal, their interpretations are different.
+ # For `flow_sequence_entry`, the part `KEY flow_node? (VALUE flow_node?)?`
+ # generates an inline mapping (set syntax).
+
+ def parse_flow_sequence_first_entry(self):
+ token = self.get_token()
+ self.marks.append(token.start_mark)
+ return self.parse_flow_sequence_entry(first=True)
+
+ def parse_flow_sequence_entry(self, first=False):
+ if not self.check_token(FlowSequenceEndToken):
+ if not first:
+ if self.check_token(FlowEntryToken):
+ self.get_token()
+ else:
+ token = self.peek_token()
+ raise ParserError("while parsing a flow sequence", self.marks[-1],
+ "expected ',' or ']', but got %r" % token.id, token.start_mark)
+
+ if self.check_token(KeyToken):
+ token = self.peek_token()
+ event = MappingStartEvent(None, None, True,
+ token.start_mark, token.end_mark,
+ flow_style=True)
+ self.state = self.parse_flow_sequence_entry_mapping_key
+ return event
+ elif not self.check_token(FlowSequenceEndToken):
+ self.states.append(self.parse_flow_sequence_entry)
+ return self.parse_flow_node()
+ token = self.get_token()
+ event = SequenceEndEvent(token.start_mark, token.end_mark)
+ self.state = self.states.pop()
+ self.marks.pop()
+ return event
+
+ def parse_flow_sequence_entry_mapping_key(self):
+ token = self.get_token()
+ if not self.check_token(ValueToken,
+ FlowEntryToken, FlowSequenceEndToken):
+ self.states.append(self.parse_flow_sequence_entry_mapping_value)
+ return self.parse_flow_node()
+ else:
+ self.state = self.parse_flow_sequence_entry_mapping_value
+ return self.process_empty_scalar(token.end_mark)
+
+ def parse_flow_sequence_entry_mapping_value(self):
+ if self.check_token(ValueToken):
+ token = self.get_token()
+ if not self.check_token(FlowEntryToken, FlowSequenceEndToken):
+ self.states.append(self.parse_flow_sequence_entry_mapping_end)
+ return self.parse_flow_node()
+ else:
+ self.state = self.parse_flow_sequence_entry_mapping_end
+ return self.process_empty_scalar(token.end_mark)
+ else:
+ self.state = self.parse_flow_sequence_entry_mapping_end
+ token = self.peek_token()
+ return self.process_empty_scalar(token.start_mark)
+
+ def parse_flow_sequence_entry_mapping_end(self):
+ self.state = self.parse_flow_sequence_entry
+ token = self.peek_token()
+ return MappingEndEvent(token.start_mark, token.start_mark)
+
+ # flow_mapping ::= FLOW-MAPPING-START
+ # (flow_mapping_entry FLOW-ENTRY)*
+ # flow_mapping_entry?
+ # FLOW-MAPPING-END
+ # flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+
+ def parse_flow_mapping_first_key(self):
+ token = self.get_token()
+ self.marks.append(token.start_mark)
+ return self.parse_flow_mapping_key(first=True)
+
+ def parse_flow_mapping_key(self, first=False):
+ if not self.check_token(FlowMappingEndToken):
+ if not first:
+ if self.check_token(FlowEntryToken):
+ self.get_token()
+ else:
+ token = self.peek_token()
+ raise ParserError("while parsing a flow mapping", self.marks[-1],
+ "expected ',' or '}', but got %r" % token.id, token.start_mark)
+ if self.check_token(KeyToken):
+ token = self.get_token()
+ if not self.check_token(ValueToken,
+ FlowEntryToken, FlowMappingEndToken):
+ self.states.append(self.parse_flow_mapping_value)
+ return self.parse_flow_node()
+ else:
+ self.state = self.parse_flow_mapping_value
+ return self.process_empty_scalar(token.end_mark)
+ elif not self.check_token(FlowMappingEndToken):
+ self.states.append(self.parse_flow_mapping_empty_value)
+ return self.parse_flow_node()
+ token = self.get_token()
+ event = MappingEndEvent(token.start_mark, token.end_mark)
+ self.state = self.states.pop()
+ self.marks.pop()
+ return event
+
+ def parse_flow_mapping_value(self):
+ if self.check_token(ValueToken):
+ token = self.get_token()
+ if not self.check_token(FlowEntryToken, FlowMappingEndToken):
+ self.states.append(self.parse_flow_mapping_key)
+ return self.parse_flow_node()
+ else:
+ self.state = self.parse_flow_mapping_key
+ return self.process_empty_scalar(token.end_mark)
+ else:
+ self.state = self.parse_flow_mapping_key
+ token = self.peek_token()
+ return self.process_empty_scalar(token.start_mark)
+
+ def parse_flow_mapping_empty_value(self):
+ self.state = self.parse_flow_mapping_key
+ return self.process_empty_scalar(self.peek_token().start_mark)
+
+ def process_empty_scalar(self, mark):
+ return ScalarEvent(None, None, (True, False), '', mark, mark)
+
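The parser above is a pull parser: each `parse_*` method emits exactly one event and stores its continuation in `self.state`, and the consumer drives it through `check_event()`, `peek_event()` and `get_event()`. A minimal sketch of draining the event stream, assuming the package imports as `pyyaml3`:

from pyyaml3.loader import SafeLoader
from pyyaml3.events import ScalarEvent, StreamEndEvent

loader = SafeLoader("key: value\n")
scalars = []
while not loader.check_event(StreamEndEvent):   # stop before STREAM-END
    event = loader.get_event()                  # one event per call
    if isinstance(event, ScalarEvent):
        scalars.append(event.value)
print(scalars)  # ['key', 'value']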
diff --git a/collectors/python.d.plugin/python_modules/pyyaml3/reader.py b/collectors/python.d.plugin/python_modules/pyyaml3/reader.py
new file mode 100644
index 000000000..0a515fd64
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/pyyaml3/reader.py
@@ -0,0 +1,193 @@
+# SPDX-License-Identifier: MIT
+# This module contains abstractions for the input stream. You don't have to
+# look further, there is no pretty code here.
+#
+# We define two classes here.
+#
+# Mark(source, line, column)
+# It's just a record and its only use is producing nice error messages.
+# Parser does not use it for any other purposes.
+#
+# Reader(source, data)
+# Reader determines the encoding of `data` and converts it to unicode.
+# Reader provides the following methods and attributes:
+# reader.peek(length=1) - return the next `length` characters
+# reader.forward(length=1) - move the current position forward by `length` characters.
+# reader.index - the number of the current character.
+# reader.line, reader.column - the line and the column of the current character.
+
+__all__ = ['Reader', 'ReaderError']
+
+from .error import YAMLError, Mark
+
+import codecs, re
+
+class ReaderError(YAMLError):
+
+ def __init__(self, name, position, character, encoding, reason):
+ self.name = name
+ self.character = character
+ self.position = position
+ self.encoding = encoding
+ self.reason = reason
+
+ def __str__(self):
+ if isinstance(self.character, bytes):
+ return "'%s' codec can't decode byte #x%02x: %s\n" \
+ " in \"%s\", position %d" \
+ % (self.encoding, ord(self.character), self.reason,
+ self.name, self.position)
+ else:
+ return "unacceptable character #x%04x: %s\n" \
+ " in \"%s\", position %d" \
+ % (self.character, self.reason,
+ self.name, self.position)
+
+class Reader(object):
+ # Reader:
+ # - determines the data encoding and converts it to a unicode string,
+ # - checks if characters are in allowed range,
+ # - adds '\0' to the end.
+
+ # Reader accepts
+ # - a `bytes` object,
+ # - a `str` object,
+ # - a file-like object with its `read` method returning `str`,
+ # - a file-like object with its `read` method returning `unicode`.
+
+ # Yeah, it's ugly and slow.
+
+ def __init__(self, stream):
+ self.name = None
+ self.stream = None
+ self.stream_pointer = 0
+ self.eof = True
+ self.buffer = ''
+ self.pointer = 0
+ self.raw_buffer = None
+ self.raw_decode = None
+ self.encoding = None
+ self.index = 0
+ self.line = 0
+ self.column = 0
+ if isinstance(stream, str):
+ self.name = "<unicode string>"
+ self.check_printable(stream)
+ self.buffer = stream+'\0'
+ elif isinstance(stream, bytes):
+ self.name = "<byte string>"
+ self.raw_buffer = stream
+ self.determine_encoding()
+ else:
+ self.stream = stream
+ self.name = getattr(stream, 'name', "<file>")
+ self.eof = False
+ self.raw_buffer = None
+ self.determine_encoding()
+
+ def peek(self, index=0):
+ try:
+ return self.buffer[self.pointer+index]
+ except IndexError:
+ self.update(index+1)
+ return self.buffer[self.pointer+index]
+
+ def prefix(self, length=1):
+ if self.pointer+length >= len(self.buffer):
+ self.update(length)
+ return self.buffer[self.pointer:self.pointer+length]
+
+ def forward(self, length=1):
+ if self.pointer+length+1 >= len(self.buffer):
+ self.update(length+1)
+ while length:
+ ch = self.buffer[self.pointer]
+ self.pointer += 1
+ self.index += 1
+ if ch in '\n\x85\u2028\u2029' \
+ or (ch == '\r' and self.buffer[self.pointer] != '\n'):
+ self.line += 1
+ self.column = 0
+ elif ch != '\uFEFF':
+ self.column += 1
+ length -= 1
+
+ def get_mark(self):
+ if self.stream is None:
+ return Mark(self.name, self.index, self.line, self.column,
+ self.buffer, self.pointer)
+ else:
+ return Mark(self.name, self.index, self.line, self.column,
+ None, None)
+
+ def determine_encoding(self):
+ while not self.eof and (self.raw_buffer is None or len(self.raw_buffer) < 2):
+ self.update_raw()
+ if isinstance(self.raw_buffer, bytes):
+ if self.raw_buffer.startswith(codecs.BOM_UTF16_LE):
+ self.raw_decode = codecs.utf_16_le_decode
+ self.encoding = 'utf-16-le'
+ elif self.raw_buffer.startswith(codecs.BOM_UTF16_BE):
+ self.raw_decode = codecs.utf_16_be_decode
+ self.encoding = 'utf-16-be'
+ else:
+ self.raw_decode = codecs.utf_8_decode
+ self.encoding = 'utf-8'
+ self.update(1)
+
+ NON_PRINTABLE = re.compile('[^\x09\x0A\x0D\x20-\x7E\x85\xA0-\uD7FF\uE000-\uFFFD]')
+ def check_printable(self, data):
+ match = self.NON_PRINTABLE.search(data)
+ if match:
+ character = match.group()
+ position = self.index+(len(self.buffer)-self.pointer)+match.start()
+ raise ReaderError(self.name, position, ord(character),
+ 'unicode', "special characters are not allowed")
+
+ def update(self, length):
+ if self.raw_buffer is None:
+ return
+ self.buffer = self.buffer[self.pointer:]
+ self.pointer = 0
+ while len(self.buffer) < length:
+ if not self.eof:
+ self.update_raw()
+ if self.raw_decode is not None:
+ try:
+ data, converted = self.raw_decode(self.raw_buffer,
+ 'strict', self.eof)
+ except UnicodeDecodeError as exc:
+ character = self.raw_buffer[exc.start]
+ if self.stream is not None:
+ position = self.stream_pointer-len(self.raw_buffer)+exc.start
+ else:
+ position = exc.start
+ raise ReaderError(self.name, position, character,
+ exc.encoding, exc.reason)
+ else:
+ data = self.raw_buffer
+ converted = len(data)
+ self.check_printable(data)
+ self.buffer += data
+ self.raw_buffer = self.raw_buffer[converted:]
+ if self.eof:
+ self.buffer += '\0'
+ self.raw_buffer = None
+ break
+
+ def update_raw(self, size=4096):
+ data = self.stream.read(size)
+ if self.raw_buffer is None:
+ self.raw_buffer = data
+ else:
+ self.raw_buffer += data
+ self.stream_pointer += len(data)
+ if not data:
+ self.eof = True
+
+#try:
+# import psyco
+# psyco.bind(Reader)
+#except ImportError:
+# pass
+
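As the header comment describes, the Reader detects UTF-16 input from its BOM and otherwise decodes as UTF-8, exposes lookahead via `peek()`/`prefix()`, and tracks `index`/`line`/`column` as `forward()` consumes characters. A minimal sketch, assuming the package imports as `pyyaml3`:

from pyyaml3.reader import Reader

r = Reader(b"a: 1\nb: 2\n")
print(r.encoding)             # 'utf-8' (no UTF-16 BOM at the start)
print(r.peek(), r.prefix(4))  # 'a' 'a: 1' -- lookahead does not move the pointer
r.forward(5)                  # consume "a: 1\n"
print(r.line, r.column)       # 1 0 -- zero-based position just after the newline
print(r.get_mark())           # a Mark recording this position for error messages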
diff --git a/collectors/python.d.plugin/python_modules/pyyaml3/representer.py b/collectors/python.d.plugin/python_modules/pyyaml3/representer.py
new file mode 100644
index 000000000..756a18dcc
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/pyyaml3/representer.py
@@ -0,0 +1,375 @@
+# SPDX-License-Identifier: MIT
+
+__all__ = ['BaseRepresenter', 'SafeRepresenter', 'Representer',
+ 'RepresenterError']
+
+from .error import *
+from .nodes import *
+
+import datetime, sys, copyreg, types, base64
+
+class RepresenterError(YAMLError):
+ pass
+
+class BaseRepresenter:
+
+ yaml_representers = {}
+ yaml_multi_representers = {}
+
+ def __init__(self, default_style=None, default_flow_style=None):
+ self.default_style = default_style
+ self.default_flow_style = default_flow_style
+ self.represented_objects = {}
+ self.object_keeper = []
+ self.alias_key = None
+
+ def represent(self, data):
+ node = self.represent_data(data)
+ self.serialize(node)
+ self.represented_objects = {}
+ self.object_keeper = []
+ self.alias_key = None
+
+ def represent_data(self, data):
+ if self.ignore_aliases(data):
+ self.alias_key = None
+ else:
+ self.alias_key = id(data)
+ if self.alias_key is not None:
+ if self.alias_key in self.represented_objects:
+ node = self.represented_objects[self.alias_key]
+ #if node is None:
+ # raise RepresenterError("recursive objects are not allowed: %r" % data)
+ return node
+ #self.represented_objects[alias_key] = None
+ self.object_keeper.append(data)
+ data_types = type(data).__mro__
+ if data_types[0] in self.yaml_representers:
+ node = self.yaml_representers[data_types[0]](self, data)
+ else:
+ for data_type in data_types:
+ if data_type in self.yaml_multi_representers:
+ node = self.yaml_multi_representers[data_type](self, data)
+ break
+ else:
+ if None in self.yaml_multi_representers:
+ node = self.yaml_multi_representers[None](self, data)
+ elif None in self.yaml_representers:
+ node = self.yaml_representers[None](self, data)
+ else:
+ node = ScalarNode(None, str(data))
+ #if alias_key is not None:
+ # self.represented_objects[alias_key] = node
+ return node
+
+ @classmethod
+ def add_representer(cls, data_type, representer):
+ if not 'yaml_representers' in cls.__dict__:
+ cls.yaml_representers = cls.yaml_representers.copy()
+ cls.yaml_representers[data_type] = representer
+
+ @classmethod
+ def add_multi_representer(cls, data_type, representer):
+ if not 'yaml_multi_representers' in cls.__dict__:
+ cls.yaml_multi_representers = cls.yaml_multi_representers.copy()
+ cls.yaml_multi_representers[data_type] = representer
+
+ def represent_scalar(self, tag, value, style=None):
+ if style is None:
+ style = self.default_style
+ node = ScalarNode(tag, value, style=style)
+ if self.alias_key is not None:
+ self.represented_objects[self.alias_key] = node
+ return node
+
+ def represent_sequence(self, tag, sequence, flow_style=None):
+ value = []
+ node = SequenceNode(tag, value, flow_style=flow_style)
+ if self.alias_key is not None:
+ self.represented_objects[self.alias_key] = node
+ best_style = True
+ for item in sequence:
+ node_item = self.represent_data(item)
+ if not (isinstance(node_item, ScalarNode) and not node_item.style):
+ best_style = False
+ value.append(node_item)
+ if flow_style is None:
+ if self.default_flow_style is not None:
+ node.flow_style = self.default_flow_style
+ else:
+ node.flow_style = best_style
+ return node
+
+ def represent_mapping(self, tag, mapping, flow_style=None):
+ value = []
+ node = MappingNode(tag, value, flow_style=flow_style)
+ if self.alias_key is not None:
+ self.represented_objects[self.alias_key] = node
+ best_style = True
+ if hasattr(mapping, 'items'):
+ mapping = list(mapping.items())
+ try:
+ mapping = sorted(mapping)
+ except TypeError:
+ pass
+ for item_key, item_value in mapping:
+ node_key = self.represent_data(item_key)
+ node_value = self.represent_data(item_value)
+ if not (isinstance(node_key, ScalarNode) and not node_key.style):
+ best_style = False
+ if not (isinstance(node_value, ScalarNode) and not node_value.style):
+ best_style = False
+ value.append((node_key, node_value))
+ if flow_style is None:
+ if self.default_flow_style is not None:
+ node.flow_style = self.default_flow_style
+ else:
+ node.flow_style = best_style
+ return node
+
+ def ignore_aliases(self, data):
+ return False
+
+class SafeRepresenter(BaseRepresenter):
+
+ def ignore_aliases(self, data):
+ if data in [None, ()]:
+ return True
+ if isinstance(data, (str, bytes, bool, int, float)):
+ return True
+
+ def represent_none(self, data):
+ return self.represent_scalar('tag:yaml.org,2002:null', 'null')
+
+ def represent_str(self, data):
+ return self.represent_scalar('tag:yaml.org,2002:str', data)
+
+ def represent_binary(self, data):
+ if hasattr(base64, 'encodebytes'):
+ data = base64.encodebytes(data).decode('ascii')
+ else:
+ data = base64.encodestring(data).decode('ascii')
+ return self.represent_scalar('tag:yaml.org,2002:binary', data, style='|')
+
+ def represent_bool(self, data):
+ if data:
+ value = 'true'
+ else:
+ value = 'false'
+ return self.represent_scalar('tag:yaml.org,2002:bool', value)
+
+ def represent_int(self, data):
+ return self.represent_scalar('tag:yaml.org,2002:int', str(data))
+
+ inf_value = 1e300
+ while repr(inf_value) != repr(inf_value*inf_value):
+ inf_value *= inf_value
+
+ def represent_float(self, data):
+ if data != data or (data == 0.0 and data == 1.0):
+ value = '.nan'
+ elif data == self.inf_value:
+ value = '.inf'
+ elif data == -self.inf_value:
+ value = '-.inf'
+ else:
+ value = repr(data).lower()
+ # Note that in some cases `repr(data)` represents a float number
+ # without the decimal parts. For instance:
+ # >>> repr(1e17)
+ # '1e17'
+ # Unfortunately, this is not a valid float representation according
+ # to the definition of the `!!float` tag. We fix this by adding
+ # '.0' before the 'e' symbol.
+ if '.' not in value and 'e' in value:
+ value = value.replace('e', '.0e', 1)
+ return self.represent_scalar('tag:yaml.org,2002:float', value)
+
+ def represent_list(self, data):
+ #pairs = (len(data) > 0 and isinstance(data, list))
+ #if pairs:
+ # for item in data:
+ # if not isinstance(item, tuple) or len(item) != 2:
+ # pairs = False
+ # break
+ #if not pairs:
+ return self.represent_sequence('tag:yaml.org,2002:seq', data)
+ #value = []
+ #for item_key, item_value in data:
+ # value.append(self.represent_mapping(u'tag:yaml.org,2002:map',
+ # [(item_key, item_value)]))
+ #return SequenceNode(u'tag:yaml.org,2002:pairs', value)
+
+ def represent_dict(self, data):
+ return self.represent_mapping('tag:yaml.org,2002:map', data)
+
+ def represent_set(self, data):
+ value = {}
+ for key in data:
+ value[key] = None
+ return self.represent_mapping('tag:yaml.org,2002:set', value)
+
+ def represent_date(self, data):
+ value = data.isoformat()
+ return self.represent_scalar('tag:yaml.org,2002:timestamp', value)
+
+ def represent_datetime(self, data):
+ value = data.isoformat(' ')
+ return self.represent_scalar('tag:yaml.org,2002:timestamp', value)
+
+ def represent_yaml_object(self, tag, data, cls, flow_style=None):
+ if hasattr(data, '__getstate__'):
+ state = data.__getstate__()
+ else:
+ state = data.__dict__.copy()
+ return self.represent_mapping(tag, state, flow_style=flow_style)
+
+ def represent_undefined(self, data):
+ raise RepresenterError("cannot represent an object: %s" % data)
+
+SafeRepresenter.add_representer(type(None),
+ SafeRepresenter.represent_none)
+
+SafeRepresenter.add_representer(str,
+ SafeRepresenter.represent_str)
+
+SafeRepresenter.add_representer(bytes,
+ SafeRepresenter.represent_binary)
+
+SafeRepresenter.add_representer(bool,
+ SafeRepresenter.represent_bool)
+
+SafeRepresenter.add_representer(int,
+ SafeRepresenter.represent_int)
+
+SafeRepresenter.add_representer(float,
+ SafeRepresenter.represent_float)
+
+SafeRepresenter.add_representer(list,
+ SafeRepresenter.represent_list)
+
+SafeRepresenter.add_representer(tuple,
+ SafeRepresenter.represent_list)
+
+SafeRepresenter.add_representer(dict,
+ SafeRepresenter.represent_dict)
+
+SafeRepresenter.add_representer(set,
+ SafeRepresenter.represent_set)
+
+SafeRepresenter.add_representer(datetime.date,
+ SafeRepresenter.represent_date)
+
+SafeRepresenter.add_representer(datetime.datetime,
+ SafeRepresenter.represent_datetime)
+
+SafeRepresenter.add_representer(None,
+ SafeRepresenter.represent_undefined)
+
+class Representer(SafeRepresenter):
+
+ def represent_complex(self, data):
+ if data.imag == 0.0:
+ data = '%r' % data.real
+ elif data.real == 0.0:
+ data = '%rj' % data.imag
+ elif data.imag > 0:
+ data = '%r+%rj' % (data.real, data.imag)
+ else:
+ data = '%r%rj' % (data.real, data.imag)
+ return self.represent_scalar('tag:yaml.org,2002:python/complex', data)
+
+ def represent_tuple(self, data):
+ return self.represent_sequence('tag:yaml.org,2002:python/tuple', data)
+
+ def represent_name(self, data):
+ name = '%s.%s' % (data.__module__, data.__name__)
+ return self.represent_scalar('tag:yaml.org,2002:python/name:'+name, '')
+
+ def represent_module(self, data):
+ return self.represent_scalar(
+ 'tag:yaml.org,2002:python/module:'+data.__name__, '')
+
+ def represent_object(self, data):
+ # We use the __reduce__ API to save the data. data.__reduce__ returns
+ # a tuple of length 2-5:
+ # (function, args, state, listitems, dictitems)
+
+ # For reconstructing, we call function(*args), then set its state,
+ # listitems, and dictitems if they are not None.
+
+ # A special case is when function.__name__ == '__newobj__'. In this
+ # case we create the object with args[0].__new__(*args).
+
+ # Another special case is when __reduce__ returns a string - we don't
+ # support it.
+
+ # We produce a !!python/object, !!python/object/new or
+ # !!python/object/apply node.
+
+ cls = type(data)
+ if cls in copyreg.dispatch_table:
+ reduce = copyreg.dispatch_table[cls](data)
+ elif hasattr(data, '__reduce_ex__'):
+ reduce = data.__reduce_ex__(2)
+ elif hasattr(data, '__reduce__'):
+ reduce = data.__reduce__()
+ else:
+ raise RepresenterError("cannot represent object: %r" % data)
+ reduce = (list(reduce)+[None]*5)[:5]
+ function, args, state, listitems, dictitems = reduce
+ args = list(args)
+ if state is None:
+ state = {}
+ if listitems is not None:
+ listitems = list(listitems)
+ if dictitems is not None:
+ dictitems = dict(dictitems)
+ if function.__name__ == '__newobj__':
+ function = args[0]
+ args = args[1:]
+ tag = 'tag:yaml.org,2002:python/object/new:'
+ newobj = True
+ else:
+ tag = 'tag:yaml.org,2002:python/object/apply:'
+ newobj = False
+ function_name = '%s.%s' % (function.__module__, function.__name__)
+ if not args and not listitems and not dictitems \
+ and isinstance(state, dict) and newobj:
+ return self.represent_mapping(
+ 'tag:yaml.org,2002:python/object:'+function_name, state)
+ if not listitems and not dictitems \
+ and isinstance(state, dict) and not state:
+ return self.represent_sequence(tag+function_name, args)
+ value = {}
+ if args:
+ value['args'] = args
+ if state or not isinstance(state, dict):
+ value['state'] = state
+ if listitems:
+ value['listitems'] = listitems
+ if dictitems:
+ value['dictitems'] = dictitems
+ return self.represent_mapping(tag+function_name, value)
+
+Representer.add_representer(complex,
+ Representer.represent_complex)
+
+Representer.add_representer(tuple,
+ Representer.represent_tuple)
+
+Representer.add_representer(type,
+ Representer.represent_name)
+
+Representer.add_representer(types.FunctionType,
+ Representer.represent_name)
+
+Representer.add_representer(types.BuiltinFunctionType,
+ Representer.represent_name)
+
+Representer.add_representer(types.ModuleType,
+ Representer.represent_module)
+
+Representer.add_multi_representer(object,
+ Representer.represent_object)
+
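Representers map native objects to nodes through two per-class registries, `yaml_representers` (exact type) and `yaml_multi_representers` (any base class), populated with `add_representer()` / `add_multi_representer()`. The sketch below registers a handler for a made-up `Point` class (the class and handler are illustrative, not part of the module) and inspects the node that `represent_data()` builds; the `pyyaml3` import path is again an assumption:

from pyyaml3.representer import SafeRepresenter

class Point:
    def __init__(self, x, y):
        self.x, self.y = x, y

def represent_point(representer, point):
    # Illustrative handler: emit a Point as a plain two-entry mapping.
    return representer.represent_mapping(
        'tag:yaml.org,2002:map', {'x': point.x, 'y': point.y})

SafeRepresenter.add_representer(Point, represent_point)

node = SafeRepresenter().represent_data(Point(1, 2))
print(node.tag)                                      # 'tag:yaml.org,2002:map'
print([(k.value, v.value) for k, v in node.value])   # [('x', '1'), ('y', '2')]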
diff --git a/collectors/python.d.plugin/python_modules/pyyaml3/resolver.py b/collectors/python.d.plugin/python_modules/pyyaml3/resolver.py
new file mode 100644
index 000000000..50945e04d
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/pyyaml3/resolver.py
@@ -0,0 +1,225 @@
+# SPDX-License-Identifier: MIT
+
+__all__ = ['BaseResolver', 'Resolver']
+
+from .error import *
+from .nodes import *
+
+import re
+
+class ResolverError(YAMLError):
+ pass
+
+class BaseResolver:
+
+ DEFAULT_SCALAR_TAG = 'tag:yaml.org,2002:str'
+ DEFAULT_SEQUENCE_TAG = 'tag:yaml.org,2002:seq'
+ DEFAULT_MAPPING_TAG = 'tag:yaml.org,2002:map'
+
+ yaml_implicit_resolvers = {}
+ yaml_path_resolvers = {}
+
+ def __init__(self):
+ self.resolver_exact_paths = []
+ self.resolver_prefix_paths = []
+
+ @classmethod
+ def add_implicit_resolver(cls, tag, regexp, first):
+ if not 'yaml_implicit_resolvers' in cls.__dict__:
+ cls.yaml_implicit_resolvers = cls.yaml_implicit_resolvers.copy()
+ if first is None:
+ first = [None]
+ for ch in first:
+ cls.yaml_implicit_resolvers.setdefault(ch, []).append((tag, regexp))
+
+ @classmethod
+ def add_path_resolver(cls, tag, path, kind=None):
+ # Note: `add_path_resolver` is experimental. The API could be changed.
+ # `new_path` is a pattern that is matched against the path from the
+ # root to the node that is being considered. `new_path` elements are
+ # tuples `(node_check, index_check)`. `node_check` is a node class:
+ # `ScalarNode`, `SequenceNode`, `MappingNode` or `None`. `None`
+ # matches any kind of a node. `index_check` could be `None`, a boolean
+ # value, a string value, or a number. `None` and `False` match against
+ # any _value_ of sequence and mapping nodes. `True` matches against
+ # any _key_ of a mapping node. A string `index_check` matches against
+ # a mapping value that corresponds to a scalar key whose content is
+ # equal to the `index_check` value. An integer `index_check` matches
+ # against a sequence value with the index equal to `index_check`.
+ if not 'yaml_path_resolvers' in cls.__dict__:
+ cls.yaml_path_resolvers = cls.yaml_path_resolvers.copy()
+ new_path = []
+ for element in path:
+ if isinstance(element, (list, tuple)):
+ if len(element) == 2:
+ node_check, index_check = element
+ elif len(element) == 1:
+ node_check = element[0]
+ index_check = True
+ else:
+ raise ResolverError("Invalid path element: %s" % element)
+ else:
+ node_check = None
+ index_check = element
+ if node_check is str:
+ node_check = ScalarNode
+ elif node_check is list:
+ node_check = SequenceNode
+ elif node_check is dict:
+ node_check = MappingNode
+ elif node_check not in [ScalarNode, SequenceNode, MappingNode] \
+ and not isinstance(node_check, str) \
+ and node_check is not None:
+ raise ResolverError("Invalid node checker: %s" % node_check)
+ if not isinstance(index_check, (str, int)) \
+ and index_check is not None:
+ raise ResolverError("Invalid index checker: %s" % index_check)
+ new_path.append((node_check, index_check))
+ if kind is str:
+ kind = ScalarNode
+ elif kind is list:
+ kind = SequenceNode
+ elif kind is dict:
+ kind = MappingNode
+ elif kind not in [ScalarNode, SequenceNode, MappingNode] \
+ and kind is not None:
+ raise ResolverError("Invalid node kind: %s" % kind)
+ cls.yaml_path_resolvers[tuple(new_path), kind] = tag
+
+ def descend_resolver(self, current_node, current_index):
+ if not self.yaml_path_resolvers:
+ return
+ exact_paths = {}
+ prefix_paths = []
+ if current_node:
+ depth = len(self.resolver_prefix_paths)
+ for path, kind in self.resolver_prefix_paths[-1]:
+ if self.check_resolver_prefix(depth, path, kind,
+ current_node, current_index):
+ if len(path) > depth:
+ prefix_paths.append((path, kind))
+ else:
+ exact_paths[kind] = self.yaml_path_resolvers[path, kind]
+ else:
+ for path, kind in self.yaml_path_resolvers:
+ if not path:
+ exact_paths[kind] = self.yaml_path_resolvers[path, kind]
+ else:
+ prefix_paths.append((path, kind))
+ self.resolver_exact_paths.append(exact_paths)
+ self.resolver_prefix_paths.append(prefix_paths)
+
+ def ascend_resolver(self):
+ if not self.yaml_path_resolvers:
+ return
+ self.resolver_exact_paths.pop()
+ self.resolver_prefix_paths.pop()
+
+ def check_resolver_prefix(self, depth, path, kind,
+ current_node, current_index):
+ node_check, index_check = path[depth-1]
+ if isinstance(node_check, str):
+ if current_node.tag != node_check:
+ return
+ elif node_check is not None:
+ if not isinstance(current_node, node_check):
+ return
+ if index_check is True and current_index is not None:
+ return
+ if (index_check is False or index_check is None) \
+ and current_index is None:
+ return
+ if isinstance(index_check, str):
+ if not (isinstance(current_index, ScalarNode)
+ and index_check == current_index.value):
+ return
+ elif isinstance(index_check, int) and not isinstance(index_check, bool):
+ if index_check != current_index:
+ return
+ return True
+
+ def resolve(self, kind, value, implicit):
+ if kind is ScalarNode and implicit[0]:
+ if value == '':
+ resolvers = self.yaml_implicit_resolvers.get('', [])
+ else:
+ resolvers = self.yaml_implicit_resolvers.get(value[0], [])
+ resolvers += self.yaml_implicit_resolvers.get(None, [])
+ for tag, regexp in resolvers:
+ if regexp.match(value):
+ return tag
+ implicit = implicit[1]
+ if self.yaml_path_resolvers:
+ exact_paths = self.resolver_exact_paths[-1]
+ if kind in exact_paths:
+ return exact_paths[kind]
+ if None in exact_paths:
+ return exact_paths[None]
+ if kind is ScalarNode:
+ return self.DEFAULT_SCALAR_TAG
+ elif kind is SequenceNode:
+ return self.DEFAULT_SEQUENCE_TAG
+ elif kind is MappingNode:
+ return self.DEFAULT_MAPPING_TAG
+
+class Resolver(BaseResolver):
+ pass
+
+Resolver.add_implicit_resolver(
+ 'tag:yaml.org,2002:bool',
+ re.compile(r'''^(?:yes|Yes|YES|no|No|NO
+ |true|True|TRUE|false|False|FALSE
+ |on|On|ON|off|Off|OFF)$''', re.X),
+ list('yYnNtTfFoO'))
+
+Resolver.add_implicit_resolver(
+ 'tag:yaml.org,2002:float',
+ re.compile(r'''^(?:[-+]?(?:[0-9][0-9_]*)\.[0-9_]*(?:[eE][-+][0-9]+)?
+ |\.[0-9_]+(?:[eE][-+][0-9]+)?
+ |[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+\.[0-9_]*
+ |[-+]?\.(?:inf|Inf|INF)
+ |\.(?:nan|NaN|NAN))$''', re.X),
+ list('-+0123456789.'))
+
+Resolver.add_implicit_resolver(
+ 'tag:yaml.org,2002:int',
+ re.compile(r'''^(?:[-+]?0b[0-1_]+
+ |[-+]?0[0-7_]+
+ |[-+]?(?:0|[1-9][0-9_]*)
+ |[-+]?0x[0-9a-fA-F_]+
+ |[-+]?[1-9][0-9_]*(?::[0-5]?[0-9])+)$''', re.X),
+ list('-+0123456789'))
+
+Resolver.add_implicit_resolver(
+ 'tag:yaml.org,2002:merge',
+ re.compile(r'^(?:<<)$'),
+ ['<'])
+
+Resolver.add_implicit_resolver(
+ 'tag:yaml.org,2002:null',
+ re.compile(r'''^(?: ~
+ |null|Null|NULL
+ | )$''', re.X),
+ ['~', 'n', 'N', ''])
+
+Resolver.add_implicit_resolver(
+ 'tag:yaml.org,2002:timestamp',
+ re.compile(r'''^(?:[0-9][0-9][0-9][0-9]-[0-9][0-9]-[0-9][0-9]
+ |[0-9][0-9][0-9][0-9] -[0-9][0-9]? -[0-9][0-9]?
+ (?:[Tt]|[ \t]+)[0-9][0-9]?
+ :[0-9][0-9] :[0-9][0-9] (?:\.[0-9]*)?
+ (?:[ \t]*(?:Z|[-+][0-9][0-9]?(?::[0-9][0-9])?))?)$''', re.X),
+ list('0123456789'))
+
+Resolver.add_implicit_resolver(
+ 'tag:yaml.org,2002:value',
+ re.compile(r'^(?:=)$'),
+ ['='])
+
+# The following resolver is only for documentation purposes. It cannot work
+# because plain scalars cannot start with '!', '&', or '*'.
+Resolver.add_implicit_resolver(
+ 'tag:yaml.org,2002:yaml',
+ re.compile(r'^(?:!|&|\*)$'),
+ list('!&*'))
+
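Implicit resolvers are `(tag, regexp)` pairs indexed by the first character of the scalar, so `resolve()` only has to try the patterns that can possibly match; plain scalars that match nothing fall back to the default `!!str` tag, and collection kinds fall back to `!!seq` / `!!map`. A minimal sketch, assuming the package imports as `pyyaml3`:

from pyyaml3.resolver import Resolver
from pyyaml3.nodes import ScalarNode, SequenceNode

r = Resolver()
print(r.resolve(ScalarNode, 'true', (True, False)))   # tag:yaml.org,2002:bool
print(r.resolve(ScalarNode, '42',   (True, False)))   # tag:yaml.org,2002:int
print(r.resolve(ScalarNode, '4 2',  (True, False)))   # tag:yaml.org,2002:str (no pattern matches)
print(r.resolve(SequenceNode, None, (False, False)))  # tag:yaml.org,2002:seq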
diff --git a/collectors/python.d.plugin/python_modules/pyyaml3/scanner.py b/collectors/python.d.plugin/python_modules/pyyaml3/scanner.py
new file mode 100644
index 000000000..b55854e8b
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/pyyaml3/scanner.py
@@ -0,0 +1,1449 @@
+# SPDX-License-Identifier: MIT
+
+# Scanner produces tokens of the following types:
+# STREAM-START
+# STREAM-END
+# DIRECTIVE(name, value)
+# DOCUMENT-START
+# DOCUMENT-END
+# BLOCK-SEQUENCE-START
+# BLOCK-MAPPING-START
+# BLOCK-END
+# FLOW-SEQUENCE-START
+# FLOW-MAPPING-START
+# FLOW-SEQUENCE-END
+# FLOW-MAPPING-END
+# BLOCK-ENTRY
+# FLOW-ENTRY
+# KEY
+# VALUE
+# ALIAS(value)
+# ANCHOR(value)
+# TAG(value)
+# SCALAR(value, plain, style)
+#
+# Read comments in the Scanner code for more details.
+#
+
+__all__ = ['Scanner', 'ScannerError']
+
+from .error import MarkedYAMLError
+from .tokens import *
+
+class ScannerError(MarkedYAMLError):
+ pass
+
+class SimpleKey:
+ # See below simple keys treatment.
+
+ def __init__(self, token_number, required, index, line, column, mark):
+ self.token_number = token_number
+ self.required = required
+ self.index = index
+ self.line = line
+ self.column = column
+ self.mark = mark
+
+class Scanner:
+
+ def __init__(self):
+ """Initialize the scanner."""
+ # It is assumed that Scanner and Reader will have a common descendant.
+ # Reader does the dirty work of checking for BOM and converting the
+ # input data to Unicode. It also adds NUL to the end.
+ #
+ # Reader supports the following methods
+ # self.peek(i=0) # peek the next i-th character
+ # self.prefix(l=1) # peek the next l characters
+ # self.forward(l=1) # read the next l characters and move the pointer.
+
+ # Have we reached the end of the stream?
+ self.done = False
+
+ # The number of unclosed '{' and '['. `flow_level == 0` means block
+ # context.
+ self.flow_level = 0
+
+ # List of processed tokens that are not yet emitted.
+ self.tokens = []
+
+ # Add the STREAM-START token.
+ self.fetch_stream_start()
+
+ # Number of tokens that were emitted through the `get_token` method.
+ self.tokens_taken = 0
+
+ # The current indentation level.
+ self.indent = -1
+
+ # Past indentation levels.
+ self.indents = []
+
+ # Variables related to simple keys treatment.
+
+ # A simple key is a key that is not denoted by the '?' indicator.
+ # Example of simple keys:
+ # ---
+ # block simple key: value
+ # ? not a simple key:
+ # : { flow simple key: value }
+ # We emit the KEY token before all keys, so when we find a potential
+ # simple key, we try to locate the corresponding ':' indicator.
+ # Simple keys should be limited to a single line and 1024 characters.
+
+ # Can a simple key start at the current position? A simple key may
+ # start:
+ # - at the beginning of the line, not counting indentation spaces
+ # (in block context),
+ # - after '{', '[', ',' (in the flow context),
+ # - after '?', ':', '-' (in the block context).
+ # In the block context, this flag also signifies if a block collection
+ # may start at the current position.
+ self.allow_simple_key = True
+
+ # Keep track of possible simple keys. This is a dictionary. The key
+ # is `flow_level`; there can be no more than one possible simple key
+ # for each level. The value is a SimpleKey record:
+ # (token_number, required, index, line, column, mark)
+ # A simple key may start with ALIAS, ANCHOR, TAG, SCALAR(flow),
+ # '[', or '{' tokens.
+ self.possible_simple_keys = {}
+
+ # Public methods.
+
+ def check_token(self, *choices):
+ # Check if the next token is one of the given types.
+ while self.need_more_tokens():
+ self.fetch_more_tokens()
+ if self.tokens:
+ if not choices:
+ return True
+ for choice in choices:
+ if isinstance(self.tokens[0], choice):
+ return True
+ return False
+
+ def peek_token(self):
+ # Return the next token, but do not delete it from the queue.
+ while self.need_more_tokens():
+ self.fetch_more_tokens()
+ if self.tokens:
+ return self.tokens[0]
+
+ def get_token(self):
+ # Return the next token.
+ while self.need_more_tokens():
+ self.fetch_more_tokens()
+ if self.tokens:
+ self.tokens_taken += 1
+ return self.tokens.pop(0)
+
+ # Private methods.
+
+ def need_more_tokens(self):
+ if self.done:
+ return False
+ if not self.tokens:
+ return True
+ # The current token may be a potential simple key, so we
+ # need to look further.
+ self.stale_possible_simple_keys()
+ if self.next_possible_simple_key() == self.tokens_taken:
+ return True
+
+ def fetch_more_tokens(self):
+
+ # Eat whitespaces and comments until we reach the next token.
+ self.scan_to_next_token()
+
+ # Remove obsolete possible simple keys.
+ self.stale_possible_simple_keys()
+
+ # Compare the current indentation and column. It may add some tokens
+ # and decrease the current indentation level.
+ self.unwind_indent(self.column)
+
+ # Peek the next character.
+ ch = self.peek()
+
+ # Is it the end of stream?
+ if ch == '\0':
+ return self.fetch_stream_end()
+
+ # Is it a directive?
+ if ch == '%' and self.check_directive():
+ return self.fetch_directive()
+
+ # Is it the document start?
+ if ch == '-' and self.check_document_start():
+ return self.fetch_document_start()
+
+ # Is it the document end?
+ if ch == '.' and self.check_document_end():
+ return self.fetch_document_end()
+
+ # TODO: support for BOM within a stream.
+ #if ch == '\uFEFF':
+ # return self.fetch_bom() <-- issue BOMToken
+
+ # Note: the order of the following checks is NOT significant.
+
+ # Is it the flow sequence start indicator?
+ if ch == '[':
+ return self.fetch_flow_sequence_start()
+
+ # Is it the flow mapping start indicator?
+ if ch == '{':
+ return self.fetch_flow_mapping_start()
+
+ # Is it the flow sequence end indicator?
+ if ch == ']':
+ return self.fetch_flow_sequence_end()
+
+ # Is it the flow mapping end indicator?
+ if ch == '}':
+ return self.fetch_flow_mapping_end()
+
+ # Is it the flow entry indicator?
+ if ch == ',':
+ return self.fetch_flow_entry()
+
+ # Is it the block entry indicator?
+ if ch == '-' and self.check_block_entry():
+ return self.fetch_block_entry()
+
+ # Is it the key indicator?
+ if ch == '?' and self.check_key():
+ return self.fetch_key()
+
+ # Is it the value indicator?
+ if ch == ':' and self.check_value():
+ return self.fetch_value()
+
+ # Is it an alias?
+ if ch == '*':
+ return self.fetch_alias()
+
+ # Is it an anchor?
+ if ch == '&':
+ return self.fetch_anchor()
+
+ # Is it a tag?
+ if ch == '!':
+ return self.fetch_tag()
+
+ # Is it a literal scalar?
+ if ch == '|' and not self.flow_level:
+ return self.fetch_literal()
+
+ # Is it a folded scalar?
+ if ch == '>' and not self.flow_level:
+ return self.fetch_folded()
+
+ # Is it a single quoted scalar?
+ if ch == '\'':
+ return self.fetch_single()
+
+ # Is it a double quoted scalar?
+ if ch == '\"':
+ return self.fetch_double()
+
+ # It must be a plain scalar then.
+ if self.check_plain():
+ return self.fetch_plain()
+
+ # No? It's an error. Let's produce a nice error message.
+ raise ScannerError("while scanning for the next token", None,
+ "found character %r that cannot start any token" % ch,
+ self.get_mark())
+
+ # Simple keys treatment.
+
+ def next_possible_simple_key(self):
+ # Return the number of the nearest possible simple key. Actually we
+ # don't need to loop through the whole dictionary. We may replace it
+ # with the following code:
+ # if not self.possible_simple_keys:
+ # return None
+ # return self.possible_simple_keys[
+ # min(self.possible_simple_keys.keys())].token_number
+ min_token_number = None
+ for level in self.possible_simple_keys:
+ key = self.possible_simple_keys[level]
+ if min_token_number is None or key.token_number < min_token_number:
+ min_token_number = key.token_number
+ return min_token_number
+
+ def stale_possible_simple_keys(self):
+ # Remove entries that are no longer possible simple keys. According to
+ # the YAML specification, simple keys
+ # - should be limited to a single line,
+ # - should be no longer than 1024 characters.
+ # Disabling this procedure will allow simple keys of any length and
+ # height (may cause problems if indentation is broken though).
+ for level in list(self.possible_simple_keys):
+ key = self.possible_simple_keys[level]
+ if key.line != self.line \
+ or self.index-key.index > 1024:
+ if key.required:
+ raise ScannerError("while scanning a simple key", key.mark,
+ "could not found expected ':'", self.get_mark())
+ del self.possible_simple_keys[level]
+
+ def save_possible_simple_key(self):
+ # The next token may start a simple key. We check if it's possible
+ # and save its position. This function is called for
+ # ALIAS, ANCHOR, TAG, SCALAR(flow), '[', and '{'.
+
+ # Check if a simple key is required at the current position.
+ required = not self.flow_level and self.indent == self.column
+
+ # A simple key is required only if it is the first token in the current
+ # line. Therefore it is always allowed.
+ assert self.allow_simple_key or not required
+
+ # The next token might be a simple key. Let's save its number and
+ # position.
+ if self.allow_simple_key:
+ self.remove_possible_simple_key()
+ token_number = self.tokens_taken+len(self.tokens)
+ key = SimpleKey(token_number, required,
+ self.index, self.line, self.column, self.get_mark())
+ self.possible_simple_keys[self.flow_level] = key
+
+ def remove_possible_simple_key(self):
+ # Remove the saved possible key position at the current flow level.
+ if self.flow_level in self.possible_simple_keys:
+ key = self.possible_simple_keys[self.flow_level]
+
+ if key.required:
+ raise ScannerError("while scanning a simple key", key.mark,
+ "could not found expected ':'", self.get_mark())
+
+ del self.possible_simple_keys[self.flow_level]
+
+ # Indentation functions.
+
+ def unwind_indent(self, column):
+
+ ## In flow context, tokens should respect indentation.
+ ## Actually the condition should be `self.indent >= column` according to
+ ## the spec. But this condition will prohibit intuitively correct
+ ## constructions such as
+ ## key : {
+ ## }
+ #if self.flow_level and self.indent > column:
+ # raise ScannerError(None, None,
+ # "invalid intendation or unclosed '[' or '{'",
+ # self.get_mark())
+
+ # In the flow context, indentation is ignored. We make the scanner less
+ # restrictive than the specification requires.
+ if self.flow_level:
+ return
+
+ # In block context, we may need to issue the BLOCK-END tokens.
+ while self.indent > column:
+ mark = self.get_mark()
+ self.indent = self.indents.pop()
+ self.tokens.append(BlockEndToken(mark, mark))
+
+ def add_indent(self, column):
+ # Check if we need to increase indentation.
+ if self.indent < column:
+ self.indents.append(self.indent)
+ self.indent = column
+ return True
+ return False
+
+ # Fetchers.
+
+ def fetch_stream_start(self):
+ # We always add STREAM-START as the first token and STREAM-END as the
+ # last token.
+
+ # Read the token.
+ mark = self.get_mark()
+
+ # Add STREAM-START.
+ self.tokens.append(StreamStartToken(mark, mark,
+ encoding=self.encoding))
+
+
+ def fetch_stream_end(self):
+
+ # Set the current indentation to -1.
+ self.unwind_indent(-1)
+
+ # Reset simple keys.
+ self.remove_possible_simple_key()
+ self.allow_simple_key = False
+ self.possible_simple_keys = {}
+
+ # Read the token.
+ mark = self.get_mark()
+
+ # Add STREAM-END.
+ self.tokens.append(StreamEndToken(mark, mark))
+
+ # The stream is finished.
+ self.done = True
+
+ def fetch_directive(self):
+
+ # Set the current indentation to -1.
+ self.unwind_indent(-1)
+
+ # Reset simple keys.
+ self.remove_possible_simple_key()
+ self.allow_simple_key = False
+
+ # Scan and add DIRECTIVE.
+ self.tokens.append(self.scan_directive())
+
+ def fetch_document_start(self):
+ self.fetch_document_indicator(DocumentStartToken)
+
+ def fetch_document_end(self):
+ self.fetch_document_indicator(DocumentEndToken)
+
+ def fetch_document_indicator(self, TokenClass):
+
+ # Set the current indentation to -1.
+ self.unwind_indent(-1)
+
+ # Reset simple keys. Note that there could not be a block collection
+ # after '---'.
+ self.remove_possible_simple_key()
+ self.allow_simple_key = False
+
+ # Add DOCUMENT-START or DOCUMENT-END.
+ start_mark = self.get_mark()
+ self.forward(3)
+ end_mark = self.get_mark()
+ self.tokens.append(TokenClass(start_mark, end_mark))
+
+ def fetch_flow_sequence_start(self):
+ self.fetch_flow_collection_start(FlowSequenceStartToken)
+
+ def fetch_flow_mapping_start(self):
+ self.fetch_flow_collection_start(FlowMappingStartToken)
+
+ def fetch_flow_collection_start(self, TokenClass):
+
+ # '[' and '{' may start a simple key.
+ self.save_possible_simple_key()
+
+ # Increase the flow level.
+ self.flow_level += 1
+
+ # Simple keys are allowed after '[' and '{'.
+ self.allow_simple_key = True
+
+ # Add FLOW-SEQUENCE-START or FLOW-MAPPING-START.
+ start_mark = self.get_mark()
+ self.forward()
+ end_mark = self.get_mark()
+ self.tokens.append(TokenClass(start_mark, end_mark))
+
+ def fetch_flow_sequence_end(self):
+ self.fetch_flow_collection_end(FlowSequenceEndToken)
+
+ def fetch_flow_mapping_end(self):
+ self.fetch_flow_collection_end(FlowMappingEndToken)
+
+ def fetch_flow_collection_end(self, TokenClass):
+
+ # Reset possible simple key on the current level.
+ self.remove_possible_simple_key()
+
+ # Decrease the flow level.
+ self.flow_level -= 1
+
+ # No simple keys after ']' or '}'.
+ self.allow_simple_key = False
+
+ # Add FLOW-SEQUENCE-END or FLOW-MAPPING-END.
+ start_mark = self.get_mark()
+ self.forward()
+ end_mark = self.get_mark()
+ self.tokens.append(TokenClass(start_mark, end_mark))
+
+ def fetch_flow_entry(self):
+
+ # Simple keys are allowed after ','.
+ self.allow_simple_key = True
+
+ # Reset possible simple key on the current level.
+ self.remove_possible_simple_key()
+
+ # Add FLOW-ENTRY.
+ start_mark = self.get_mark()
+ self.forward()
+ end_mark = self.get_mark()
+ self.tokens.append(FlowEntryToken(start_mark, end_mark))
+
+ def fetch_block_entry(self):
+
+ # Block context needs additional checks.
+ if not self.flow_level:
+
+ # Are we allowed to start a new entry?
+ if not self.allow_simple_key:
+ raise ScannerError(None, None,
+ "sequence entries are not allowed here",
+ self.get_mark())
+
+ # We may need to add BLOCK-SEQUENCE-START.
+ if self.add_indent(self.column):
+ mark = self.get_mark()
+ self.tokens.append(BlockSequenceStartToken(mark, mark))
+
+ # It's an error for the block entry to occur in the flow context,
+ # but we let the parser detect this.
+ else:
+ pass
+
+ # Simple keys are allowed after '-'.
+ self.allow_simple_key = True
+
+ # Reset possible simple key on the current level.
+ self.remove_possible_simple_key()
+
+ # Add BLOCK-ENTRY.
+ start_mark = self.get_mark()
+ self.forward()
+ end_mark = self.get_mark()
+ self.tokens.append(BlockEntryToken(start_mark, end_mark))
+
+ def fetch_key(self):
+
+ # Block context needs additional checks.
+ if not self.flow_level:
+
+            # Are we allowed to start a key (not necessarily a simple one)?
+ if not self.allow_simple_key:
+ raise ScannerError(None, None,
+ "mapping keys are not allowed here",
+ self.get_mark())
+
+ # We may need to add BLOCK-MAPPING-START.
+ if self.add_indent(self.column):
+ mark = self.get_mark()
+ self.tokens.append(BlockMappingStartToken(mark, mark))
+
+ # Simple keys are allowed after '?' in the block context.
+ self.allow_simple_key = not self.flow_level
+
+ # Reset possible simple key on the current level.
+ self.remove_possible_simple_key()
+
+ # Add KEY.
+ start_mark = self.get_mark()
+ self.forward()
+ end_mark = self.get_mark()
+ self.tokens.append(KeyToken(start_mark, end_mark))
+
+ def fetch_value(self):
+
+ # Do we determine a simple key?
+ if self.flow_level in self.possible_simple_keys:
+
+ # Add KEY.
+ key = self.possible_simple_keys[self.flow_level]
+ del self.possible_simple_keys[self.flow_level]
+ self.tokens.insert(key.token_number-self.tokens_taken,
+ KeyToken(key.mark, key.mark))
+
+ # If this key starts a new block mapping, we need to add
+ # BLOCK-MAPPING-START.
+ if not self.flow_level:
+ if self.add_indent(key.column):
+ self.tokens.insert(key.token_number-self.tokens_taken,
+ BlockMappingStartToken(key.mark, key.mark))
+
+ # There cannot be two simple keys one after another.
+ self.allow_simple_key = False
+
+ # It must be a part of a complex key.
+ else:
+
+ # Block context needs additional checks.
+            # (Do we really need them? They will be caught by the parser
+ # anyway.)
+ if not self.flow_level:
+
+ # We are allowed to start a complex value if and only if
+ # we can start a simple key.
+ if not self.allow_simple_key:
+ raise ScannerError(None, None,
+ "mapping values are not allowed here",
+ self.get_mark())
+
+ # If this value starts a new block mapping, we need to add
+ # BLOCK-MAPPING-START. It will be detected as an error later by
+ # the parser.
+ if not self.flow_level:
+ if self.add_indent(self.column):
+ mark = self.get_mark()
+ self.tokens.append(BlockMappingStartToken(mark, mark))
+
+ # Simple keys are allowed after ':' in the block context.
+ self.allow_simple_key = not self.flow_level
+
+ # Reset possible simple key on the current level.
+ self.remove_possible_simple_key()
+
+ # Add VALUE.
+ start_mark = self.get_mark()
+ self.forward()
+ end_mark = self.get_mark()
+ self.tokens.append(ValueToken(start_mark, end_mark))
+
+ def fetch_alias(self):
+
+ # ALIAS could be a simple key.
+ self.save_possible_simple_key()
+
+ # No simple keys after ALIAS.
+ self.allow_simple_key = False
+
+ # Scan and add ALIAS.
+ self.tokens.append(self.scan_anchor(AliasToken))
+
+ def fetch_anchor(self):
+
+ # ANCHOR could start a simple key.
+ self.save_possible_simple_key()
+
+ # No simple keys after ANCHOR.
+ self.allow_simple_key = False
+
+ # Scan and add ANCHOR.
+ self.tokens.append(self.scan_anchor(AnchorToken))
+
+ def fetch_tag(self):
+
+ # TAG could start a simple key.
+ self.save_possible_simple_key()
+
+ # No simple keys after TAG.
+ self.allow_simple_key = False
+
+ # Scan and add TAG.
+ self.tokens.append(self.scan_tag())
+
+ def fetch_literal(self):
+ self.fetch_block_scalar(style='|')
+
+ def fetch_folded(self):
+ self.fetch_block_scalar(style='>')
+
+ def fetch_block_scalar(self, style):
+
+ # A simple key may follow a block scalar.
+ self.allow_simple_key = True
+
+ # Reset possible simple key on the current level.
+ self.remove_possible_simple_key()
+
+ # Scan and add SCALAR.
+ self.tokens.append(self.scan_block_scalar(style))
+
+ def fetch_single(self):
+ self.fetch_flow_scalar(style='\'')
+
+ def fetch_double(self):
+ self.fetch_flow_scalar(style='"')
+
+ def fetch_flow_scalar(self, style):
+
+ # A flow scalar could be a simple key.
+ self.save_possible_simple_key()
+
+ # No simple keys after flow scalars.
+ self.allow_simple_key = False
+
+ # Scan and add SCALAR.
+ self.tokens.append(self.scan_flow_scalar(style))
+
+ def fetch_plain(self):
+
+ # A plain scalar could be a simple key.
+ self.save_possible_simple_key()
+
+ # No simple keys after plain scalars. But note that `scan_plain` will
+ # change this flag if the scan is finished at the beginning of the
+ # line.
+ self.allow_simple_key = False
+
+ # Scan and add SCALAR. May change `allow_simple_key`.
+ self.tokens.append(self.scan_plain())
+
+ # Checkers.
+
+ def check_directive(self):
+
+ # DIRECTIVE: ^ '%' ...
+ # The '%' indicator is already checked.
+ if self.column == 0:
+ return True
+
+ def check_document_start(self):
+
+ # DOCUMENT-START: ^ '---' (' '|'\n')
+ if self.column == 0:
+ if self.prefix(3) == '---' \
+ and self.peek(3) in '\0 \t\r\n\x85\u2028\u2029':
+ return True
+
+ def check_document_end(self):
+
+ # DOCUMENT-END: ^ '...' (' '|'\n')
+ if self.column == 0:
+ if self.prefix(3) == '...' \
+ and self.peek(3) in '\0 \t\r\n\x85\u2028\u2029':
+ return True
+
+ def check_block_entry(self):
+
+ # BLOCK-ENTRY: '-' (' '|'\n')
+ return self.peek(1) in '\0 \t\r\n\x85\u2028\u2029'
+
+ def check_key(self):
+
+ # KEY(flow context): '?'
+ if self.flow_level:
+ return True
+
+ # KEY(block context): '?' (' '|'\n')
+ else:
+ return self.peek(1) in '\0 \t\r\n\x85\u2028\u2029'
+
+ def check_value(self):
+
+ # VALUE(flow context): ':'
+ if self.flow_level:
+ return True
+
+ # VALUE(block context): ':' (' '|'\n')
+ else:
+ return self.peek(1) in '\0 \t\r\n\x85\u2028\u2029'
+
+ def check_plain(self):
+
+ # A plain scalar may start with any non-space character except:
+ # '-', '?', ':', ',', '[', ']', '{', '}',
+ # '#', '&', '*', '!', '|', '>', '\'', '\"',
+ # '%', '@', '`'.
+ #
+ # It may also start with
+ # '-', '?', ':'
+ # if it is followed by a non-space character.
+ #
+ # Note that we limit the last rule to the block context (except the
+ # '-' character) because we want the flow context to be space
+ # independent.
+ ch = self.peek()
+ return ch not in '\0 \t\r\n\x85\u2028\u2029-?:,[]{}#&*!|>\'\"%@`' \
+ or (self.peek(1) not in '\0 \t\r\n\x85\u2028\u2029'
+ and (ch == '-' or (not self.flow_level and ch in '?:')))
+
+ # Scanners.
+
+ def scan_to_next_token(self):
+ # We ignore spaces, line breaks and comments.
+ # If we find a line break in the block context, we set the flag
+ # `allow_simple_key` on.
+ # The byte order mark is stripped if it's the first character in the
+ # stream. We do not yet support BOM inside the stream as the
+ # specification requires. Any such mark will be considered as a part
+ # of the document.
+ #
+ # TODO: We need to make tab handling rules more sane. A good rule is
+ # Tabs cannot precede tokens
+ # BLOCK-SEQUENCE-START, BLOCK-MAPPING-START, BLOCK-END,
+ # KEY(block), VALUE(block), BLOCK-ENTRY
+ # So the checking code is
+ # if <TAB>:
+ # self.allow_simple_keys = False
+ # We also need to add the check for `allow_simple_keys == True` to
+ # `unwind_indent` before issuing BLOCK-END.
+ # Scanners for block, flow, and plain scalars need to be modified.
+
+ if self.index == 0 and self.peek() == '\uFEFF':
+ self.forward()
+ found = False
+ while not found:
+ while self.peek() == ' ':
+ self.forward()
+ if self.peek() == '#':
+ while self.peek() not in '\0\r\n\x85\u2028\u2029':
+ self.forward()
+ if self.scan_line_break():
+ if not self.flow_level:
+ self.allow_simple_key = True
+ else:
+ found = True
+
+ def scan_directive(self):
+ # See the specification for details.
+ start_mark = self.get_mark()
+ self.forward()
+ name = self.scan_directive_name(start_mark)
+ value = None
+ if name == 'YAML':
+ value = self.scan_yaml_directive_value(start_mark)
+ end_mark = self.get_mark()
+ elif name == 'TAG':
+ value = self.scan_tag_directive_value(start_mark)
+ end_mark = self.get_mark()
+ else:
+ end_mark = self.get_mark()
+ while self.peek() not in '\0\r\n\x85\u2028\u2029':
+ self.forward()
+ self.scan_directive_ignored_line(start_mark)
+ return DirectiveToken(name, value, start_mark, end_mark)
+
+ def scan_directive_name(self, start_mark):
+ # See the specification for details.
+ length = 0
+ ch = self.peek(length)
+ while '0' <= ch <= '9' or 'A' <= ch <= 'Z' or 'a' <= ch <= 'z' \
+ or ch in '-_':
+ length += 1
+ ch = self.peek(length)
+ if not length:
+ raise ScannerError("while scanning a directive", start_mark,
+ "expected alphabetic or numeric character, but found %r"
+ % ch, self.get_mark())
+ value = self.prefix(length)
+ self.forward(length)
+ ch = self.peek()
+ if ch not in '\0 \r\n\x85\u2028\u2029':
+ raise ScannerError("while scanning a directive", start_mark,
+ "expected alphabetic or numeric character, but found %r"
+ % ch, self.get_mark())
+ return value
+
+ def scan_yaml_directive_value(self, start_mark):
+ # See the specification for details.
+ while self.peek() == ' ':
+ self.forward()
+ major = self.scan_yaml_directive_number(start_mark)
+ if self.peek() != '.':
+ raise ScannerError("while scanning a directive", start_mark,
+ "expected a digit or '.', but found %r" % self.peek(),
+ self.get_mark())
+ self.forward()
+ minor = self.scan_yaml_directive_number(start_mark)
+ if self.peek() not in '\0 \r\n\x85\u2028\u2029':
+ raise ScannerError("while scanning a directive", start_mark,
+ "expected a digit or ' ', but found %r" % self.peek(),
+ self.get_mark())
+ return (major, minor)
+
+ def scan_yaml_directive_number(self, start_mark):
+ # See the specification for details.
+ ch = self.peek()
+ if not ('0' <= ch <= '9'):
+ raise ScannerError("while scanning a directive", start_mark,
+ "expected a digit, but found %r" % ch, self.get_mark())
+ length = 0
+ while '0' <= self.peek(length) <= '9':
+ length += 1
+ value = int(self.prefix(length))
+ self.forward(length)
+ return value
+
+ def scan_tag_directive_value(self, start_mark):
+ # See the specification for details.
+ while self.peek() == ' ':
+ self.forward()
+ handle = self.scan_tag_directive_handle(start_mark)
+ while self.peek() == ' ':
+ self.forward()
+ prefix = self.scan_tag_directive_prefix(start_mark)
+ return (handle, prefix)
+
+ def scan_tag_directive_handle(self, start_mark):
+ # See the specification for details.
+ value = self.scan_tag_handle('directive', start_mark)
+ ch = self.peek()
+ if ch != ' ':
+ raise ScannerError("while scanning a directive", start_mark,
+ "expected ' ', but found %r" % ch, self.get_mark())
+ return value
+
+ def scan_tag_directive_prefix(self, start_mark):
+ # See the specification for details.
+ value = self.scan_tag_uri('directive', start_mark)
+ ch = self.peek()
+ if ch not in '\0 \r\n\x85\u2028\u2029':
+ raise ScannerError("while scanning a directive", start_mark,
+ "expected ' ', but found %r" % ch, self.get_mark())
+ return value
+
+ def scan_directive_ignored_line(self, start_mark):
+ # See the specification for details.
+ while self.peek() == ' ':
+ self.forward()
+ if self.peek() == '#':
+ while self.peek() not in '\0\r\n\x85\u2028\u2029':
+ self.forward()
+ ch = self.peek()
+ if ch not in '\0\r\n\x85\u2028\u2029':
+ raise ScannerError("while scanning a directive", start_mark,
+ "expected a comment or a line break, but found %r"
+ % ch, self.get_mark())
+ self.scan_line_break()
+
+ def scan_anchor(self, TokenClass):
+ # The specification does not restrict characters for anchors and
+ # aliases. This may lead to problems, for instance, the document:
+ # [ *alias, value ]
+        # can be interpreted in two ways, as
+ # [ "value" ]
+ # and
+ # [ *alias , "value" ]
+ # Therefore we restrict aliases to numbers and ASCII letters.
+ start_mark = self.get_mark()
+ indicator = self.peek()
+ if indicator == '*':
+ name = 'alias'
+ else:
+ name = 'anchor'
+ self.forward()
+ length = 0
+ ch = self.peek(length)
+ while '0' <= ch <= '9' or 'A' <= ch <= 'Z' or 'a' <= ch <= 'z' \
+ or ch in '-_':
+ length += 1
+ ch = self.peek(length)
+ if not length:
+ raise ScannerError("while scanning an %s" % name, start_mark,
+ "expected alphabetic or numeric character, but found %r"
+ % ch, self.get_mark())
+ value = self.prefix(length)
+ self.forward(length)
+ ch = self.peek()
+ if ch not in '\0 \t\r\n\x85\u2028\u2029?:,]}%@`':
+ raise ScannerError("while scanning an %s" % name, start_mark,
+ "expected alphabetic or numeric character, but found %r"
+ % ch, self.get_mark())
+ end_mark = self.get_mark()
+ return TokenClass(value, start_mark, end_mark)
+
+ def scan_tag(self):
+ # See the specification for details.
+ start_mark = self.get_mark()
+ ch = self.peek(1)
+ if ch == '<':
+ handle = None
+ self.forward(2)
+ suffix = self.scan_tag_uri('tag', start_mark)
+ if self.peek() != '>':
+ raise ScannerError("while parsing a tag", start_mark,
+ "expected '>', but found %r" % self.peek(),
+ self.get_mark())
+ self.forward()
+ elif ch in '\0 \t\r\n\x85\u2028\u2029':
+ handle = None
+ suffix = '!'
+ self.forward()
+ else:
+ length = 1
+ use_handle = False
+ while ch not in '\0 \r\n\x85\u2028\u2029':
+ if ch == '!':
+ use_handle = True
+ break
+ length += 1
+ ch = self.peek(length)
+ handle = '!'
+ if use_handle:
+ handle = self.scan_tag_handle('tag', start_mark)
+ else:
+ handle = '!'
+ self.forward()
+ suffix = self.scan_tag_uri('tag', start_mark)
+ ch = self.peek()
+ if ch not in '\0 \r\n\x85\u2028\u2029':
+ raise ScannerError("while scanning a tag", start_mark,
+ "expected ' ', but found %r" % ch, self.get_mark())
+ value = (handle, suffix)
+ end_mark = self.get_mark()
+ return TagToken(value, start_mark, end_mark)
+
+ def scan_block_scalar(self, style):
+ # See the specification for details.
+
+ if style == '>':
+ folded = True
+ else:
+ folded = False
+
+ chunks = []
+ start_mark = self.get_mark()
+
+ # Scan the header.
+ self.forward()
+ chomping, increment = self.scan_block_scalar_indicators(start_mark)
+ self.scan_block_scalar_ignored_line(start_mark)
+
+ # Determine the indentation level and go to the first non-empty line.
+ min_indent = self.indent+1
+ if min_indent < 1:
+ min_indent = 1
+ if increment is None:
+ breaks, max_indent, end_mark = self.scan_block_scalar_indentation()
+ indent = max(min_indent, max_indent)
+ else:
+ indent = min_indent+increment-1
+ breaks, end_mark = self.scan_block_scalar_breaks(indent)
+ line_break = ''
+
+ # Scan the inner part of the block scalar.
+ while self.column == indent and self.peek() != '\0':
+ chunks.extend(breaks)
+ leading_non_space = self.peek() not in ' \t'
+ length = 0
+ while self.peek(length) not in '\0\r\n\x85\u2028\u2029':
+ length += 1
+ chunks.append(self.prefix(length))
+ self.forward(length)
+ line_break = self.scan_line_break()
+ breaks, end_mark = self.scan_block_scalar_breaks(indent)
+ if self.column == indent and self.peek() != '\0':
+
+ # Unfortunately, folding rules are ambiguous.
+ #
+ # This is the folding according to the specification:
+
+ if folded and line_break == '\n' \
+ and leading_non_space and self.peek() not in ' \t':
+ if not breaks:
+ chunks.append(' ')
+ else:
+ chunks.append(line_break)
+
+ # This is Clark Evans's interpretation (also in the spec
+ # examples):
+ #
+ #if folded and line_break == '\n':
+ # if not breaks:
+ # if self.peek() not in ' \t':
+ # chunks.append(' ')
+ # else:
+ # chunks.append(line_break)
+ #else:
+ # chunks.append(line_break)
+ else:
+ break
+
+ # Chomp the tail.
+ if chomping is not False:
+ chunks.append(line_break)
+ if chomping is True:
+ chunks.extend(breaks)
+
+ # We are done.
+ return ScalarToken(''.join(chunks), False, start_mark, end_mark,
+ style)
+
+ def scan_block_scalar_indicators(self, start_mark):
+ # See the specification for details.
+ chomping = None
+ increment = None
+ ch = self.peek()
+ if ch in '+-':
+ if ch == '+':
+ chomping = True
+ else:
+ chomping = False
+ self.forward()
+ ch = self.peek()
+ if ch in '0123456789':
+ increment = int(ch)
+ if increment == 0:
+ raise ScannerError("while scanning a block scalar", start_mark,
+ "expected indentation indicator in the range 1-9, but found 0",
+ self.get_mark())
+ self.forward()
+ elif ch in '0123456789':
+ increment = int(ch)
+ if increment == 0:
+ raise ScannerError("while scanning a block scalar", start_mark,
+ "expected indentation indicator in the range 1-9, but found 0",
+ self.get_mark())
+ self.forward()
+ ch = self.peek()
+ if ch in '+-':
+ if ch == '+':
+ chomping = True
+ else:
+ chomping = False
+ self.forward()
+ ch = self.peek()
+ if ch not in '\0 \r\n\x85\u2028\u2029':
+ raise ScannerError("while scanning a block scalar", start_mark,
+ "expected chomping or indentation indicators, but found %r"
+ % ch, self.get_mark())
+ return chomping, increment
+
+ def scan_block_scalar_ignored_line(self, start_mark):
+ # See the specification for details.
+ while self.peek() == ' ':
+ self.forward()
+ if self.peek() == '#':
+ while self.peek() not in '\0\r\n\x85\u2028\u2029':
+ self.forward()
+ ch = self.peek()
+ if ch not in '\0\r\n\x85\u2028\u2029':
+ raise ScannerError("while scanning a block scalar", start_mark,
+ "expected a comment or a line break, but found %r" % ch,
+ self.get_mark())
+ self.scan_line_break()
+
+ def scan_block_scalar_indentation(self):
+ # See the specification for details.
+ chunks = []
+ max_indent = 0
+ end_mark = self.get_mark()
+ while self.peek() in ' \r\n\x85\u2028\u2029':
+ if self.peek() != ' ':
+ chunks.append(self.scan_line_break())
+ end_mark = self.get_mark()
+ else:
+ self.forward()
+ if self.column > max_indent:
+ max_indent = self.column
+ return chunks, max_indent, end_mark
+
+ def scan_block_scalar_breaks(self, indent):
+ # See the specification for details.
+ chunks = []
+ end_mark = self.get_mark()
+ while self.column < indent and self.peek() == ' ':
+ self.forward()
+ while self.peek() in '\r\n\x85\u2028\u2029':
+ chunks.append(self.scan_line_break())
+ end_mark = self.get_mark()
+ while self.column < indent and self.peek() == ' ':
+ self.forward()
+ return chunks, end_mark
+
+ def scan_flow_scalar(self, style):
+ # See the specification for details.
+        # Note that we loosen the indentation rules for quoted scalars. Quoted
+        # scalars don't need to adhere to indentation because " and ' clearly
+        # mark their beginning and end. Therefore we are less
+        # restrictive than the specification requires. We only need to check
+ # that document separators are not included in scalars.
+ if style == '"':
+ double = True
+ else:
+ double = False
+ chunks = []
+ start_mark = self.get_mark()
+ quote = self.peek()
+ self.forward()
+ chunks.extend(self.scan_flow_scalar_non_spaces(double, start_mark))
+ while self.peek() != quote:
+ chunks.extend(self.scan_flow_scalar_spaces(double, start_mark))
+ chunks.extend(self.scan_flow_scalar_non_spaces(double, start_mark))
+ self.forward()
+ end_mark = self.get_mark()
+ return ScalarToken(''.join(chunks), False, start_mark, end_mark,
+ style)
+
+ ESCAPE_REPLACEMENTS = {
+ '0': '\0',
+ 'a': '\x07',
+ 'b': '\x08',
+ 't': '\x09',
+ '\t': '\x09',
+ 'n': '\x0A',
+ 'v': '\x0B',
+ 'f': '\x0C',
+ 'r': '\x0D',
+ 'e': '\x1B',
+ ' ': '\x20',
+ '\"': '\"',
+ '\\': '\\',
+ 'N': '\x85',
+ '_': '\xA0',
+ 'L': '\u2028',
+ 'P': '\u2029',
+ }
+
+ ESCAPE_CODES = {
+ 'x': 2,
+ 'u': 4,
+ 'U': 8,
+ }
+
+ def scan_flow_scalar_non_spaces(self, double, start_mark):
+ # See the specification for details.
+ chunks = []
+ while True:
+ length = 0
+ while self.peek(length) not in '\'\"\\\0 \t\r\n\x85\u2028\u2029':
+ length += 1
+ if length:
+ chunks.append(self.prefix(length))
+ self.forward(length)
+ ch = self.peek()
+ if not double and ch == '\'' and self.peek(1) == '\'':
+ chunks.append('\'')
+ self.forward(2)
+ elif (double and ch == '\'') or (not double and ch in '\"\\'):
+ chunks.append(ch)
+ self.forward()
+ elif double and ch == '\\':
+ self.forward()
+ ch = self.peek()
+ if ch in self.ESCAPE_REPLACEMENTS:
+ chunks.append(self.ESCAPE_REPLACEMENTS[ch])
+ self.forward()
+ elif ch in self.ESCAPE_CODES:
+ length = self.ESCAPE_CODES[ch]
+ self.forward()
+ for k in range(length):
+ if self.peek(k) not in '0123456789ABCDEFabcdef':
+ raise ScannerError("while scanning a double-quoted scalar", start_mark,
+                                    "expected escape sequence of %d hexadecimal numbers, but found %r" %
+ (length, self.peek(k)), self.get_mark())
+ code = int(self.prefix(length), 16)
+ chunks.append(chr(code))
+ self.forward(length)
+ elif ch in '\r\n\x85\u2028\u2029':
+ self.scan_line_break()
+ chunks.extend(self.scan_flow_scalar_breaks(double, start_mark))
+ else:
+ raise ScannerError("while scanning a double-quoted scalar", start_mark,
+ "found unknown escape character %r" % ch, self.get_mark())
+ else:
+ return chunks
+
+ def scan_flow_scalar_spaces(self, double, start_mark):
+ # See the specification for details.
+ chunks = []
+ length = 0
+ while self.peek(length) in ' \t':
+ length += 1
+ whitespaces = self.prefix(length)
+ self.forward(length)
+ ch = self.peek()
+ if ch == '\0':
+ raise ScannerError("while scanning a quoted scalar", start_mark,
+ "found unexpected end of stream", self.get_mark())
+ elif ch in '\r\n\x85\u2028\u2029':
+ line_break = self.scan_line_break()
+ breaks = self.scan_flow_scalar_breaks(double, start_mark)
+ if line_break != '\n':
+ chunks.append(line_break)
+ elif not breaks:
+ chunks.append(' ')
+ chunks.extend(breaks)
+ else:
+ chunks.append(whitespaces)
+ return chunks
+
+ def scan_flow_scalar_breaks(self, double, start_mark):
+ # See the specification for details.
+ chunks = []
+ while True:
+ # Instead of checking indentation, we check for document
+ # separators.
+ prefix = self.prefix(3)
+ if (prefix == '---' or prefix == '...') \
+ and self.peek(3) in '\0 \t\r\n\x85\u2028\u2029':
+ raise ScannerError("while scanning a quoted scalar", start_mark,
+ "found unexpected document separator", self.get_mark())
+ while self.peek() in ' \t':
+ self.forward()
+ if self.peek() in '\r\n\x85\u2028\u2029':
+ chunks.append(self.scan_line_break())
+ else:
+ return chunks
+
+ def scan_plain(self):
+ # See the specification for details.
+ # We add an additional restriction for the flow context:
+ # plain scalars in the flow context cannot contain ',', ':' and '?'.
+ # We also keep track of the `allow_simple_key` flag here.
+        # Indentation rules are loosened for the flow context.
+ chunks = []
+ start_mark = self.get_mark()
+ end_mark = start_mark
+ indent = self.indent+1
+ # We allow zero indentation for scalars, but then we need to check for
+ # document separators at the beginning of the line.
+ #if indent == 0:
+ # indent = 1
+ spaces = []
+ while True:
+ length = 0
+ if self.peek() == '#':
+ break
+ while True:
+ ch = self.peek(length)
+ if ch in '\0 \t\r\n\x85\u2028\u2029' \
+ or (not self.flow_level and ch == ':' and
+ self.peek(length+1) in '\0 \t\r\n\x85\u2028\u2029') \
+ or (self.flow_level and ch in ',:?[]{}'):
+ break
+ length += 1
+ # It's not clear what we should do with ':' in the flow context.
+ if (self.flow_level and ch == ':'
+ and self.peek(length+1) not in '\0 \t\r\n\x85\u2028\u2029,[]{}'):
+ self.forward(length)
+ raise ScannerError("while scanning a plain scalar", start_mark,
+ "found unexpected ':'", self.get_mark(),
+ "Please check http://pyyaml.org/wiki/YAMLColonInFlowContext for details.")
+ if length == 0:
+ break
+ self.allow_simple_key = False
+ chunks.extend(spaces)
+ chunks.append(self.prefix(length))
+ self.forward(length)
+ end_mark = self.get_mark()
+ spaces = self.scan_plain_spaces(indent, start_mark)
+ if not spaces or self.peek() == '#' \
+ or (not self.flow_level and self.column < indent):
+ break
+ return ScalarToken(''.join(chunks), True, start_mark, end_mark)
+
+ def scan_plain_spaces(self, indent, start_mark):
+ # See the specification for details.
+ # The specification is really confusing about tabs in plain scalars.
+ # We just forbid them completely. Do not use tabs in YAML!
+ chunks = []
+ length = 0
+ while self.peek(length) in ' ':
+ length += 1
+ whitespaces = self.prefix(length)
+ self.forward(length)
+ ch = self.peek()
+ if ch in '\r\n\x85\u2028\u2029':
+ line_break = self.scan_line_break()
+ self.allow_simple_key = True
+ prefix = self.prefix(3)
+ if (prefix == '---' or prefix == '...') \
+ and self.peek(3) in '\0 \t\r\n\x85\u2028\u2029':
+ return
+ breaks = []
+ while self.peek() in ' \r\n\x85\u2028\u2029':
+ if self.peek() == ' ':
+ self.forward()
+ else:
+ breaks.append(self.scan_line_break())
+ prefix = self.prefix(3)
+ if (prefix == '---' or prefix == '...') \
+ and self.peek(3) in '\0 \t\r\n\x85\u2028\u2029':
+ return
+ if line_break != '\n':
+ chunks.append(line_break)
+ elif not breaks:
+ chunks.append(' ')
+ chunks.extend(breaks)
+ elif whitespaces:
+ chunks.append(whitespaces)
+ return chunks
+
+ def scan_tag_handle(self, name, start_mark):
+ # See the specification for details.
+        # For some strange reason, the specification does not allow '_' in
+ # tag handles. I have allowed it anyway.
+ ch = self.peek()
+ if ch != '!':
+ raise ScannerError("while scanning a %s" % name, start_mark,
+ "expected '!', but found %r" % ch, self.get_mark())
+ length = 1
+ ch = self.peek(length)
+ if ch != ' ':
+ while '0' <= ch <= '9' or 'A' <= ch <= 'Z' or 'a' <= ch <= 'z' \
+ or ch in '-_':
+ length += 1
+ ch = self.peek(length)
+ if ch != '!':
+ self.forward(length)
+ raise ScannerError("while scanning a %s" % name, start_mark,
+ "expected '!', but found %r" % ch, self.get_mark())
+ length += 1
+ value = self.prefix(length)
+ self.forward(length)
+ return value
+
+ def scan_tag_uri(self, name, start_mark):
+ # See the specification for details.
+ # Note: we do not check if URI is well-formed.
+ chunks = []
+ length = 0
+ ch = self.peek(length)
+ while '0' <= ch <= '9' or 'A' <= ch <= 'Z' or 'a' <= ch <= 'z' \
+ or ch in '-;/?:@&=+$,_.!~*\'()[]%':
+ if ch == '%':
+ chunks.append(self.prefix(length))
+ self.forward(length)
+ length = 0
+ chunks.append(self.scan_uri_escapes(name, start_mark))
+ else:
+ length += 1
+ ch = self.peek(length)
+ if length:
+ chunks.append(self.prefix(length))
+ self.forward(length)
+ length = 0
+ if not chunks:
+ raise ScannerError("while parsing a %s" % name, start_mark,
+ "expected URI, but found %r" % ch, self.get_mark())
+ return ''.join(chunks)
+
+ def scan_uri_escapes(self, name, start_mark):
+ # See the specification for details.
+ codes = []
+ mark = self.get_mark()
+ while self.peek() == '%':
+ self.forward()
+ for k in range(2):
+ if self.peek(k) not in '0123456789ABCDEFabcdef':
+ raise ScannerError("while scanning a %s" % name, start_mark,
+                            "expected URI escape sequence of 2 hexadecimal numbers, but found %r"
+ % self.peek(k), self.get_mark())
+ codes.append(int(self.prefix(2), 16))
+ self.forward(2)
+ try:
+ value = bytes(codes).decode('utf-8')
+ except UnicodeDecodeError as exc:
+ raise ScannerError("while scanning a %s" % name, start_mark, str(exc), mark)
+ return value
+
+ def scan_line_break(self):
+ # Transforms:
+ # '\r\n' : '\n'
+ # '\r' : '\n'
+ # '\n' : '\n'
+ # '\x85' : '\n'
+ # '\u2028' : '\u2028'
+        #   '\u2029'    :   '\u2029'
+ # default : ''
+ ch = self.peek()
+ if ch in '\r\n\x85':
+ if self.prefix(2) == '\r\n':
+ self.forward(2)
+ else:
+ self.forward()
+ return '\n'
+ elif ch in '\u2028\u2029':
+ self.forward()
+ return ch
+ return ''
+
+#try:
+# import psyco
+# psyco.bind(Scanner)
+#except ImportError:
+# pass
+
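The scanner's fetchers above turn indicator characters into the token stream the parser consumes; save_possible_simple_key() buffers a candidate key and fetch_value() later materializes it into a KEY token at the saved position. A minimal sketch of that token stream, assuming the bundled pyyaml3 package (or stock PyYAML) is importable as `yaml`:

    import yaml  # assumption: the vendored pyyaml3 package or stock PyYAML is on sys.path as 'yaml'

    for token in yaml.scan("key: [1, 2]\n"):
        # Expected sequence: StreamStartToken, BlockMappingStartToken, KeyToken,
        # ScalarToken('key'), ValueToken, FlowSequenceStartToken, ScalarToken('1'),
        # FlowEntryToken, ScalarToken('2'), FlowSequenceEndToken, BlockEndToken, StreamEndToken
        print(type(token).__name__, getattr(token, 'value', ''))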
diff --git a/collectors/python.d.plugin/python_modules/pyyaml3/serializer.py b/collectors/python.d.plugin/python_modules/pyyaml3/serializer.py
new file mode 100644
index 000000000..1ba2f7f9d
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/pyyaml3/serializer.py
@@ -0,0 +1,112 @@
+# SPDX-License-Identifier: MIT
+
+__all__ = ['Serializer', 'SerializerError']
+
+from .error import YAMLError
+from .events import *
+from .nodes import *
+
+class SerializerError(YAMLError):
+ pass
+
+class Serializer:
+
+ ANCHOR_TEMPLATE = 'id%03d'
+
+ def __init__(self, encoding=None,
+ explicit_start=None, explicit_end=None, version=None, tags=None):
+ self.use_encoding = encoding
+ self.use_explicit_start = explicit_start
+ self.use_explicit_end = explicit_end
+ self.use_version = version
+ self.use_tags = tags
+ self.serialized_nodes = {}
+ self.anchors = {}
+ self.last_anchor_id = 0
+ self.closed = None
+
+ def open(self):
+ if self.closed is None:
+ self.emit(StreamStartEvent(encoding=self.use_encoding))
+ self.closed = False
+ elif self.closed:
+ raise SerializerError("serializer is closed")
+ else:
+ raise SerializerError("serializer is already opened")
+
+ def close(self):
+ if self.closed is None:
+ raise SerializerError("serializer is not opened")
+ elif not self.closed:
+ self.emit(StreamEndEvent())
+ self.closed = True
+
+ #def __del__(self):
+ # self.close()
+
+ def serialize(self, node):
+ if self.closed is None:
+ raise SerializerError("serializer is not opened")
+ elif self.closed:
+ raise SerializerError("serializer is closed")
+ self.emit(DocumentStartEvent(explicit=self.use_explicit_start,
+ version=self.use_version, tags=self.use_tags))
+ self.anchor_node(node)
+ self.serialize_node(node, None, None)
+ self.emit(DocumentEndEvent(explicit=self.use_explicit_end))
+ self.serialized_nodes = {}
+ self.anchors = {}
+ self.last_anchor_id = 0
+
+ def anchor_node(self, node):
+ if node in self.anchors:
+ if self.anchors[node] is None:
+ self.anchors[node] = self.generate_anchor(node)
+ else:
+ self.anchors[node] = None
+ if isinstance(node, SequenceNode):
+ for item in node.value:
+ self.anchor_node(item)
+ elif isinstance(node, MappingNode):
+ for key, value in node.value:
+ self.anchor_node(key)
+ self.anchor_node(value)
+
+ def generate_anchor(self, node):
+ self.last_anchor_id += 1
+ return self.ANCHOR_TEMPLATE % self.last_anchor_id
+
+ def serialize_node(self, node, parent, index):
+ alias = self.anchors[node]
+ if node in self.serialized_nodes:
+ self.emit(AliasEvent(alias))
+ else:
+ self.serialized_nodes[node] = True
+ self.descend_resolver(parent, index)
+ if isinstance(node, ScalarNode):
+ detected_tag = self.resolve(ScalarNode, node.value, (True, False))
+ default_tag = self.resolve(ScalarNode, node.value, (False, True))
+ implicit = (node.tag == detected_tag), (node.tag == default_tag)
+ self.emit(ScalarEvent(alias, node.tag, implicit, node.value,
+ style=node.style))
+ elif isinstance(node, SequenceNode):
+ implicit = (node.tag
+ == self.resolve(SequenceNode, node.value, True))
+ self.emit(SequenceStartEvent(alias, node.tag, implicit,
+ flow_style=node.flow_style))
+ index = 0
+ for item in node.value:
+ self.serialize_node(item, node, index)
+ index += 1
+ self.emit(SequenceEndEvent())
+ elif isinstance(node, MappingNode):
+ implicit = (node.tag
+ == self.resolve(MappingNode, node.value, True))
+ self.emit(MappingStartEvent(alias, node.tag, implicit,
+ flow_style=node.flow_style))
+ for key, value in node.value:
+ self.serialize_node(key, node, None)
+ self.serialize_node(value, node, key)
+ self.emit(MappingEndEvent())
+ self.ascend_resolver()
+
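The serializer above walks the node graph twice: anchor_node() flags every node reached more than once and generate_anchor() assigns it an 'id%03d' anchor, after which serialize_node() emits either an AliasEvent or the full scalar/sequence/mapping event stream for the emitter. A small round-trip sketch, assuming the bundled package exposes PyYAML's usual compose()/serialize() helpers:

    import yaml  # assumption: the vendored pyyaml3 package or stock PyYAML is importable as 'yaml'

    node = yaml.compose("a: [1, 2]")  # parse the text into a MappingNode graph
    print(yaml.serialize(node))       # open() -> serialize() -> close() re-emits the YAML text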
diff --git a/collectors/python.d.plugin/python_modules/pyyaml3/tokens.py b/collectors/python.d.plugin/python_modules/pyyaml3/tokens.py
new file mode 100644
index 000000000..c5c4fb116
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/pyyaml3/tokens.py
@@ -0,0 +1,105 @@
+# SPDX-License-Identifier: MIT
+
+class Token(object):
+ def __init__(self, start_mark, end_mark):
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+ def __repr__(self):
+ attributes = [key for key in self.__dict__
+ if not key.endswith('_mark')]
+ attributes.sort()
+ arguments = ', '.join(['%s=%r' % (key, getattr(self, key))
+ for key in attributes])
+ return '%s(%s)' % (self.__class__.__name__, arguments)
+
+#class BOMToken(Token):
+# id = '<byte order mark>'
+
+class DirectiveToken(Token):
+ id = '<directive>'
+ def __init__(self, name, value, start_mark, end_mark):
+ self.name = name
+ self.value = value
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+
+class DocumentStartToken(Token):
+ id = '<document start>'
+
+class DocumentEndToken(Token):
+ id = '<document end>'
+
+class StreamStartToken(Token):
+ id = '<stream start>'
+ def __init__(self, start_mark=None, end_mark=None,
+ encoding=None):
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+ self.encoding = encoding
+
+class StreamEndToken(Token):
+ id = '<stream end>'
+
+class BlockSequenceStartToken(Token):
+ id = '<block sequence start>'
+
+class BlockMappingStartToken(Token):
+ id = '<block mapping start>'
+
+class BlockEndToken(Token):
+ id = '<block end>'
+
+class FlowSequenceStartToken(Token):
+ id = '['
+
+class FlowMappingStartToken(Token):
+ id = '{'
+
+class FlowSequenceEndToken(Token):
+ id = ']'
+
+class FlowMappingEndToken(Token):
+ id = '}'
+
+class KeyToken(Token):
+ id = '?'
+
+class ValueToken(Token):
+ id = ':'
+
+class BlockEntryToken(Token):
+ id = '-'
+
+class FlowEntryToken(Token):
+ id = ','
+
+class AliasToken(Token):
+ id = '<alias>'
+ def __init__(self, value, start_mark, end_mark):
+ self.value = value
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+
+class AnchorToken(Token):
+ id = '<anchor>'
+ def __init__(self, value, start_mark, end_mark):
+ self.value = value
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+
+class TagToken(Token):
+ id = '<tag>'
+ def __init__(self, value, start_mark, end_mark):
+ self.value = value
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+
+class ScalarToken(Token):
+ id = '<scalar>'
+ def __init__(self, value, plain, start_mark, end_mark, style=None):
+ self.value = value
+ self.plain = plain
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+ self.style = style
+
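The token classes above are thin records: each carries its start/end marks plus whatever payload fields the subclass defines, and Token.__repr__ prints only the non-mark attributes. A tiny illustration (the import path is an assumption; marks are left as None for brevity):

    from pyyaml3.tokens import ScalarToken  # assumption: the vendored package is importable as 'pyyaml3'

    tok = ScalarToken('hello', True, None, None)
    print(tok)  # -> ScalarToken(plain=True, style=None, value='hello')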
diff --git a/collectors/python.d.plugin/python_modules/third_party/__init__.py b/collectors/python.d.plugin/python_modules/third_party/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/third_party/__init__.py
diff --git a/collectors/python.d.plugin/python_modules/third_party/boinc_client.py b/collectors/python.d.plugin/python_modules/third_party/boinc_client.py
new file mode 100644
index 000000000..ec21779a0
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/third_party/boinc_client.py
@@ -0,0 +1,515 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+#
+# client.py - Somewhat higher-level GUI_RPC API for BOINC core client
+#
+# Copyright (C) 2013 Rodrigo Silva (MestreLion) <linux@rodrigosilva.com>
+# Copyright (C) 2017 Austin S. Hemmelgarn
+#
+# SPDX-License-Identifier: GPL-3.0
+
+# Based on client/boinc_cmd.cpp
+
+import hashlib
+import socket
+import sys
+import time
+from functools import total_ordering
+from xml.etree import ElementTree
+
+GUI_RPC_PASSWD_FILE = "/var/lib/boinc/gui_rpc_auth.cfg"
+
+GUI_RPC_HOSTNAME = None # localhost
+GUI_RPC_PORT = 31416
+GUI_RPC_TIMEOUT = 1
+
+class Rpc(object):
+ ''' Class to perform GUI RPC calls to a BOINC core client.
+ Usage in a context manager ('with' block) is recommended to ensure
+ disconnect() is called. Using the same instance for all calls is also
+ recommended so it reuses the same socket connection
+ '''
+ def __init__(self, hostname="", port=0, timeout=0, text_output=False):
+ self.hostname = hostname
+ self.port = port
+ self.timeout = timeout
+ self.sock = None
+ self.text_output = text_output
+
+ @property
+ def sockargs(self):
+ return (self.hostname, self.port, self.timeout)
+
+ def __enter__(self): self.connect(*self.sockargs); return self
+ def __exit__(self, *args): self.disconnect()
+
+ def connect(self, hostname="", port=0, timeout=0):
+ ''' Connect to (hostname, port) with timeout in seconds.
+ Hostname defaults to None (localhost), and port to 31416
+ Calling multiple times will disconnect previous connection (if any),
+ and (re-)connect to host.
+ '''
+ if self.sock:
+ self.disconnect()
+
+ self.hostname = hostname or GUI_RPC_HOSTNAME
+ self.port = port or GUI_RPC_PORT
+ self.timeout = timeout or GUI_RPC_TIMEOUT
+
+ self.sock = socket.create_connection(self.sockargs[0:2], self.sockargs[2])
+
+ def disconnect(self):
+ ''' Disconnect from host. Calling multiple times is OK (idempotent)
+ '''
+ if self.sock:
+ self.sock.close()
+ self.sock = None
+
+ def call(self, request, text_output=None):
+ ''' Do an RPC call. Pack and send the XML request and return the
+            unpacked reply. request can be either plain XML text or an
+ xml.etree.ElementTree.Element object. Return ElementTree.Element
+ or XML text according to text_output flag.
+ Will auto-connect if not connected.
+ '''
+ if text_output is None:
+ text_output = self.text_output
+
+ if not self.sock:
+ self.connect(*self.sockargs)
+
+ if not isinstance(request, ElementTree.Element):
+ request = ElementTree.fromstring(request)
+
+ # pack request
+ end = '\003'
+ if sys.version_info[0] < 3:
+ req = "<boinc_gui_rpc_request>\n{0}\n</boinc_gui_rpc_request>\n{1}".format(ElementTree.tostring(request).replace(' />', '/>'), end)
+ else:
+ req = "<boinc_gui_rpc_request>\n{0}\n</boinc_gui_rpc_request>\n{1}".format(ElementTree.tostring(request, encoding='unicode').replace(' />', '/>'), end).encode()
+
+ try:
+ self.sock.sendall(req)
+ except (socket.error, socket.herror, socket.gaierror, socket.timeout):
+ raise
+
+ req = ""
+ while True:
+ try:
+ buf = self.sock.recv(8192)
+ if not buf:
+ raise socket.error("No data from socket")
+ if sys.version_info[0] >= 3:
+ buf = buf.decode()
+ except socket.error:
+ raise
+ n = buf.find(end)
+ if not n == -1: break
+ req += buf
+ req += buf[:n]
+
+        # unpack reply (remove root tag, i.e. first and last lines)
+ req = '\n'.join(req.strip().rsplit('\n')[1:-1])
+
+ if text_output:
+ return req
+ else:
+ return ElementTree.fromstring(req)
+
+def setattrs_from_xml(obj, xml, attrfuncdict={}):
+ ''' Helper to set values for attributes of a class instance by mapping
+        matching tags from an XML file.
+ attrfuncdict is a dict of functions to customize value data type of
+ each attribute. It falls back to simple int/float/bool/str detection
+ based on values defined in __init__(). This would not be needed if
+        BOINC used a standard RPC protocol, which includes data types in the XML.
+ '''
+ if not isinstance(xml, ElementTree.Element):
+ xml = ElementTree.fromstring(xml)
+ for e in list(xml):
+ if hasattr(obj, e.tag):
+ attr = getattr(obj, e.tag)
+ attrfunc = attrfuncdict.get(e.tag, None)
+ if attrfunc is None:
+ if isinstance(attr, bool): attrfunc = parse_bool
+ elif isinstance(attr, int): attrfunc = parse_int
+ elif isinstance(attr, float): attrfunc = parse_float
+ elif isinstance(attr, str): attrfunc = parse_str
+ elif isinstance(attr, list): attrfunc = parse_list
+ else: attrfunc = lambda x: x
+ setattr(obj, e.tag, attrfunc(e))
+ else:
+ pass
+ #print "class missing attribute '%s': %r" % (e.tag, obj)
+ return obj
+
+
+def parse_bool(e):
+ ''' Helper to convert ElementTree.Element.text to boolean.
+ Treat '<foo/>' (and '<foo>[[:blank:]]</foo>') as True
+ Treat '0' and 'false' as False
+ '''
+ if e.text is None:
+ return True
+ else:
+ return bool(e.text) and not e.text.strip().lower() in ('0', 'false')
+
+
+def parse_int(e):
+ ''' Helper to convert ElementTree.Element.text to integer.
+ Treat '<foo/>' (and '<foo></foo>') as 0
+ '''
+    # int(float()) allows casting a value expressed as a float in the XML to int
+ return 0 if e.text is None else int(float(e.text.strip()))
+
+
+def parse_float(e):
+ ''' Helper to convert ElementTree.Element.text to float. '''
+ return 0.0 if e.text is None else float(e.text.strip())
+
+
+def parse_str(e):
+ ''' Helper to convert ElementTree.Element.text to string. '''
+ return "" if e.text is None else e.text.strip()
+
+
+def parse_list(e):
+ ''' Helper to convert ElementTree.Element to list. For now, simply return
+ the list of root element's children
+ '''
+ return list(e)
+
+
+class Enum(object):
+ UNKNOWN = -1 # Not in original API
+
+ @classmethod
+ def name(cls, value):
+ ''' Quick-and-dirty fallback for getting the "name" of an enum item '''
+
+ # value as string, if it matches an enum attribute.
+ # Allows short usage as Enum.name("VALUE") besides Enum.name(Enum.VALUE)
+ if hasattr(cls, str(value)):
+ return cls.name(getattr(cls, value, None))
+
+ # value not handled in subclass name()
+ for k, v in cls.__dict__.items():
+ if v == value:
+ return k.lower().replace('_', ' ')
+
+ # value not found
+ return cls.name(Enum.UNKNOWN)
+
+
+class CpuSched(Enum):
+ ''' values of ACTIVE_TASK::scheduler_state and ACTIVE_TASK::next_scheduler_state
+ "SCHEDULED" is synonymous with "executing" except when CPU throttling
+ is in use.
+ '''
+ UNINITIALIZED = 0
+ PREEMPTED = 1
+ SCHEDULED = 2
+
+
+class ResultState(Enum):
+ ''' Values of RESULT::state in client.
+ THESE MUST BE IN NUMERICAL ORDER
+ (because of the > comparison in RESULT::computing_done())
+ see html/inc/common_defs.inc
+ '''
+ NEW = 0
+ #// New result
+ FILES_DOWNLOADING = 1
+ #// Input files for result (WU, app version) are being downloaded
+ FILES_DOWNLOADED = 2
+ #// Files are downloaded, result can be (or is being) computed
+ COMPUTE_ERROR = 3
+ #// computation failed; no file upload
+ FILES_UPLOADING = 4
+ #// Output files for result are being uploaded
+ FILES_UPLOADED = 5
+ #// Files are uploaded, notify scheduling server at some point
+ ABORTED = 6
+ #// result was aborted
+ UPLOAD_FAILED = 7
+ #// some output file permanent failure
+
+
+class Process(Enum):
+ ''' values of ACTIVE_TASK::task_state '''
+ UNINITIALIZED = 0
+ #// process doesn't exist yet
+ EXECUTING = 1
+ #// process is running, as far as we know
+ SUSPENDED = 9
+ #// we've sent it a "suspend" message
+ ABORT_PENDING = 5
+ #// process exceeded limits; send "abort" message, waiting to exit
+ QUIT_PENDING = 8
+ #// we've sent it a "quit" message, waiting to exit
+ COPY_PENDING = 10
+ #// waiting for async file copies to finish
+
+
+class _Struct(object):
+ ''' base helper class with common methods for all classes derived from
+ BOINC's C++ structs
+ '''
+ @classmethod
+ def parse(cls, xml):
+ return setattrs_from_xml(cls(), xml)
+
+ def __str__(self, indent=0):
+ buf = '{0}{1}:\n'.format('\t' * indent, self.__class__.__name__)
+ for attr in self.__dict__:
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ buf += '{0}\t{1} [\n'.format('\t' * indent, attr)
+ for v in value: buf += '\t\t{0}\t\t,\n'.format(v)
+ buf += '\t]\n'
+ else:
+ buf += '{0}\t{1}\t{2}\n'.format('\t' * indent,
+ attr,
+ value.__str__(indent+2)
+ if isinstance(value, _Struct)
+ else repr(value))
+ return buf
+
+
+@total_ordering
+class VersionInfo(_Struct):
+ def __init__(self, major=0, minor=0, release=0):
+ self.major = major
+ self.minor = minor
+ self.release = release
+
+ @property
+ def _tuple(self):
+ return (self.major, self.minor, self.release)
+
+ def __eq__(self, other):
+ return isinstance(other, self.__class__) and self._tuple == other._tuple
+
+ def __ne__(self, other):
+ return not self.__eq__(other)
+
+ def __gt__(self, other):
+ if not isinstance(other, self.__class__):
+ return NotImplemented
+ return self._tuple > other._tuple
+
+ def __str__(self):
+ return "{0}.{1}.{2}".format(self.major, self.minor, self.release)
+
+ def __repr__(self):
+ return "{0}{1}".format(self.__class__.__name__, self._tuple)
+
+
+class Result(_Struct):
+ ''' Also called "task" in some contexts '''
+ def __init__(self):
+ # Names and values follow lib/gui_rpc_client.h @ RESULT
+ # Order too, except when grouping contradicts client/result.cpp
+ # RESULT::write_gui(), then XML order is used.
+
+ self.name = ""
+ self.wu_name = ""
+ self.version_num = 0
+ #// identifies the app used
+ self.plan_class = ""
+ self.project_url = "" # from PROJECT.master_url
+ self.report_deadline = 0.0 # seconds since epoch
+ self.received_time = 0.0 # seconds since epoch
+ #// when we got this from server
+ self.ready_to_report = False
+ #// we're ready to report this result to the server;
+ #// either computation is done and all the files have been uploaded
+ #// or there was an error
+ self.got_server_ack = False
+ #// we've received the ack for this result from the server
+ self.final_cpu_time = 0.0
+ self.final_elapsed_time = 0.0
+ self.state = ResultState.NEW
+ self.estimated_cpu_time_remaining = 0.0
+ #// actually, estimated elapsed time remaining
+ self.exit_status = 0
+ #// return value from the application
+ self.suspended_via_gui = False
+ self.project_suspended_via_gui = False
+ self.edf_scheduled = False
+ #// temporary used to tell GUI that this result is deadline-scheduled
+ self.coproc_missing = False
+ #// a coproc needed by this job is missing
+ #// (e.g. because user removed their GPU board).
+ self.scheduler_wait = False
+ self.scheduler_wait_reason = ""
+ self.network_wait = False
+ self.resources = ""
+ #// textual description of resources used
+
+ #// the following defined if active
+ # XML is generated in client/app.cpp ACTIVE_TASK::write_gui()
+ self.active_task = False
+ self.active_task_state = Process.UNINITIALIZED
+ self.app_version_num = 0
+ self.slot = -1
+ self.pid = 0
+ self.scheduler_state = CpuSched.UNINITIALIZED
+ self.checkpoint_cpu_time = 0.0
+ self.current_cpu_time = 0.0
+ self.fraction_done = 0.0
+ self.elapsed_time = 0.0
+ self.swap_size = 0
+ self.working_set_size_smoothed = 0.0
+ self.too_large = False
+ self.needs_shmem = False
+ self.graphics_exec_path = ""
+ self.web_graphics_url = ""
+ self.remote_desktop_addr = ""
+ self.slot_path = ""
+ #// only present if graphics_exec_path is
+
+ # The following are not in original API, but are present in RPC XML reply
+ self.completed_time = 0.0
+ #// time when ready_to_report was set
+ self.report_immediately = False
+ self.working_set_size = 0
+ self.page_fault_rate = 0.0
+ #// derived by higher-level code
+
+ # The following are in API, but are NEVER in RPC XML reply. Go figure
+ self.signal = 0
+
+ self.app = None # APP*
+ self.wup = None # WORKUNIT*
+ self.project = None # PROJECT*
+ self.avp = None # APP_VERSION*
+
+ @classmethod
+ def parse(cls, xml):
+ if not isinstance(xml, ElementTree.Element):
+ xml = ElementTree.fromstring(xml)
+
+ # parse main XML
+ result = super(Result, cls).parse(xml)
+
+ # parse '<active_task>' children
+ active_task = xml.find('active_task')
+ if active_task is None:
+ result.active_task = False # already the default after __init__()
+ else:
+ result.active_task = True # already the default after main parse
+ result = setattrs_from_xml(result, active_task)
+
+ #// if CPU time is nonzero but elapsed time is zero,
+ #// we must be talking to an old client.
+ #// Set elapsed = CPU
+ #// (easier to deal with this here than in the manager)
+ if result.current_cpu_time != 0 and result.elapsed_time == 0:
+ result.elapsed_time = result.current_cpu_time
+
+ if result.final_cpu_time != 0 and result.final_elapsed_time == 0:
+ result.final_elapsed_time = result.final_cpu_time
+
+ return result
+
+ def __str__(self):
+ buf = '{0}:\n'.format(self.__class__.__name__)
+ for attr in self.__dict__:
+ value = getattr(self, attr)
+ if attr in ['received_time', 'report_deadline']:
+ value = time.ctime(value)
+ buf += '\t{0}\t{1}\n'.format(attr, value)
+ return buf
+
+
+class BoincClient(object):
+
+ def __init__(self, host="", port=0, passwd=None):
+ self.hostname = host
+ self.port = port
+ self.passwd = passwd
+ self.rpc = Rpc(text_output=False)
+ self.version = None
+ self.authorized = False
+
+ # Informative, not authoritative. Records status of *last* RPC call,
+ # but does not infer success about the *next* one.
+ # Thus, it should be read *after* an RPC call, not prior to one
+ self.connected = False
+
+ def __enter__(self): self.connect(); return self
+ def __exit__(self, *args): self.disconnect()
+
+ def connect(self):
+ try:
+ self.rpc.connect(self.hostname, self.port)
+ self.connected = True
+ except socket.error:
+ self.connected = False
+ return
+ self.authorized = self.authorize(self.passwd)
+ self.version = self.exchange_versions()
+
+ def disconnect(self):
+ self.rpc.disconnect()
+
+ def authorize(self, password):
+ ''' Request authorization. If password is None and we are connecting
+ to localhost, try to read password from the local config file
+ GUI_RPC_PASSWD_FILE. If file can't be read (not found or no
+ permission to read), try to authorize with a blank password.
+ If authorization is requested and fails, all subsequent calls
+ will be refused with socket.error 'Connection reset by peer' (104).
+            Since most local calls do not require authorization, do not attempt
+ it if you're not sure about the password.
+ '''
+ if password is None and not self.hostname:
+ password = read_gui_rpc_password() or ""
+ nonce = self.rpc.call('<auth1/>').text
+ authhash = hashlib.md5('{0}{1}'.format(nonce, password).encode()).hexdigest().lower()
+ reply = self.rpc.call('<auth2><nonce_hash>{0}</nonce_hash></auth2>'.format(authhash))
+
+ if reply.tag == 'authorized':
+ return True
+ else:
+ return False
+
+ def exchange_versions(self):
+ ''' Return VersionInfo instance with core client version info '''
+ return VersionInfo.parse(self.rpc.call('<exchange_versions/>'))
+
+ def get_tasks(self):
+ ''' Same as get_results(active_only=False) '''
+ return self.get_results(False)
+
+ def get_results(self, active_only=False):
+ ''' Get a list of results.
+ Those that are in progress will have information such as CPU time
+ and fraction done. Each result includes a name;
+ Use CC_STATE::lookup_result() to find this result in the current static state;
+ if it's not there, call get_state() again.
+ '''
+ reply = self.rpc.call("<get_results><active_only>{0}</active_only></get_results>".format(1 if active_only else 0))
+ if not reply.tag == 'results':
+ return []
+
+ results = []
+ for item in list(reply):
+ results.append(Result.parse(item))
+
+ return results
+
+
+def read_gui_rpc_password():
+    ''' Read the password string from GUI_RPC_PASSWD_FILE, trim the trailing newline
+ (if any), and return it
+ '''
+ try:
+ with open(GUI_RPC_PASSWD_FILE, 'r') as f:
+ buf = f.read()
+            if buf.endswith('\n'): return buf[:-1] # trim the trailing newline
+ else: return buf
+ except IOError:
+ # Permission denied or File not found.
+ pass
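Putting the pieces together, BoincClient wraps the Rpc transport, performs the MD5 nonce handshake in authorize(), and maps XML replies onto the _Struct subclasses above. A minimal usage sketch (the module path and connection details are illustrative assumptions):

    from third_party.boinc_client import BoincClient  # assumption: package importable under this path

    with BoincClient(host="", passwd=None) as client:  # empty host falls back to localhost
        if client.connected:
            print("core client version:", client.version)
            for task in client.get_tasks():
                print(task.name, task.fraction_done)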
diff --git a/collectors/python.d.plugin/python_modules/third_party/lm_sensors.py b/collectors/python.d.plugin/python_modules/third_party/lm_sensors.py
new file mode 100644
index 000000000..f10cd6209
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/third_party/lm_sensors.py
@@ -0,0 +1,258 @@
+# SPDX-License-Identifier: LGPL-2.1
+"""
+@package sensors.py
+Python Bindings for libsensors3
+
+use the documentation of libsensors for the low level API.
+see example.py for high level API usage.
+
+@author: Pavel Rojtberg (http://www.rojtberg.net)
+@see: https://github.com/paroj/sensors.py
+@copyright: LGPLv2 (same as libsensors) <http://opensource.org/licenses/LGPL-2.1>
+"""
+
+from ctypes import *
+import ctypes.util
+
+_libc = cdll.LoadLibrary(ctypes.util.find_library("c"))
+# see https://github.com/paroj/sensors.py/issues/1
+_libc.free.argtypes = [c_void_p]
+_hdl = cdll.LoadLibrary(ctypes.util.find_library("sensors"))
+
+version = c_char_p.in_dll(_hdl, "libsensors_version").value.decode("ascii")
+
+
+class bus_id(Structure):
+ _fields_ = [("type", c_short),
+ ("nr", c_short)]
+
+
+class chip_name(Structure):
+ _fields_ = [("prefix", c_char_p),
+ ("bus", bus_id),
+ ("addr", c_int),
+ ("path", c_char_p)]
+
+
+class feature(Structure):
+ _fields_ = [("name", c_char_p),
+ ("number", c_int),
+ ("type", c_int)]
+
+ # sensors_feature_type
+ IN = 0x00
+ FAN = 0x01
+ TEMP = 0x02
+ POWER = 0x03
+ ENERGY = 0x04
+ CURR = 0x05
+ HUMIDITY = 0x06
+ MAX_MAIN = 0x7
+ VID = 0x10
+ INTRUSION = 0x11
+ MAX_OTHER = 0x12
+ BEEP_ENABLE = 0x18
+
+
+class subfeature(Structure):
+ _fields_ = [("name", c_char_p),
+ ("number", c_int),
+ ("type", c_int),
+ ("mapping", c_int),
+ ("flags", c_uint)]
+
+
+_hdl.sensors_get_detected_chips.restype = POINTER(chip_name)
+_hdl.sensors_get_features.restype = POINTER(feature)
+_hdl.sensors_get_all_subfeatures.restype = POINTER(subfeature)
+_hdl.sensors_get_label.restype = c_void_p # return pointer instead of str so we can free it
+_hdl.sensors_get_adapter_name.restype = c_char_p # docs do not say whether to free this or not
+_hdl.sensors_strerror.restype = c_char_p
+
+### RAW API ###
+MODE_R = 1
+MODE_W = 2
+COMPUTE_MAPPING = 4
+
+
+def init(cfg_file=None):
+ file = _libc.fopen(cfg_file.encode("utf-8"), "r") if cfg_file is not None else None
+
+ if _hdl.sensors_init(file) != 0:
+ raise Exception("sensors_init failed")
+
+ if file is not None:
+ _libc.fclose(file)
+
+
+def cleanup():
+ _hdl.sensors_cleanup()
+
+
+def parse_chip_name(orig_name):
+ ret = chip_name()
+ err = _hdl.sensors_parse_chip_name(orig_name.encode("utf-8"), byref(ret))
+
+ if err < 0:
+ raise Exception(strerror(err))
+
+ return ret
+
+
+def strerror(errnum):
+ return _hdl.sensors_strerror(errnum).decode("utf-8")
+
+
+def free_chip_name(chip):
+ _hdl.sensors_free_chip_name(byref(chip))
+
+
+def get_detected_chips(match, nr):
+ """
+ @return: (chip, next nr to query)
+ """
+ _nr = c_int(nr)
+
+ if match is not None:
+ match = byref(match)
+
+ chip = _hdl.sensors_get_detected_chips(match, byref(_nr))
+ chip = chip.contents if bool(chip) else None
+ return chip, _nr.value
+
+
+def chip_snprintf_name(chip, buffer_size=200):
+ """
+ @param buffer_size defaults to the size used in the sensors utility
+ """
+ ret = create_string_buffer(buffer_size)
+ err = _hdl.sensors_snprintf_chip_name(ret, buffer_size, byref(chip))
+
+ if err < 0:
+ raise Exception(strerror(err))
+
+ return ret.value.decode("utf-8")
+
+
+def do_chip_sets(chip):
+ """
+ @attention this function was not tested
+ """
+ err = _hdl.sensors_do_chip_sets(byref(chip))
+ if err < 0:
+ raise Exception(strerror(err))
+
+
+def get_adapter_name(bus):
+ return _hdl.sensors_get_adapter_name(byref(bus)).decode("utf-8")
+
+
+def get_features(chip, nr):
+ """
+ @return: (feature, next nr to query)
+ """
+ _nr = c_int(nr)
+ feature = _hdl.sensors_get_features(byref(chip), byref(_nr))
+ feature = feature.contents if bool(feature) else None
+ return feature, _nr.value
+
+
+def get_label(chip, feature):
+ ptr = _hdl.sensors_get_label(byref(chip), byref(feature))
+ val = cast(ptr, c_char_p).value.decode("utf-8")
+ _libc.free(ptr)
+ return val
+
+
+def get_all_subfeatures(chip, feature, nr):
+ """
+ @return: (subfeature, next nr to query)
+ """
+ _nr = c_int(nr)
+ subfeature = _hdl.sensors_get_all_subfeatures(byref(chip), byref(feature), byref(_nr))
+ subfeature = subfeature.contents if bool(subfeature) else None
+ return subfeature, _nr.value
+
+
+def get_value(chip, subfeature_nr):
+ val = c_double()
+ err = _hdl.sensors_get_value(byref(chip), subfeature_nr, byref(val))
+ if err < 0:
+ raise Exception(strerror(err))
+ return val.value
+
+
+def set_value(chip, subfeature_nr, value):
+ """
+ @attention this function was not tested
+ """
+ val = c_double(value)
+ err = _hdl.sensors_set_value(byref(chip), subfeature_nr, byref(val))
+ if err < 0:
+ raise Exception(strerror(err))
+
+
+### Convenience API ###
+class ChipIterator:
+ def __init__(self, match=None):
+ self.match = parse_chip_name(match) if match is not None else None
+ self.nr = 0
+
+ def __iter__(self):
+ return self
+
+ def __next__(self):
+ chip, self.nr = get_detected_chips(self.match, self.nr)
+
+ if chip is None:
+ raise StopIteration
+
+ return chip
+
+ def __del__(self):
+ if self.match is not None:
+ free_chip_name(self.match)
+
+    def next(self): # python2 compatibility
+ return self.__next__()
+
+
+class FeatureIterator:
+ def __init__(self, chip):
+ self.chip = chip
+ self.nr = 0
+
+ def __iter__(self):
+ return self
+
+ def __next__(self):
+ feature, self.nr = get_features(self.chip, self.nr)
+
+ if feature is None:
+ raise StopIteration
+
+ return feature
+
+    def next(self): # python2 compatibility
+ return self.__next__()
+
+
+class SubFeatureIterator:
+ def __init__(self, chip, feature):
+ self.chip = chip
+ self.feature = feature
+ self.nr = 0
+
+ def __iter__(self):
+ return self
+
+ def __next__(self):
+ subfeature, self.nr = get_all_subfeatures(self.chip, self.feature, self.nr)
+
+ if subfeature is None:
+ raise StopIteration
+
+ return subfeature
+
+    def next(self): # python2 compatibility
+ return self.__next__()
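
The three iterator classes above wrap the raw libsensors calls into plain Python loops. A minimal usage sketch, not part of the patch (the import name lm_sensors and the example chip name are assumptions):

    # Hypothetical walk over every detected chip/feature/subfeature using the
    # binding above; "lm_sensors" is an assumed import name for this module.
    import lm_sensors as sensors

    sensors.init()                       # load the default libsensors config
    try:
        for chip in sensors.ChipIterator():            # e.g. coretemp-isa-0000
            print(sensors.chip_snprintf_name(chip))
            for feature in sensors.FeatureIterator(chip):
                label = sensors.get_label(chip, feature)
                for sub in sensors.SubFeatureIterator(chip, feature):
                    value = sensors.get_value(chip, sub.number)
                    print('  %s (%s): %.2f' % (label, sub.name.decode(), value))
    finally:
        sensors.cleanup()
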
diff --git a/collectors/python.d.plugin/python_modules/third_party/mcrcon.py b/collectors/python.d.plugin/python_modules/third_party/mcrcon.py
new file mode 100644
index 000000000..a65a304b6
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/third_party/mcrcon.py
@@ -0,0 +1,74 @@
+# Minecraft Remote Console module.
+#
+# Copyright (C) 2015 Barnaby Gale
+#
+# SPDX-License-Identifier: MIT
+
+import socket
+import select
+import struct
+import time
+
+
+class MCRconException(Exception):
+ pass
+
+
+class MCRcon(object):
+ socket = None
+
+ def connect(self, host, port, password):
+ if self.socket is not None:
+ raise MCRconException("Already connected")
+ self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+ self.socket.settimeout(0.9)
+ self.socket.connect((host, port))
+ self.send(3, password)
+
+ def disconnect(self):
+ if self.socket is None:
+ raise MCRconException("Already disconnected")
+ self.socket.close()
+ self.socket = None
+
+ def read(self, length):
+ data = b""
+ while len(data) < length:
+ data += self.socket.recv(length - len(data))
+ return data
+
+ def send(self, out_type, out_data):
+ if self.socket is None:
+ raise MCRconException("Must connect before sending data")
+
+ # Send a request packet
+ out_payload = struct.pack('<ii', 0, out_type) + out_data.encode('utf8') + b'\x00\x00'
+ out_length = struct.pack('<i', len(out_payload))
+ self.socket.send(out_length + out_payload)
+
+ # Read response packets
+ in_data = ""
+ while True:
+ # Read a packet
+ in_length, = struct.unpack('<i', self.read(4))
+ in_payload = self.read(in_length)
+            in_id, in_type = struct.unpack('<ii', in_payload[:8])
+ in_data_partial, in_padding = in_payload[8:-2], in_payload[-2:]
+
+ # Sanity checks
+ if in_padding != b'\x00\x00':
+ raise MCRconException("Incorrect padding")
+ if in_id == -1:
+ raise MCRconException("Login failed")
+
+ # Record the response
+ in_data += in_data_partial.decode('utf8')
+
+ # If there's nothing more to receive, return the response
+ if len(select.select([self.socket], [], [], 0)[0]) == 0:
+ return in_data
+
+ def command(self, command):
+ result = self.send(2, command)
+ time.sleep(0.003) # MC-72390 workaround
+ return result
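
A short sketch of driving the client above against a local server (the host, port and password are placeholders, and RCON must be enabled on the Minecraft server):

    from mcrcon import MCRcon

    rcon = MCRcon()
    rcon.connect('127.0.0.1', 25575, 'secret')   # packet type 3 = login
    try:
        print(rcon.command('list'))              # packet type 2 = run a command
    finally:
        rcon.disconnect()
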
diff --git a/collectors/python.d.plugin/python_modules/third_party/monotonic.py b/collectors/python.d.plugin/python_modules/third_party/monotonic.py
new file mode 100644
index 000000000..da04bb857
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/third_party/monotonic.py
@@ -0,0 +1,171 @@
+# -*- coding: utf-8 -*-
+#
+# SPDX-License-Identifier: Apache-2.0
+"""
+ monotonic
+ ~~~~~~~~~
+
+ This module provides a ``monotonic()`` function which returns the
+ value (in fractional seconds) of a clock which never goes backwards.
+
+ On Python 3.3 or newer, ``monotonic`` will be an alias of
+ ``time.monotonic`` from the standard library. On older versions,
+ it will fall back to an equivalent implementation:
+
+ +-------------+----------------------------------------+
+ | Linux, BSD | ``clock_gettime(3)`` |
+ +-------------+----------------------------------------+
+ | Windows | ``GetTickCount`` or ``GetTickCount64`` |
+ +-------------+----------------------------------------+
+ | OS X | ``mach_absolute_time`` |
+ +-------------+----------------------------------------+
+
+ If no suitable implementation exists for the current platform,
+ attempting to import this module (or to import from it) will
+ cause a ``RuntimeError`` exception to be raised.
+
+
+ Copyright 2014, 2015, 2016 Ori Livneh <ori@wikimedia.org>
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+
+"""
+import time
+
+
+__all__ = ('monotonic',)
+
+
+try:
+ monotonic = time.monotonic
+except AttributeError:
+ import ctypes
+ import ctypes.util
+ import os
+ import sys
+ import threading
+ try:
+ if sys.platform == 'darwin': # OS X, iOS
+ # See Technical Q&A QA1398 of the Mac Developer Library:
+ # <https://developer.apple.com/library/mac/qa/qa1398/>
+ libc = ctypes.CDLL('/usr/lib/libc.dylib', use_errno=True)
+
+ class mach_timebase_info_data_t(ctypes.Structure):
+ """System timebase info. Defined in <mach/mach_time.h>."""
+ _fields_ = (('numer', ctypes.c_uint32),
+ ('denom', ctypes.c_uint32))
+
+ mach_absolute_time = libc.mach_absolute_time
+ mach_absolute_time.restype = ctypes.c_uint64
+
+ timebase = mach_timebase_info_data_t()
+ libc.mach_timebase_info(ctypes.byref(timebase))
+ ticks_per_second = timebase.numer / timebase.denom * 1.0e9
+
+ def monotonic():
+ """Monotonic clock, cannot go backward."""
+ return mach_absolute_time() / ticks_per_second
+
+ elif sys.platform.startswith('win32') or sys.platform.startswith('cygwin'):
+ if sys.platform.startswith('cygwin'):
+ # Note: cygwin implements clock_gettime (CLOCK_MONOTONIC = 4) since
+ # version 1.7.6. Using raw WinAPI for maximum version compatibility.
+
+ # Ugly hack using the wrong calling convention (in 32-bit mode)
+ # because ctypes has no windll under cygwin (and it also seems that
+ # the code letting you select stdcall in _ctypes doesn't exist under
+ # the preprocessor definitions relevant to cygwin).
+ # This is 'safe' because:
+ # 1. The ABI of GetTickCount and GetTickCount64 is identical for
+ # both calling conventions because they both have no parameters.
+ # 2. libffi masks the problem because after making the call it doesn't
+ # touch anything through esp and epilogue code restores a correct
+ # esp from ebp afterwards.
+ try:
+ kernel32 = ctypes.cdll.kernel32
+ except OSError: # 'No such file or directory'
+ kernel32 = ctypes.cdll.LoadLibrary('kernel32.dll')
+ else:
+ kernel32 = ctypes.windll.kernel32
+
+ GetTickCount64 = getattr(kernel32, 'GetTickCount64', None)
+ if GetTickCount64:
+ # Windows Vista / Windows Server 2008 or newer.
+ GetTickCount64.restype = ctypes.c_ulonglong
+
+ def monotonic():
+ """Monotonic clock, cannot go backward."""
+ return GetTickCount64() / 1000.0
+
+ else:
+ # Before Windows Vista.
+ GetTickCount = kernel32.GetTickCount
+ GetTickCount.restype = ctypes.c_uint32
+
+ get_tick_count_lock = threading.Lock()
+ get_tick_count_last_sample = 0
+ get_tick_count_wraparounds = 0
+
+ def monotonic():
+ """Monotonic clock, cannot go backward."""
+ global get_tick_count_last_sample
+ global get_tick_count_wraparounds
+
+ with get_tick_count_lock:
+ current_sample = GetTickCount()
+ if current_sample < get_tick_count_last_sample:
+ get_tick_count_wraparounds += 1
+ get_tick_count_last_sample = current_sample
+
+ final_milliseconds = get_tick_count_wraparounds << 32
+ final_milliseconds += get_tick_count_last_sample
+ return final_milliseconds / 1000.0
+
+ else:
+ try:
+ clock_gettime = ctypes.CDLL(ctypes.util.find_library('c'),
+ use_errno=True).clock_gettime
+ except Exception:
+ clock_gettime = ctypes.CDLL(ctypes.util.find_library('rt'),
+ use_errno=True).clock_gettime
+
+ class timespec(ctypes.Structure):
+ """Time specification, as described in clock_gettime(3)."""
+ _fields_ = (('tv_sec', ctypes.c_long),
+ ('tv_nsec', ctypes.c_long))
+
+ if sys.platform.startswith('linux'):
+ CLOCK_MONOTONIC = 1
+ elif sys.platform.startswith('freebsd'):
+ CLOCK_MONOTONIC = 4
+ elif sys.platform.startswith('sunos5'):
+ CLOCK_MONOTONIC = 4
+ elif 'bsd' in sys.platform:
+ CLOCK_MONOTONIC = 3
+ elif sys.platform.startswith('aix'):
+ CLOCK_MONOTONIC = ctypes.c_longlong(10)
+
+ def monotonic():
+ """Monotonic clock, cannot go backward."""
+ ts = timespec()
+ if clock_gettime(CLOCK_MONOTONIC, ctypes.pointer(ts)):
+ errno = ctypes.get_errno()
+ raise OSError(errno, os.strerror(errno))
+ return ts.tv_sec + ts.tv_nsec / 1.0e9
+
+ # Perform a sanity-check.
+ if monotonic() - monotonic() > 0:
+ raise ValueError('monotonic() is not monotonic!')
+
+ except Exception as e:
+ raise RuntimeError('no suitable implementation for this system: ' + repr(e))
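
Whichever backend is selected above, the resulting monotonic() is used the same way. A small sketch of timing a section of code with it:

    import time
    from monotonic import monotonic

    start = monotonic()
    time.sleep(0.1)                    # stand-in for the work being measured
    elapsed = monotonic() - start      # unaffected by wall-clock adjustments
    print('elapsed: %.3f s' % elapsed)
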
diff --git a/collectors/python.d.plugin/python_modules/third_party/ordereddict.py b/collectors/python.d.plugin/python_modules/third_party/ordereddict.py
new file mode 100644
index 000000000..589401b8f
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/third_party/ordereddict.py
@@ -0,0 +1,110 @@
+# Copyright (c) 2009 Raymond Hettinger
+#
+# SPDX-License-Identifier: MIT
+
+from UserDict import DictMixin
+
+
+class OrderedDict(dict, DictMixin):
+
+ def __init__(self, *args, **kwds):
+ if len(args) > 1:
+ raise TypeError('expected at most 1 arguments, got %d' % len(args))
+ try:
+ self.__end
+ except AttributeError:
+ self.clear()
+ self.update(*args, **kwds)
+
+ def clear(self):
+ self.__end = end = []
+ end += [None, end, end] # sentinel node for doubly linked list
+ self.__map = {} # key --> [key, prev, next]
+ dict.clear(self)
+
+ def __setitem__(self, key, value):
+ if key not in self:
+ end = self.__end
+ curr = end[1]
+ curr[2] = end[1] = self.__map[key] = [key, curr, end]
+ dict.__setitem__(self, key, value)
+
+ def __delitem__(self, key):
+ dict.__delitem__(self, key)
+ key, prev, next = self.__map.pop(key)
+ prev[2] = next
+ next[1] = prev
+
+ def __iter__(self):
+ end = self.__end
+ curr = end[2]
+ while curr is not end:
+ yield curr[0]
+ curr = curr[2]
+
+ def __reversed__(self):
+ end = self.__end
+ curr = end[1]
+ while curr is not end:
+ yield curr[0]
+ curr = curr[1]
+
+ def popitem(self, last=True):
+ if not self:
+ raise KeyError('dictionary is empty')
+ if last:
+ key = reversed(self).next()
+ else:
+ key = iter(self).next()
+ value = self.pop(key)
+ return key, value
+
+ def __reduce__(self):
+ items = [[k, self[k]] for k in self]
+ tmp = self.__map, self.__end
+ del self.__map, self.__end
+ inst_dict = vars(self).copy()
+ self.__map, self.__end = tmp
+ if inst_dict:
+ return self.__class__, (items,), inst_dict
+ return self.__class__, (items,)
+
+ def keys(self):
+ return list(self)
+
+ setdefault = DictMixin.setdefault
+ update = DictMixin.update
+ pop = DictMixin.pop
+ values = DictMixin.values
+ items = DictMixin.items
+ iterkeys = DictMixin.iterkeys
+ itervalues = DictMixin.itervalues
+ iteritems = DictMixin.iteritems
+
+ def __repr__(self):
+ if not self:
+ return '%s()' % (self.__class__.__name__,)
+ return '%s(%r)' % (self.__class__.__name__, self.items())
+
+ def copy(self):
+ return self.__class__(self)
+
+ @classmethod
+ def fromkeys(cls, iterable, value=None):
+ d = cls()
+ for key in iterable:
+ d[key] = value
+ return d
+
+ def __eq__(self, other):
+ if isinstance(other, OrderedDict):
+ if len(self) != len(other):
+ return False
+ for p, q in zip(self.items(), other.items()):
+ if p != q:
+ return False
+ return True
+ return dict.__eq__(self, other)
+
+ def __ne__(self, other):
+ return not self == other
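
This backport is only needed (and only works) on Python 2, since it builds on UserDict.DictMixin. A quick sketch of the ordering guarantees it provides:

    # Python 2 only: the backport relies on UserDict.DictMixin.
    from ordereddict import OrderedDict

    d = OrderedDict()
    d['b'] = 2
    d['a'] = 1
    d['c'] = 3
    print(d.keys())        # ['b', 'a', 'c'] - insertion order is preserved
    print(d.popitem())     # ('c', 3) - pops the most recently inserted pair
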
diff --git a/collectors/python.d.plugin/python_modules/urllib3/__init__.py b/collectors/python.d.plugin/python_modules/urllib3/__init__.py
new file mode 100644
index 000000000..3add84816
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/urllib3/__init__.py
@@ -0,0 +1,98 @@
+# SPDX-License-Identifier: MIT
+"""
+urllib3 - Thread-safe connection pooling and re-using.
+"""
+
+from __future__ import absolute_import
+import warnings
+
+from .connectionpool import (
+ HTTPConnectionPool,
+ HTTPSConnectionPool,
+ connection_from_url
+)
+
+from . import exceptions
+from .filepost import encode_multipart_formdata
+from .poolmanager import PoolManager, ProxyManager, proxy_from_url
+from .response import HTTPResponse
+from .util.request import make_headers
+from .util.url import get_host
+from .util.timeout import Timeout
+from .util.retry import Retry
+
+
+# Set default logging handler to avoid "No handler found" warnings.
+import logging
+try: # Python 2.7+
+ from logging import NullHandler
+except ImportError:
+ class NullHandler(logging.Handler):
+ def emit(self, record):
+ pass
+
+__author__ = 'Andrey Petrov (andrey.petrov@shazow.net)'
+__license__ = 'MIT'
+__version__ = '1.21.1'
+
+__all__ = (
+ 'HTTPConnectionPool',
+ 'HTTPSConnectionPool',
+ 'PoolManager',
+ 'ProxyManager',
+ 'HTTPResponse',
+ 'Retry',
+ 'Timeout',
+ 'add_stderr_logger',
+ 'connection_from_url',
+ 'disable_warnings',
+ 'encode_multipart_formdata',
+ 'get_host',
+ 'make_headers',
+ 'proxy_from_url',
+)
+
+logging.getLogger(__name__).addHandler(NullHandler())
+
+
+def add_stderr_logger(level=logging.DEBUG):
+ """
+ Helper for quickly adding a StreamHandler to the logger. Useful for
+ debugging.
+
+ Returns the handler after adding it.
+ """
+ # This method needs to be in this __init__.py to get the __name__ correct
+ # even if urllib3 is vendored within another package.
+ logger = logging.getLogger(__name__)
+ handler = logging.StreamHandler()
+ handler.setFormatter(logging.Formatter('%(asctime)s %(levelname)s %(message)s'))
+ logger.addHandler(handler)
+ logger.setLevel(level)
+ logger.debug('Added a stderr logging handler to logger: %s', __name__)
+ return handler
+
+
+# ... Clean up.
+del NullHandler
+
+
+# All warning filters *must* be appended unless you're really certain that they
+# shouldn't be: otherwise, it's very hard for users to use most Python
+# mechanisms to silence them.
+# SecurityWarning's always go off by default.
+warnings.simplefilter('always', exceptions.SecurityWarning, append=True)
+# SubjectAltNameWarning's should go off once per host
+warnings.simplefilter('default', exceptions.SubjectAltNameWarning, append=True)
+# InsecurePlatformWarning's don't vary between requests, so we keep it default.
+warnings.simplefilter('default', exceptions.InsecurePlatformWarning,
+ append=True)
+# SNIMissingWarnings should go off only once.
+warnings.simplefilter('default', exceptions.SNIMissingWarning, append=True)
+
+
+def disable_warnings(category=exceptions.HTTPWarning):
+ """
+ Helper for quickly disabling all urllib3 warnings.
+ """
+ warnings.simplefilter('ignore', category)
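
A hedged sketch of the vendored package in use (the URL is a placeholder, and the plain "import urllib3" assumes the bundled python_modules directory is on sys.path):

    import urllib3

    urllib3.add_stderr_logger()        # attach a stderr handler for debugging
    urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)

    http = urllib3.PoolManager(maxsize=4)
    response = http.request('GET', 'http://example.com/')
    print(response.status, len(response.data))
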
diff --git a/collectors/python.d.plugin/python_modules/urllib3/_collections.py b/collectors/python.d.plugin/python_modules/urllib3/_collections.py
new file mode 100644
index 000000000..c1d2fad36
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/urllib3/_collections.py
@@ -0,0 +1,315 @@
+# SPDX-License-Identifier: MIT
+from __future__ import absolute_import
+from collections import Mapping, MutableMapping
+try:
+ from threading import RLock
+except ImportError: # Platform-specific: No threads available
+ class RLock:
+ def __enter__(self):
+ pass
+
+ def __exit__(self, exc_type, exc_value, traceback):
+ pass
+
+
+try: # Python 2.7+
+ from collections import OrderedDict
+except ImportError:
+ from .packages.ordered_dict import OrderedDict
+from .packages.six import iterkeys, itervalues, PY3
+
+
+__all__ = ['RecentlyUsedContainer', 'HTTPHeaderDict']
+
+
+_Null = object()
+
+
+class RecentlyUsedContainer(MutableMapping):
+ """
+ Provides a thread-safe dict-like container which maintains up to
+ ``maxsize`` keys while throwing away the least-recently-used keys beyond
+ ``maxsize``.
+
+ :param maxsize:
+ Maximum number of recent elements to retain.
+
+ :param dispose_func:
+ Every time an item is evicted from the container,
+        Every time an item is evicted from the container,
+        ``dispose_func(value)`` is called.
+
+ ContainerCls = OrderedDict
+
+ def __init__(self, maxsize=10, dispose_func=None):
+ self._maxsize = maxsize
+ self.dispose_func = dispose_func
+
+ self._container = self.ContainerCls()
+ self.lock = RLock()
+
+ def __getitem__(self, key):
+ # Re-insert the item, moving it to the end of the eviction line.
+ with self.lock:
+ item = self._container.pop(key)
+ self._container[key] = item
+ return item
+
+ def __setitem__(self, key, value):
+ evicted_value = _Null
+ with self.lock:
+ # Possibly evict the existing value of 'key'
+ evicted_value = self._container.get(key, _Null)
+ self._container[key] = value
+
+ # If we didn't evict an existing value, we might have to evict the
+ # least recently used item from the beginning of the container.
+ if len(self._container) > self._maxsize:
+ _key, evicted_value = self._container.popitem(last=False)
+
+ if self.dispose_func and evicted_value is not _Null:
+ self.dispose_func(evicted_value)
+
+ def __delitem__(self, key):
+ with self.lock:
+ value = self._container.pop(key)
+
+ if self.dispose_func:
+ self.dispose_func(value)
+
+ def __len__(self):
+ with self.lock:
+ return len(self._container)
+
+ def __iter__(self):
+ raise NotImplementedError('Iteration over this class is unlikely to be threadsafe.')
+
+ def clear(self):
+ with self.lock:
+ # Copy pointers to all values, then wipe the mapping
+ values = list(itervalues(self._container))
+ self._container.clear()
+
+ if self.dispose_func:
+ for value in values:
+ self.dispose_func(value)
+
+ def keys(self):
+ with self.lock:
+ return list(iterkeys(self._container))
+
+
+class HTTPHeaderDict(MutableMapping):
+ """
+ :param headers:
+ An iterable of field-value pairs. Must not contain multiple field names
+ when compared case-insensitively.
+
+ :param kwargs:
+ Additional field-value pairs to pass in to ``dict.update``.
+
+ A ``dict`` like container for storing HTTP Headers.
+
+ Field names are stored and compared case-insensitively in compliance with
+ RFC 7230. Iteration provides the first case-sensitive key seen for each
+ case-insensitive pair.
+
+ Using ``__setitem__`` syntax overwrites fields that compare equal
+ case-insensitively in order to maintain ``dict``'s api. For fields that
+ compare equal, instead create a new ``HTTPHeaderDict`` and use ``.add``
+ in a loop.
+
+ If multiple fields that are equal case-insensitively are passed to the
+ constructor or ``.update``, the behavior is undefined and some will be
+ lost.
+
+ >>> headers = HTTPHeaderDict()
+ >>> headers.add('Set-Cookie', 'foo=bar')
+ >>> headers.add('set-cookie', 'baz=quxx')
+ >>> headers['content-length'] = '7'
+ >>> headers['SET-cookie']
+ 'foo=bar, baz=quxx'
+ >>> headers['Content-Length']
+ '7'
+ """
+
+ def __init__(self, headers=None, **kwargs):
+ super(HTTPHeaderDict, self).__init__()
+ self._container = OrderedDict()
+ if headers is not None:
+ if isinstance(headers, HTTPHeaderDict):
+ self._copy_from(headers)
+ else:
+ self.extend(headers)
+ if kwargs:
+ self.extend(kwargs)
+
+ def __setitem__(self, key, val):
+ self._container[key.lower()] = [key, val]
+ return self._container[key.lower()]
+
+ def __getitem__(self, key):
+ val = self._container[key.lower()]
+ return ', '.join(val[1:])
+
+ def __delitem__(self, key):
+ del self._container[key.lower()]
+
+ def __contains__(self, key):
+ return key.lower() in self._container
+
+ def __eq__(self, other):
+ if not isinstance(other, Mapping) and not hasattr(other, 'keys'):
+ return False
+ if not isinstance(other, type(self)):
+ other = type(self)(other)
+ return (dict((k.lower(), v) for k, v in self.itermerged()) ==
+ dict((k.lower(), v) for k, v in other.itermerged()))
+
+ def __ne__(self, other):
+ return not self.__eq__(other)
+
+ if not PY3: # Python 2
+ iterkeys = MutableMapping.iterkeys
+ itervalues = MutableMapping.itervalues
+
+ __marker = object()
+
+ def __len__(self):
+ return len(self._container)
+
+ def __iter__(self):
+ # Only provide the originally cased names
+ for vals in self._container.values():
+ yield vals[0]
+
+ def pop(self, key, default=__marker):
+ '''D.pop(k[,d]) -> v, remove specified key and return the corresponding value.
+ If key is not found, d is returned if given, otherwise KeyError is raised.
+ '''
+ # Using the MutableMapping function directly fails due to the private marker.
+ # Using ordinary dict.pop would expose the internal structures.
+ # So let's reinvent the wheel.
+ try:
+ value = self[key]
+ except KeyError:
+ if default is self.__marker:
+ raise
+ return default
+ else:
+ del self[key]
+ return value
+
+ def discard(self, key):
+ try:
+ del self[key]
+ except KeyError:
+ pass
+
+ def add(self, key, val):
+ """Adds a (name, value) pair, doesn't overwrite the value if it already
+ exists.
+
+ >>> headers = HTTPHeaderDict(foo='bar')
+ >>> headers.add('Foo', 'baz')
+ >>> headers['foo']
+ 'bar, baz'
+ """
+ key_lower = key.lower()
+ new_vals = [key, val]
+ # Keep the common case aka no item present as fast as possible
+ vals = self._container.setdefault(key_lower, new_vals)
+ if new_vals is not vals:
+ vals.append(val)
+
+ def extend(self, *args, **kwargs):
+ """Generic import function for any type of header-like object.
+ Adapted version of MutableMapping.update in order to insert items
+ with self.add instead of self.__setitem__
+ """
+ if len(args) > 1:
+ raise TypeError("extend() takes at most 1 positional "
+ "arguments ({0} given)".format(len(args)))
+ other = args[0] if len(args) >= 1 else ()
+
+ if isinstance(other, HTTPHeaderDict):
+ for key, val in other.iteritems():
+ self.add(key, val)
+ elif isinstance(other, Mapping):
+ for key in other:
+ self.add(key, other[key])
+ elif hasattr(other, "keys"):
+ for key in other.keys():
+ self.add(key, other[key])
+ else:
+ for key, value in other:
+ self.add(key, value)
+
+ for key, value in kwargs.items():
+ self.add(key, value)
+
+ def getlist(self, key):
+ """Returns a list of all the values for the named field. Returns an
+ empty list if the key doesn't exist."""
+ try:
+ vals = self._container[key.lower()]
+ except KeyError:
+ return []
+ else:
+ return vals[1:]
+
+ # Backwards compatibility for httplib
+ getheaders = getlist
+ getallmatchingheaders = getlist
+ iget = getlist
+
+ def __repr__(self):
+ return "%s(%s)" % (type(self).__name__, dict(self.itermerged()))
+
+ def _copy_from(self, other):
+ for key in other:
+ val = other.getlist(key)
+ if isinstance(val, list):
+ # Don't need to convert tuples
+ val = list(val)
+ self._container[key.lower()] = [key] + val
+
+ def copy(self):
+ clone = type(self)()
+ clone._copy_from(self)
+ return clone
+
+ def iteritems(self):
+ """Iterate over all header lines, including duplicate ones."""
+ for key in self:
+ vals = self._container[key.lower()]
+ for val in vals[1:]:
+ yield vals[0], val
+
+ def itermerged(self):
+ """Iterate over all headers, merging duplicate ones together."""
+ for key in self:
+ val = self._container[key.lower()]
+ yield val[0], ', '.join(val[1:])
+
+ def items(self):
+ return list(self.iteritems())
+
+ @classmethod
+ def from_httplib(cls, message): # Python 2
+ """Read headers from a Python 2 httplib message object."""
+ # python2.7 does not expose a proper API for exporting multiheaders
+ # efficiently. This function re-reads raw lines from the message
+ # object and extracts the multiheaders properly.
+ headers = []
+
+ for line in message.headers:
+ if line.startswith((' ', '\t')):
+ key, value = headers[-1]
+ headers[-1] = (key, value + '\r\n' + line.rstrip())
+ continue
+
+ key, value = line.split(':', 1)
+ headers.append((key, value.strip()))
+
+ return cls(headers)
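
The two containers above can be exercised on their own; a brief sketch (the eviction callback is purely illustrative):

    from urllib3._collections import HTTPHeaderDict, RecentlyUsedContainer

    headers = HTTPHeaderDict()
    headers.add('Set-Cookie', 'foo=bar')     # add() keeps both values,
    headers.add('set-cookie', 'baz=qux')     # __setitem__ would overwrite
    print(headers['Set-Cookie'])             # 'foo=bar, baz=qux'
    print(headers.getlist('set-cookie'))     # ['foo=bar', 'baz=qux']

    def on_evict(value):
        print('evicted: %r' % value)

    cache = RecentlyUsedContainer(maxsize=2, dispose_func=on_evict)
    cache['a'] = 1
    cache['b'] = 2
    cache['c'] = 3                           # 'a' is evicted -> "evicted: 1"
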
diff --git a/collectors/python.d.plugin/python_modules/urllib3/connection.py b/collectors/python.d.plugin/python_modules/urllib3/connection.py
new file mode 100644
index 000000000..f757493c7
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/urllib3/connection.py
@@ -0,0 +1,374 @@
+# SPDX-License-Identifier: MIT
+from __future__ import absolute_import
+import datetime
+import logging
+import os
+import sys
+import socket
+from socket import error as SocketError, timeout as SocketTimeout
+import warnings
+from .packages import six
+from .packages.six.moves.http_client import HTTPConnection as _HTTPConnection
+from .packages.six.moves.http_client import HTTPException # noqa: F401
+
+try: # Compiled with SSL?
+ import ssl
+ BaseSSLError = ssl.SSLError
+except (ImportError, AttributeError): # Platform-specific: No SSL.
+ ssl = None
+
+ class BaseSSLError(BaseException):
+ pass
+
+
+try: # Python 3:
+ # Not a no-op, we're adding this to the namespace so it can be imported.
+ ConnectionError = ConnectionError
+except NameError: # Python 2:
+ class ConnectionError(Exception):
+ pass
+
+
+from .exceptions import (
+ NewConnectionError,
+ ConnectTimeoutError,
+ SubjectAltNameWarning,
+ SystemTimeWarning,
+)
+from .packages.ssl_match_hostname import match_hostname, CertificateError
+
+from .util.ssl_ import (
+ resolve_cert_reqs,
+ resolve_ssl_version,
+ assert_fingerprint,
+ create_urllib3_context,
+ ssl_wrap_socket
+)
+
+
+from .util import connection
+
+from ._collections import HTTPHeaderDict
+
+log = logging.getLogger(__name__)
+
+port_by_scheme = {
+ 'http': 80,
+ 'https': 443,
+}
+
+# When updating RECENT_DATE, move it to
+# within two years of the current date, and no
+# earlier than 6 months ago.
+RECENT_DATE = datetime.date(2016, 1, 1)
+
+
+class DummyConnection(object):
+ """Used to detect a failed ConnectionCls import."""
+ pass
+
+
+class HTTPConnection(_HTTPConnection, object):
+ """
+ Based on httplib.HTTPConnection but provides an extra constructor
+ backwards-compatibility layer between older and newer Pythons.
+
+ Additional keyword parameters are used to configure attributes of the connection.
+ Accepted parameters include:
+
+ - ``strict``: See the documentation on :class:`urllib3.connectionpool.HTTPConnectionPool`
+ - ``source_address``: Set the source address for the current connection.
+
+ .. note:: This is ignored for Python 2.6. It is only applied for 2.7 and 3.x
+
+ - ``socket_options``: Set specific options on the underlying socket. If not specified, then
+ defaults are loaded from ``HTTPConnection.default_socket_options`` which includes disabling
+ Nagle's algorithm (sets TCP_NODELAY to 1) unless the connection is behind a proxy.
+
+ For example, if you wish to enable TCP Keep Alive in addition to the defaults,
+ you might pass::
+
+ HTTPConnection.default_socket_options + [
+ (socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1),
+ ]
+
+ Or you may want to disable the defaults by passing an empty list (e.g., ``[]``).
+ """
+
+ default_port = port_by_scheme['http']
+
+ #: Disable Nagle's algorithm by default.
+ #: ``[(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)]``
+ default_socket_options = [(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)]
+
+ #: Whether this connection verifies the host's certificate.
+ is_verified = False
+
+ def __init__(self, *args, **kw):
+ if six.PY3: # Python 3
+ kw.pop('strict', None)
+
+ # Pre-set source_address in case we have an older Python like 2.6.
+ self.source_address = kw.get('source_address')
+
+ if sys.version_info < (2, 7): # Python 2.6
+ # _HTTPConnection on Python 2.6 will balk at this keyword arg, but
+ # not newer versions. We can still use it when creating a
+ # connection though, so we pop it *after* we have saved it as
+ # self.source_address.
+ kw.pop('source_address', None)
+
+ #: The socket options provided by the user. If no options are
+ #: provided, we use the default options.
+ self.socket_options = kw.pop('socket_options', self.default_socket_options)
+
+ # Superclass also sets self.source_address in Python 2.7+.
+ _HTTPConnection.__init__(self, *args, **kw)
+
+ def _new_conn(self):
+ """ Establish a socket connection and set nodelay settings on it.
+
+ :return: New socket connection.
+ """
+ extra_kw = {}
+ if self.source_address:
+ extra_kw['source_address'] = self.source_address
+
+ if self.socket_options:
+ extra_kw['socket_options'] = self.socket_options
+
+ try:
+ conn = connection.create_connection(
+ (self.host, self.port), self.timeout, **extra_kw)
+
+ except SocketTimeout as e:
+ raise ConnectTimeoutError(
+ self, "Connection to %s timed out. (connect timeout=%s)" %
+ (self.host, self.timeout))
+
+ except SocketError as e:
+ raise NewConnectionError(
+ self, "Failed to establish a new connection: %s" % e)
+
+ return conn
+
+ def _prepare_conn(self, conn):
+ self.sock = conn
+ # the _tunnel_host attribute was added in python 2.6.3 (via
+ # http://hg.python.org/cpython/rev/0f57b30a152f) so pythons 2.6(0-2) do
+ # not have them.
+ if getattr(self, '_tunnel_host', None):
+ # TODO: Fix tunnel so it doesn't depend on self.sock state.
+ self._tunnel()
+ # Mark this connection as not reusable
+ self.auto_open = 0
+
+ def connect(self):
+ conn = self._new_conn()
+ self._prepare_conn(conn)
+
+ def request_chunked(self, method, url, body=None, headers=None):
+ """
+ Alternative to the common request method, which sends the
+ body with chunked encoding and not as one block
+ """
+ headers = HTTPHeaderDict(headers if headers is not None else {})
+ skip_accept_encoding = 'accept-encoding' in headers
+ skip_host = 'host' in headers
+ self.putrequest(
+ method,
+ url,
+ skip_accept_encoding=skip_accept_encoding,
+ skip_host=skip_host
+ )
+ for header, value in headers.items():
+ self.putheader(header, value)
+ if 'transfer-encoding' not in headers:
+ self.putheader('Transfer-Encoding', 'chunked')
+ self.endheaders()
+
+ if body is not None:
+ stringish_types = six.string_types + (six.binary_type,)
+ if isinstance(body, stringish_types):
+ body = (body,)
+ for chunk in body:
+ if not chunk:
+ continue
+ if not isinstance(chunk, six.binary_type):
+ chunk = chunk.encode('utf8')
+ len_str = hex(len(chunk))[2:]
+ self.send(len_str.encode('utf-8'))
+ self.send(b'\r\n')
+ self.send(chunk)
+ self.send(b'\r\n')
+
+ # After the if clause, to always have a closed body
+ self.send(b'0\r\n\r\n')
+
+
+class HTTPSConnection(HTTPConnection):
+ default_port = port_by_scheme['https']
+
+ ssl_version = None
+
+ def __init__(self, host, port=None, key_file=None, cert_file=None,
+ strict=None, timeout=socket._GLOBAL_DEFAULT_TIMEOUT,
+ ssl_context=None, **kw):
+
+ HTTPConnection.__init__(self, host, port, strict=strict,
+ timeout=timeout, **kw)
+
+ self.key_file = key_file
+ self.cert_file = cert_file
+ self.ssl_context = ssl_context
+
+ # Required property for Google AppEngine 1.9.0 which otherwise causes
+ # HTTPS requests to go out as HTTP. (See Issue #356)
+ self._protocol = 'https'
+
+ def connect(self):
+ conn = self._new_conn()
+ self._prepare_conn(conn)
+
+ if self.ssl_context is None:
+ self.ssl_context = create_urllib3_context(
+ ssl_version=resolve_ssl_version(None),
+ cert_reqs=resolve_cert_reqs(None),
+ )
+
+ self.sock = ssl_wrap_socket(
+ sock=conn,
+ keyfile=self.key_file,
+ certfile=self.cert_file,
+ ssl_context=self.ssl_context,
+ )
+
+
+class VerifiedHTTPSConnection(HTTPSConnection):
+ """
+ Based on httplib.HTTPSConnection but wraps the socket with
+ SSL certification.
+ """
+ cert_reqs = None
+ ca_certs = None
+ ca_cert_dir = None
+ ssl_version = None
+ assert_fingerprint = None
+
+ def set_cert(self, key_file=None, cert_file=None,
+ cert_reqs=None, ca_certs=None,
+ assert_hostname=None, assert_fingerprint=None,
+ ca_cert_dir=None):
+ """
+ This method should only be called once, before the connection is used.
+ """
+ # If cert_reqs is not provided, we can try to guess. If the user gave
+ # us a cert database, we assume they want to use it: otherwise, if
+ # they gave us an SSL Context object we should use whatever is set for
+ # it.
+ if cert_reqs is None:
+ if ca_certs or ca_cert_dir:
+ cert_reqs = 'CERT_REQUIRED'
+ elif self.ssl_context is not None:
+ cert_reqs = self.ssl_context.verify_mode
+
+ self.key_file = key_file
+ self.cert_file = cert_file
+ self.cert_reqs = cert_reqs
+ self.assert_hostname = assert_hostname
+ self.assert_fingerprint = assert_fingerprint
+ self.ca_certs = ca_certs and os.path.expanduser(ca_certs)
+ self.ca_cert_dir = ca_cert_dir and os.path.expanduser(ca_cert_dir)
+
+ def connect(self):
+ # Add certificate verification
+ conn = self._new_conn()
+
+ hostname = self.host
+ if getattr(self, '_tunnel_host', None):
+ # _tunnel_host was added in Python 2.6.3
+ # (See: http://hg.python.org/cpython/rev/0f57b30a152f)
+
+ self.sock = conn
+ # Calls self._set_hostport(), so self.host is
+ # self._tunnel_host below.
+ self._tunnel()
+ # Mark this connection as not reusable
+ self.auto_open = 0
+
+ # Override the host with the one we're requesting data from.
+ hostname = self._tunnel_host
+
+ is_time_off = datetime.date.today() < RECENT_DATE
+ if is_time_off:
+ warnings.warn((
+ 'System time is way off (before {0}). This will probably '
+ 'lead to SSL verification errors').format(RECENT_DATE),
+ SystemTimeWarning
+ )
+
+ # Wrap socket using verification with the root certs in
+ # trusted_root_certs
+ if self.ssl_context is None:
+ self.ssl_context = create_urllib3_context(
+ ssl_version=resolve_ssl_version(self.ssl_version),
+ cert_reqs=resolve_cert_reqs(self.cert_reqs),
+ )
+
+ context = self.ssl_context
+ context.verify_mode = resolve_cert_reqs(self.cert_reqs)
+ self.sock = ssl_wrap_socket(
+ sock=conn,
+ keyfile=self.key_file,
+ certfile=self.cert_file,
+ ca_certs=self.ca_certs,
+ ca_cert_dir=self.ca_cert_dir,
+ server_hostname=hostname,
+ ssl_context=context)
+
+ if self.assert_fingerprint:
+ assert_fingerprint(self.sock.getpeercert(binary_form=True),
+ self.assert_fingerprint)
+ elif context.verify_mode != ssl.CERT_NONE \
+ and not getattr(context, 'check_hostname', False) \
+ and self.assert_hostname is not False:
+ # While urllib3 attempts to always turn off hostname matching from
+ # the TLS library, this cannot always be done. So we check whether
+ # the TLS Library still thinks it's matching hostnames.
+ cert = self.sock.getpeercert()
+ if not cert.get('subjectAltName', ()):
+ warnings.warn((
+ 'Certificate for {0} has no `subjectAltName`, falling back to check for a '
+ '`commonName` for now. This feature is being removed by major browsers and '
+ 'deprecated by RFC 2818. (See https://github.com/shazow/urllib3/issues/497 '
+ 'for details.)'.format(hostname)),
+ SubjectAltNameWarning
+ )
+ _match_hostname(cert, self.assert_hostname or hostname)
+
+ self.is_verified = (
+ context.verify_mode == ssl.CERT_REQUIRED or
+ self.assert_fingerprint is not None
+ )
+
+
+def _match_hostname(cert, asserted_hostname):
+ try:
+ match_hostname(cert, asserted_hostname)
+ except CertificateError as e:
+ log.error(
+ 'Certificate did not match expected hostname: %s. '
+ 'Certificate: %s', asserted_hostname, cert
+ )
+ # Add cert to exception and reraise so client code can inspect
+ # the cert when catching the exception, if they want to
+ e._peer_cert = cert
+ raise
+
+
+if ssl:
+ # Make a copy for testing.
+ UnverifiedHTTPSConnection = HTTPSConnection
+ HTTPSConnection = VerifiedHTTPSConnection
+else:
+ HTTPSConnection = DummyConnection
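
The HTTPConnection docstring above describes passing extra socket options; a hedged sketch of doing that through a connection pool (host, port and path are placeholders chosen to look like a local netdata API endpoint):

    import socket
    from urllib3.connection import HTTPConnection
    from urllib3.connectionpool import HTTPConnectionPool

    # Keep the default options (TCP_NODELAY) and add TCP keep-alive on top.
    keepalive_opts = HTTPConnection.default_socket_options + [
        (socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1),
    ]

    pool = HTTPConnectionPool('localhost', port=19999, maxsize=2,
                              socket_options=keepalive_opts)
    response = pool.request('GET', '/api/v1/info')
    print(response.status)
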
diff --git a/collectors/python.d.plugin/python_modules/urllib3/connectionpool.py b/collectors/python.d.plugin/python_modules/urllib3/connectionpool.py
new file mode 100644
index 000000000..90e4c86a5
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/urllib3/connectionpool.py
@@ -0,0 +1,900 @@
+# SPDX-License-Identifier: MIT
+from __future__ import absolute_import
+import errno
+import logging
+import sys
+import warnings
+
+from socket import error as SocketError, timeout as SocketTimeout
+import socket
+
+
+from .exceptions import (
+ ClosedPoolError,
+ ProtocolError,
+ EmptyPoolError,
+ HeaderParsingError,
+ HostChangedError,
+ LocationValueError,
+ MaxRetryError,
+ ProxyError,
+ ReadTimeoutError,
+ SSLError,
+ TimeoutError,
+ InsecureRequestWarning,
+ NewConnectionError,
+)
+from .packages.ssl_match_hostname import CertificateError
+from .packages import six
+from .packages.six.moves import queue
+from .connection import (
+ port_by_scheme,
+ DummyConnection,
+ HTTPConnection, HTTPSConnection, VerifiedHTTPSConnection,
+ HTTPException, BaseSSLError,
+)
+from .request import RequestMethods
+from .response import HTTPResponse
+
+from .util.connection import is_connection_dropped
+from .util.request import set_file_position
+from .util.response import assert_header_parsing
+from .util.retry import Retry
+from .util.timeout import Timeout
+from .util.url import get_host, Url
+
+
+if six.PY2:
+ # Queue is imported for side effects on MS Windows
+ import Queue as _unused_module_Queue # noqa: F401
+
+xrange = six.moves.xrange
+
+log = logging.getLogger(__name__)
+
+_Default = object()
+
+
+# Pool objects
+class ConnectionPool(object):
+ """
+ Base class for all connection pools, such as
+ :class:`.HTTPConnectionPool` and :class:`.HTTPSConnectionPool`.
+ """
+
+ scheme = None
+ QueueCls = queue.LifoQueue
+
+ def __init__(self, host, port=None):
+ if not host:
+ raise LocationValueError("No host specified.")
+
+ self.host = _ipv6_host(host).lower()
+ self.port = port
+
+ def __str__(self):
+ return '%s(host=%r, port=%r)' % (type(self).__name__,
+ self.host, self.port)
+
+ def __enter__(self):
+ return self
+
+ def __exit__(self, exc_type, exc_val, exc_tb):
+ self.close()
+ # Return False to re-raise any potential exceptions
+ return False
+
+ def close(self):
+ """
+ Close all pooled connections and disable the pool.
+ """
+ pass
+
+
+# This is taken from http://hg.python.org/cpython/file/7aaba721ebc0/Lib/socket.py#l252
+_blocking_errnos = set([errno.EAGAIN, errno.EWOULDBLOCK])
+
+
+class HTTPConnectionPool(ConnectionPool, RequestMethods):
+ """
+ Thread-safe connection pool for one host.
+
+ :param host:
+ Host used for this HTTP Connection (e.g. "localhost"), passed into
+ :class:`httplib.HTTPConnection`.
+
+ :param port:
+ Port used for this HTTP Connection (None is equivalent to 80), passed
+ into :class:`httplib.HTTPConnection`.
+
+ :param strict:
+ Causes BadStatusLine to be raised if the status line can't be parsed
+ as a valid HTTP/1.0 or 1.1 status line, passed into
+ :class:`httplib.HTTPConnection`.
+
+ .. note::
+ Only works in Python 2. This parameter is ignored in Python 3.
+
+ :param timeout:
+ Socket timeout in seconds for each individual connection. This can
+ be a float or integer, which sets the timeout for the HTTP request,
+ or an instance of :class:`urllib3.util.Timeout` which gives you more
+ fine-grained control over request timeouts. After the constructor has
+ been parsed, this is always a `urllib3.util.Timeout` object.
+
+ :param maxsize:
+ Number of connections to save that can be reused. More than 1 is useful
+ in multithreaded situations. If ``block`` is set to False, more
+ connections will be created but they will not be saved once they've
+ been used.
+
+ :param block:
+ If set to True, no more than ``maxsize`` connections will be used at
+ a time. When no free connections are available, the call will block
+ until a connection has been released. This is a useful side effect for
+ particular multithreaded situations where one does not want to use more
+ than maxsize connections per host to prevent flooding.
+
+ :param headers:
+ Headers to include with all requests, unless other headers are given
+ explicitly.
+
+ :param retries:
+ Retry configuration to use by default with requests in this pool.
+
+ :param _proxy:
+ Parsed proxy URL, should not be used directly, instead, see
+        :class:`urllib3.connectionpool.ProxyManager`
+
+ :param _proxy_headers:
+ A dictionary with proxy headers, should not be used directly,
+        instead, see :class:`urllib3.connectionpool.ProxyManager`
+
+ :param \\**conn_kw:
+ Additional parameters are used to create fresh :class:`urllib3.connection.HTTPConnection`,
+ :class:`urllib3.connection.HTTPSConnection` instances.
+ """
+
+ scheme = 'http'
+ ConnectionCls = HTTPConnection
+ ResponseCls = HTTPResponse
+
+ def __init__(self, host, port=None, strict=False,
+ timeout=Timeout.DEFAULT_TIMEOUT, maxsize=1, block=False,
+ headers=None, retries=None,
+ _proxy=None, _proxy_headers=None,
+ **conn_kw):
+ ConnectionPool.__init__(self, host, port)
+ RequestMethods.__init__(self, headers)
+
+ self.strict = strict
+
+ if not isinstance(timeout, Timeout):
+ timeout = Timeout.from_float(timeout)
+
+ if retries is None:
+ retries = Retry.DEFAULT
+
+ self.timeout = timeout
+ self.retries = retries
+
+ self.pool = self.QueueCls(maxsize)
+ self.block = block
+
+ self.proxy = _proxy
+ self.proxy_headers = _proxy_headers or {}
+
+ # Fill the queue up so that doing get() on it will block properly
+ for _ in xrange(maxsize):
+ self.pool.put(None)
+
+ # These are mostly for testing and debugging purposes.
+ self.num_connections = 0
+ self.num_requests = 0
+ self.conn_kw = conn_kw
+
+ if self.proxy:
+ # Enable Nagle's algorithm for proxies, to avoid packet fragmentation.
+ # We cannot know if the user has added default socket options, so we cannot replace the
+ # list.
+ self.conn_kw.setdefault('socket_options', [])
+
+ def _new_conn(self):
+ """
+ Return a fresh :class:`HTTPConnection`.
+ """
+ self.num_connections += 1
+ log.debug("Starting new HTTP connection (%d): %s",
+ self.num_connections, self.host)
+
+ conn = self.ConnectionCls(host=self.host, port=self.port,
+ timeout=self.timeout.connect_timeout,
+ strict=self.strict, **self.conn_kw)
+ return conn
+
+ def _get_conn(self, timeout=None):
+ """
+ Get a connection. Will return a pooled connection if one is available.
+
+ If no connections are available and :prop:`.block` is ``False``, then a
+ fresh connection is returned.
+
+ :param timeout:
+ Seconds to wait before giving up and raising
+ :class:`urllib3.exceptions.EmptyPoolError` if the pool is empty and
+ :prop:`.block` is ``True``.
+ """
+ conn = None
+ try:
+ conn = self.pool.get(block=self.block, timeout=timeout)
+
+ except AttributeError: # self.pool is None
+ raise ClosedPoolError(self, "Pool is closed.")
+
+ except queue.Empty:
+ if self.block:
+ raise EmptyPoolError(self,
+ "Pool reached maximum size and no more "
+ "connections are allowed.")
+ pass # Oh well, we'll create a new connection then
+
+ # If this is a persistent connection, check if it got disconnected
+ if conn and is_connection_dropped(conn):
+ log.debug("Resetting dropped connection: %s", self.host)
+ conn.close()
+ if getattr(conn, 'auto_open', 1) == 0:
+ # This is a proxied connection that has been mutated by
+ # httplib._tunnel() and cannot be reused (since it would
+ # attempt to bypass the proxy)
+ conn = None
+
+ return conn or self._new_conn()
+
+ def _put_conn(self, conn):
+ """
+ Put a connection back into the pool.
+
+ :param conn:
+ Connection object for the current host and port as returned by
+ :meth:`._new_conn` or :meth:`._get_conn`.
+
+ If the pool is already full, the connection is closed and discarded
+ because we exceeded maxsize. If connections are discarded frequently,
+ then maxsize should be increased.
+
+ If the pool is closed, then the connection will be closed and discarded.
+ """
+ try:
+ self.pool.put(conn, block=False)
+ return # Everything is dandy, done.
+ except AttributeError:
+ # self.pool is None.
+ pass
+ except queue.Full:
+ # This should never happen if self.block == True
+ log.warning(
+ "Connection pool is full, discarding connection: %s",
+ self.host)
+
+ # Connection never got put back into the pool, close it.
+ if conn:
+ conn.close()
+
+ def _validate_conn(self, conn):
+ """
+ Called right before a request is made, after the socket is created.
+ """
+ pass
+
+ def _prepare_proxy(self, conn):
+ # Nothing to do for HTTP connections.
+ pass
+
+ def _get_timeout(self, timeout):
+ """ Helper that always returns a :class:`urllib3.util.Timeout` """
+ if timeout is _Default:
+ return self.timeout.clone()
+
+ if isinstance(timeout, Timeout):
+ return timeout.clone()
+ else:
+ # User passed us an int/float. This is for backwards compatibility,
+ # can be removed later
+ return Timeout.from_float(timeout)
+
+ def _raise_timeout(self, err, url, timeout_value):
+ """Is the error actually a timeout? Will raise a ReadTimeout or pass"""
+
+ if isinstance(err, SocketTimeout):
+ raise ReadTimeoutError(self, url, "Read timed out. (read timeout=%s)" % timeout_value)
+
+ # See the above comment about EAGAIN in Python 3. In Python 2 we have
+ # to specifically catch it and throw the timeout error
+ if hasattr(err, 'errno') and err.errno in _blocking_errnos:
+ raise ReadTimeoutError(self, url, "Read timed out. (read timeout=%s)" % timeout_value)
+
+ # Catch possible read timeouts thrown as SSL errors. If not the
+ # case, rethrow the original. We need to do this because of:
+ # http://bugs.python.org/issue10272
+ if 'timed out' in str(err) or 'did not complete (read)' in str(err): # Python 2.6
+ raise ReadTimeoutError(self, url, "Read timed out. (read timeout=%s)" % timeout_value)
+
+ def _make_request(self, conn, method, url, timeout=_Default, chunked=False,
+ **httplib_request_kw):
+ """
+ Perform a request on a given urllib connection object taken from our
+ pool.
+
+ :param conn:
+ a connection from one of our connection pools
+
+ :param timeout:
+ Socket timeout in seconds for the request. This can be a
+ float or integer, which will set the same timeout value for
+ the socket connect and the socket read, or an instance of
+ :class:`urllib3.util.Timeout`, which gives you more fine-grained
+ control over your timeouts.
+ """
+ self.num_requests += 1
+
+ timeout_obj = self._get_timeout(timeout)
+ timeout_obj.start_connect()
+ conn.timeout = timeout_obj.connect_timeout
+
+ # Trigger any extra validation we need to do.
+ try:
+ self._validate_conn(conn)
+ except (SocketTimeout, BaseSSLError) as e:
+ # Py2 raises this as a BaseSSLError, Py3 raises it as socket timeout.
+ self._raise_timeout(err=e, url=url, timeout_value=conn.timeout)
+ raise
+
+ # conn.request() calls httplib.*.request, not the method in
+ # urllib3.request. It also calls makefile (recv) on the socket.
+ if chunked:
+ conn.request_chunked(method, url, **httplib_request_kw)
+ else:
+ conn.request(method, url, **httplib_request_kw)
+
+ # Reset the timeout for the recv() on the socket
+ read_timeout = timeout_obj.read_timeout
+
+ # App Engine doesn't have a sock attr
+ if getattr(conn, 'sock', None):
+ # In Python 3 socket.py will catch EAGAIN and return None when you
+ # try and read into the file pointer created by http.client, which
+ # instead raises a BadStatusLine exception. Instead of catching
+ # the exception and assuming all BadStatusLine exceptions are read
+ # timeouts, check for a zero timeout before making the request.
+ if read_timeout == 0:
+ raise ReadTimeoutError(
+ self, url, "Read timed out. (read timeout=%s)" % read_timeout)
+ if read_timeout is Timeout.DEFAULT_TIMEOUT:
+ conn.sock.settimeout(socket.getdefaulttimeout())
+ else: # None or a value
+ conn.sock.settimeout(read_timeout)
+
+ # Receive the response from the server
+ try:
+ try: # Python 2.7, use buffering of HTTP responses
+ httplib_response = conn.getresponse(buffering=True)
+ except TypeError: # Python 2.6 and older, Python 3
+ try:
+ httplib_response = conn.getresponse()
+ except Exception as e:
+ # Remove the TypeError from the exception chain in Python 3;
+ # otherwise it looks like a programming error was the cause.
+ six.raise_from(e, None)
+ except (SocketTimeout, BaseSSLError, SocketError) as e:
+ self._raise_timeout(err=e, url=url, timeout_value=read_timeout)
+ raise
+
+ # AppEngine doesn't have a version attr.
+ http_version = getattr(conn, '_http_vsn_str', 'HTTP/?')
+ log.debug("%s://%s:%s \"%s %s %s\" %s %s", self.scheme, self.host, self.port,
+ method, url, http_version, httplib_response.status,
+ httplib_response.length)
+
+ try:
+ assert_header_parsing(httplib_response.msg)
+ except HeaderParsingError as hpe: # Platform-specific: Python 3
+ log.warning(
+ 'Failed to parse headers (url=%s): %s',
+ self._absolute_url(url), hpe, exc_info=True)
+
+ return httplib_response
+
+ def _absolute_url(self, path):
+ return Url(scheme=self.scheme, host=self.host, port=self.port, path=path).url
+
+ def close(self):
+ """
+ Close all pooled connections and disable the pool.
+ """
+ # Disable access to the pool
+ old_pool, self.pool = self.pool, None
+
+ try:
+ while True:
+ conn = old_pool.get(block=False)
+ if conn:
+ conn.close()
+
+ except queue.Empty:
+ pass # Done.
+
+ def is_same_host(self, url):
+ """
+ Check if the given ``url`` is a member of the same host as this
+ connection pool.
+ """
+ if url.startswith('/'):
+ return True
+
+ # TODO: Add optional support for socket.gethostbyname checking.
+ scheme, host, port = get_host(url)
+
+ host = _ipv6_host(host).lower()
+
+ # Use explicit default port for comparison when none is given
+ if self.port and not port:
+ port = port_by_scheme.get(scheme)
+ elif not self.port and port == port_by_scheme.get(scheme):
+ port = None
+
+ return (scheme, host, port) == (self.scheme, self.host, self.port)
+
+ def urlopen(self, method, url, body=None, headers=None, retries=None,
+ redirect=True, assert_same_host=True, timeout=_Default,
+ pool_timeout=None, release_conn=None, chunked=False,
+ body_pos=None, **response_kw):
+ """
+ Get a connection from the pool and perform an HTTP request. This is the
+ lowest level call for making a request, so you'll need to specify all
+ the raw details.
+
+ .. note::
+
+ More commonly, it's appropriate to use a convenience method provided
+ by :class:`.RequestMethods`, such as :meth:`request`.
+
+ .. note::
+
+ `release_conn` will only behave as expected if
+ `preload_content=False` because we want to make
+ `preload_content=False` the default behaviour someday soon without
+ breaking backwards compatibility.
+
+ :param method:
+ HTTP request method (such as GET, POST, PUT, etc.)
+
+ :param body:
+ Data to send in the request body (useful for creating
+ POST requests, see HTTPConnectionPool.post_url for
+ more convenience).
+
+ :param headers:
+ Dictionary of custom headers to send, such as User-Agent,
+ If-None-Match, etc. If None, pool headers are used. If provided,
+ these headers completely replace any pool-specific headers.
+
+ :param retries:
+ Configure the number of retries to allow before raising a
+ :class:`~urllib3.exceptions.MaxRetryError` exception.
+
+ Pass ``None`` to retry until you receive a response. Pass a
+ :class:`~urllib3.util.retry.Retry` object for fine-grained control
+ over different types of retries.
+ Pass an integer number to retry connection errors that many times,
+ but no other types of errors. Pass zero to never retry.
+
+ If ``False``, then retries are disabled and any exception is raised
+ immediately. Also, instead of raising a MaxRetryError on redirects,
+ the redirect response will be returned.
+
+ :type retries: :class:`~urllib3.util.retry.Retry`, False, or an int.
+
+ :param redirect:
+ If True, automatically handle redirects (status codes 301, 302,
+ 303, 307, 308). Each redirect counts as a retry. Disabling retries
+ will disable redirect, too.
+
+ :param assert_same_host:
+ If ``True``, will make sure that the host of the pool requests is
+ consistent else will raise HostChangedError. When False, you can
+ use the pool on an HTTP proxy and request foreign hosts.
+
+ :param timeout:
+ If specified, overrides the default timeout for this one
+ request. It may be a float (in seconds) or an instance of
+ :class:`urllib3.util.Timeout`.
+
+ :param pool_timeout:
+ If set and the pool is set to block=True, then this method will
+ block for ``pool_timeout`` seconds and raise EmptyPoolError if no
+ connection is available within the time period.
+
+ :param release_conn:
+ If False, then the urlopen call will not release the connection
+ back into the pool once a response is received (but will release if
+ you read the entire contents of the response such as when
+ `preload_content=True`). This is useful if you're not preloading
+ the response's content immediately. You will need to call
+ ``r.release_conn()`` on the response ``r`` to return the connection
+ back into the pool. If None, it takes the value of
+ ``response_kw.get('preload_content', True)``.
+
+ :param chunked:
+ If True, urllib3 will send the body using chunked transfer
+ encoding. Otherwise, urllib3 will send the body using the standard
+ content-length form. Defaults to False.
+
+ :param int body_pos:
+ Position to seek to in file-like body in the event of a retry or
+ redirect. Typically this won't need to be set because urllib3 will
+ auto-populate the value when needed.
+
+ :param \\**response_kw:
+ Additional parameters are passed to
+ :meth:`urllib3.response.HTTPResponse.from_httplib`
+ """
+ if headers is None:
+ headers = self.headers
+
+ if not isinstance(retries, Retry):
+ retries = Retry.from_int(retries, redirect=redirect, default=self.retries)
+
+ if release_conn is None:
+ release_conn = response_kw.get('preload_content', True)
+
+ # Check host
+ if assert_same_host and not self.is_same_host(url):
+ raise HostChangedError(self, url, retries)
+
+ conn = None
+
+ # Track whether `conn` needs to be released before
+ # returning/raising/recursing. Update this variable if necessary, and
+ # leave `release_conn` constant throughout the function. That way, if
+ # the function recurses, the original value of `release_conn` will be
+ # passed down into the recursive call, and its value will be respected.
+ #
+ # See issue #651 [1] for details.
+ #
+ # [1] <https://github.com/shazow/urllib3/issues/651>
+ release_this_conn = release_conn
+
+ # Merge the proxy headers. Only do this in HTTP. We have to copy the
+ # headers dict so we can safely change it without those changes being
+ # reflected in anyone else's copy.
+ if self.scheme == 'http':
+ headers = headers.copy()
+ headers.update(self.proxy_headers)
+
+ # Must keep the exception bound to a separate variable or else Python 3
+ # complains about UnboundLocalError.
+ err = None
+
+ # Keep track of whether we cleanly exited the except block. This
+ # ensures we do proper cleanup in finally.
+ clean_exit = False
+
+ # Rewind body position, if needed. Record current position
+ # for future rewinds in the event of a redirect/retry.
+ body_pos = set_file_position(body, body_pos)
+
+ try:
+ # Request a connection from the queue.
+ timeout_obj = self._get_timeout(timeout)
+ conn = self._get_conn(timeout=pool_timeout)
+
+ conn.timeout = timeout_obj.connect_timeout
+
+ is_new_proxy_conn = self.proxy is not None and not getattr(conn, 'sock', None)
+ if is_new_proxy_conn:
+ self._prepare_proxy(conn)
+
+ # Make the request on the httplib connection object.
+ httplib_response = self._make_request(conn, method, url,
+ timeout=timeout_obj,
+ body=body, headers=headers,
+ chunked=chunked)
+
+ # If we're going to release the connection in ``finally:``, then
+ # the response doesn't need to know about the connection. Otherwise
+ # it will also try to release it and we'll have a double-release
+ # mess.
+ response_conn = conn if not release_conn else None
+
+ # Pass method to Response for length checking
+ response_kw['request_method'] = method
+
+ # Import httplib's response into our own wrapper object
+ response = self.ResponseCls.from_httplib(httplib_response,
+ pool=self,
+ connection=response_conn,
+ retries=retries,
+ **response_kw)
+
+ # Everything went great!
+ clean_exit = True
+
+ except queue.Empty:
+ # Timed out by queue.
+ raise EmptyPoolError(self, "No pool connections are available.")
+
+ except (BaseSSLError, CertificateError) as e:
+ # Close the connection. If a connection is reused on which there
+ # was a Certificate error, the next request will certainly raise
+ # another Certificate error.
+ clean_exit = False
+ raise SSLError(e)
+
+ except SSLError:
+ # Treat SSLError separately from BaseSSLError to preserve
+ # traceback.
+ clean_exit = False
+ raise
+
+ except (TimeoutError, HTTPException, SocketError, ProtocolError) as e:
+            # Discard the connection for these exceptions. It will be
+            # replaced during the next _get_conn() call.
+ clean_exit = False
+
+ if isinstance(e, (SocketError, NewConnectionError)) and self.proxy:
+ e = ProxyError('Cannot connect to proxy.', e)
+ elif isinstance(e, (SocketError, HTTPException)):
+ e = ProtocolError('Connection aborted.', e)
+
+ retries = retries.increment(method, url, error=e, _pool=self,
+ _stacktrace=sys.exc_info()[2])
+ retries.sleep()
+
+ # Keep track of the error for the retry warning.
+ err = e
+
+ finally:
+ if not clean_exit:
+ # We hit some kind of exception, handled or otherwise. We need
+ # to throw the connection away unless explicitly told not to.
+ # Close the connection, set the variable to None, and make sure
+ # we put the None back in the pool to avoid leaking it.
+ conn = conn and conn.close()
+ release_this_conn = True
+
+ if release_this_conn:
+ # Put the connection back to be reused. If the connection is
+ # expired then it will be None, which will get replaced with a
+ # fresh connection during _get_conn.
+ self._put_conn(conn)
+
+ if not conn:
+ # Try again
+ log.warning("Retrying (%r) after connection "
+ "broken by '%r': %s", retries, err, url)
+ return self.urlopen(method, url, body, headers, retries,
+ redirect, assert_same_host,
+ timeout=timeout, pool_timeout=pool_timeout,
+ release_conn=release_conn, body_pos=body_pos,
+ **response_kw)
+
+ # Handle redirect?
+ redirect_location = redirect and response.get_redirect_location()
+ if redirect_location:
+ if response.status == 303:
+ method = 'GET'
+
+ try:
+ retries = retries.increment(method, url, response=response, _pool=self)
+ except MaxRetryError:
+ if retries.raise_on_redirect:
+ # Release the connection for this response, since we're not
+ # returning it to be released manually.
+ response.release_conn()
+ raise
+ return response
+
+ retries.sleep_for_retry(response)
+ log.debug("Redirecting %s -> %s", url, redirect_location)
+ return self.urlopen(
+ method, redirect_location, body, headers,
+ retries=retries, redirect=redirect,
+ assert_same_host=assert_same_host,
+ timeout=timeout, pool_timeout=pool_timeout,
+ release_conn=release_conn, body_pos=body_pos,
+ **response_kw)
+
+ # Check if we should retry the HTTP response.
+ has_retry_after = bool(response.getheader('Retry-After'))
+ if retries.is_retry(method, response.status, has_retry_after):
+ try:
+ retries = retries.increment(method, url, response=response, _pool=self)
+ except MaxRetryError:
+ if retries.raise_on_status:
+ # Release the connection for this response, since we're not
+ # returning it to be released manually.
+ response.release_conn()
+ raise
+ return response
+ retries.sleep(response)
+ log.debug("Retry: %s", url)
+ return self.urlopen(
+ method, url, body, headers,
+ retries=retries, redirect=redirect,
+ assert_same_host=assert_same_host,
+ timeout=timeout, pool_timeout=pool_timeout,
+ release_conn=release_conn,
+ body_pos=body_pos, **response_kw)
+
+ return response
+
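+# A minimal, illustrative usage sketch (the host below is hypothetical): the
+# retry and redirect behaviour documented in ``urlopen`` above is driven by a
+# ``Retry`` object.
+#
+#     >>> from urllib3 import HTTPConnectionPool
+#     >>> from urllib3.util.retry import Retry
+#     >>> pool = HTTPConnectionPool('example.com', maxsize=2)
+#     >>> r = pool.urlopen('GET', '/', retries=Retry(total=3, redirect=2))
+#     >>> r.status                                      # doctest: +SKIP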
+
+class HTTPSConnectionPool(HTTPConnectionPool):
+ """
+ Same as :class:`.HTTPConnectionPool`, but HTTPS.
+
+ When Python is compiled with the :mod:`ssl` module, then
+ :class:`.VerifiedHTTPSConnection` is used, which *can* verify certificates,
+ instead of :class:`.HTTPSConnection`.
+
+ :class:`.VerifiedHTTPSConnection` uses one of ``assert_fingerprint``,
+ ``assert_hostname`` and ``host`` in this order to verify connections.
+ If ``assert_hostname`` is False, no verification is done.
+
+ The ``key_file``, ``cert_file``, ``cert_reqs``, ``ca_certs``,
+ ``ca_cert_dir``, and ``ssl_version`` are only used if :mod:`ssl` is
+ available and are fed into :meth:`urllib3.util.ssl_wrap_socket` to upgrade
+ the connection socket into an SSL socket.
+ """
+
+ scheme = 'https'
+ ConnectionCls = HTTPSConnection
+
+ def __init__(self, host, port=None,
+ strict=False, timeout=Timeout.DEFAULT_TIMEOUT, maxsize=1,
+ block=False, headers=None, retries=None,
+ _proxy=None, _proxy_headers=None,
+ key_file=None, cert_file=None, cert_reqs=None,
+ ca_certs=None, ssl_version=None,
+ assert_hostname=None, assert_fingerprint=None,
+ ca_cert_dir=None, **conn_kw):
+
+ HTTPConnectionPool.__init__(self, host, port, strict, timeout, maxsize,
+ block, headers, retries, _proxy, _proxy_headers,
+ **conn_kw)
+
+ if ca_certs and cert_reqs is None:
+ cert_reqs = 'CERT_REQUIRED'
+
+ self.key_file = key_file
+ self.cert_file = cert_file
+ self.cert_reqs = cert_reqs
+ self.ca_certs = ca_certs
+ self.ca_cert_dir = ca_cert_dir
+ self.ssl_version = ssl_version
+ self.assert_hostname = assert_hostname
+ self.assert_fingerprint = assert_fingerprint
+
+ def _prepare_conn(self, conn):
+ """
+ Prepare the ``connection`` for :meth:`urllib3.util.ssl_wrap_socket`
+ and establish the tunnel if proxy is used.
+ """
+
+ if isinstance(conn, VerifiedHTTPSConnection):
+ conn.set_cert(key_file=self.key_file,
+ cert_file=self.cert_file,
+ cert_reqs=self.cert_reqs,
+ ca_certs=self.ca_certs,
+ ca_cert_dir=self.ca_cert_dir,
+ assert_hostname=self.assert_hostname,
+ assert_fingerprint=self.assert_fingerprint)
+ conn.ssl_version = self.ssl_version
+ return conn
+
+ def _prepare_proxy(self, conn):
+ """
+        Establish the tunnel connection early, because otherwise httplib
+        would improperly set the Host: header to the proxy's IP:port.
+ """
+ # Python 2.7+
+ try:
+ set_tunnel = conn.set_tunnel
+ except AttributeError: # Platform-specific: Python 2.6
+ set_tunnel = conn._set_tunnel
+
+ if sys.version_info <= (2, 6, 4) and not self.proxy_headers: # Python 2.6.4 and older
+ set_tunnel(self.host, self.port)
+ else:
+ set_tunnel(self.host, self.port, self.proxy_headers)
+
+ conn.connect()
+
+ def _new_conn(self):
+ """
+ Return a fresh :class:`httplib.HTTPSConnection`.
+ """
+ self.num_connections += 1
+ log.debug("Starting new HTTPS connection (%d): %s",
+ self.num_connections, self.host)
+
+ if not self.ConnectionCls or self.ConnectionCls is DummyConnection:
+ raise SSLError("Can't connect to HTTPS URL because the SSL "
+ "module is not available.")
+
+ actual_host = self.host
+ actual_port = self.port
+ if self.proxy is not None:
+ actual_host = self.proxy.host
+ actual_port = self.proxy.port
+
+ conn = self.ConnectionCls(host=actual_host, port=actual_port,
+ timeout=self.timeout.connect_timeout,
+ strict=self.strict, **self.conn_kw)
+
+ return self._prepare_conn(conn)
+
+ def _validate_conn(self, conn):
+ """
+ Called right before a request is made, after the socket is created.
+ """
+ super(HTTPSConnectionPool, self)._validate_conn(conn)
+
+ # Force connect early to allow us to validate the connection.
+ if not getattr(conn, 'sock', None): # AppEngine might not have `.sock`
+ conn.connect()
+
+ if not conn.is_verified:
+ warnings.warn((
+ 'Unverified HTTPS request is being made. '
+ 'Adding certificate verification is strongly advised. See: '
+ 'https://urllib3.readthedocs.io/en/latest/advanced-usage.html'
+ '#ssl-warnings'),
+ InsecureRequestWarning)
+
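+# A minimal, illustrative sketch of the verification parameters described in
+# the class docstring above; the CA bundle path below is hypothetical.
+#
+#     >>> pool = HTTPSConnectionPool('example.com', port=443,
+#     ...                            cert_reqs='CERT_REQUIRED',
+#     ...                            ca_certs='/path/to/ca_bundle.pem')
+#     >>> r = pool.request('GET', '/')                  # doctest: +SKIP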
+
+def connection_from_url(url, **kw):
+ """
+    Given a url, return a :class:`.ConnectionPool` instance of its host.
+
+    This is a shortcut for not having to parse out the scheme, host, and port
+    of the url before creating a :class:`.ConnectionPool` instance.
+
+ :param url:
+ Absolute URL string that must include the scheme. Port is optional.
+
+ :param \\**kw:
+ Passes additional parameters to the constructor of the appropriate
+ :class:`.ConnectionPool`. Useful for specifying things like
+ timeout, maxsize, headers, etc.
+
+ Example::
+
+ >>> conn = connection_from_url('http://google.com/')
+ >>> r = conn.request('GET', '/')
+ """
+ scheme, host, port = get_host(url)
+ port = port or port_by_scheme.get(scheme, 80)
+ if scheme == 'https':
+ return HTTPSConnectionPool(host, port=port, **kw)
+ else:
+ return HTTPConnectionPool(host, port=port, **kw)
+
+
+def _ipv6_host(host):
+ """
+ Process IPv6 address literals
+ """
+
+ # httplib doesn't like it when we include brackets in IPv6 addresses
+ # Specifically, if we include brackets but also pass the port then
+ # httplib crazily doubles up the square brackets on the Host header.
+ # Instead, we need to make sure we never pass ``None`` as the port.
+ # However, for backward compatibility reasons we can't actually
+ # *assert* that. See http://bugs.python.org/issue28539
+ #
+    # Also, if an IPv6 address literal has a zone identifier, the
+    # percent sign might be URI-encoded; convert it back into ASCII.
+ if host.startswith('[') and host.endswith(']'):
+ host = host.replace('%25', '%').strip('[]')
+ return host
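+
+
+# Illustrative examples of what ``_ipv6_host`` does (the addresses are
+# hypothetical):
+#
+#     >>> _ipv6_host('[2001:db8::1]')
+#     '2001:db8::1'
+#     >>> _ipv6_host('[fe80::1%25eth0]')    # URI-encoded zone identifier
+#     'fe80::1%eth0'
+#     >>> _ipv6_host('example.com')         # non-IPv6 hosts pass through
+#     'example.com'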
diff --git a/collectors/python.d.plugin/python_modules/urllib3/contrib/__init__.py b/collectors/python.d.plugin/python_modules/urllib3/contrib/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/urllib3/contrib/__init__.py
diff --git a/collectors/python.d.plugin/python_modules/urllib3/contrib/_securetransport/__init__.py b/collectors/python.d.plugin/python_modules/urllib3/contrib/_securetransport/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/urllib3/contrib/_securetransport/__init__.py
diff --git a/collectors/python.d.plugin/python_modules/urllib3/contrib/_securetransport/bindings.py b/collectors/python.d.plugin/python_modules/urllib3/contrib/_securetransport/bindings.py
new file mode 100644
index 000000000..bb826673f
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/urllib3/contrib/_securetransport/bindings.py
@@ -0,0 +1,591 @@
+# SPDX-License-Identifier: MIT
+"""
+This module uses ctypes to bind a whole bunch of functions and constants from
+SecureTransport. The goal here is to provide the low-level API to
+SecureTransport. These are essentially the C-level functions and constants, and
+they're pretty gross to work with.
+
+This code is a bastardised version of the code found in Will Bond's oscrypto
+library. An enormous debt is owed to him for blazing this trail for us. For
+that reason, this code should be considered to be covered both by urllib3's
+license and by oscrypto's:
+
+ Copyright (c) 2015-2016 Will Bond <will@wbond.net>
+
+ Permission is hereby granted, free of charge, to any person obtaining a
+ copy of this software and associated documentation files (the "Software"),
+ to deal in the Software without restriction, including without limitation
+ the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ and/or sell copies of the Software, and to permit persons to whom the
+ Software is furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in
+ all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ DEALINGS IN THE SOFTWARE.
+"""
+from __future__ import absolute_import
+
+import platform
+from ctypes.util import find_library
+from ctypes import (
+ c_void_p, c_int32, c_char_p, c_size_t, c_byte, c_uint32, c_ulong, c_long,
+ c_bool
+)
+from ctypes import CDLL, POINTER, CFUNCTYPE
+
+
+security_path = find_library('Security')
+if not security_path:
+ raise ImportError('The library Security could not be found')
+
+
+core_foundation_path = find_library('CoreFoundation')
+if not core_foundation_path:
+ raise ImportError('The library CoreFoundation could not be found')
+
+
+version = platform.mac_ver()[0]
+version_info = tuple(map(int, version.split('.')))
+if version_info < (10, 8):
+ raise OSError(
+ 'Only OS X 10.8 and newer are supported, not %s.%s' % (
+ version_info[0], version_info[1]
+ )
+ )
+
+Security = CDLL(security_path, use_errno=True)
+CoreFoundation = CDLL(core_foundation_path, use_errno=True)
+
+Boolean = c_bool
+CFIndex = c_long
+CFStringEncoding = c_uint32
+CFData = c_void_p
+CFString = c_void_p
+CFArray = c_void_p
+CFMutableArray = c_void_p
+CFDictionary = c_void_p
+CFError = c_void_p
+CFType = c_void_p
+CFTypeID = c_ulong
+
+CFTypeRef = POINTER(CFType)
+CFAllocatorRef = c_void_p
+
+OSStatus = c_int32
+
+CFDataRef = POINTER(CFData)
+CFStringRef = POINTER(CFString)
+CFArrayRef = POINTER(CFArray)
+CFMutableArrayRef = POINTER(CFMutableArray)
+CFDictionaryRef = POINTER(CFDictionary)
+CFArrayCallBacks = c_void_p
+CFDictionaryKeyCallBacks = c_void_p
+CFDictionaryValueCallBacks = c_void_p
+
+SecCertificateRef = POINTER(c_void_p)
+SecExternalFormat = c_uint32
+SecExternalItemType = c_uint32
+SecIdentityRef = POINTER(c_void_p)
+SecItemImportExportFlags = c_uint32
+SecItemImportExportKeyParameters = c_void_p
+SecKeychainRef = POINTER(c_void_p)
+SSLProtocol = c_uint32
+SSLCipherSuite = c_uint32
+SSLContextRef = POINTER(c_void_p)
+SecTrustRef = POINTER(c_void_p)
+SSLConnectionRef = c_uint32
+SecTrustResultType = c_uint32
+SecTrustOptionFlags = c_uint32
+SSLProtocolSide = c_uint32
+SSLConnectionType = c_uint32
+SSLSessionOption = c_uint32
+
+
+try:
+ Security.SecItemImport.argtypes = [
+ CFDataRef,
+ CFStringRef,
+ POINTER(SecExternalFormat),
+ POINTER(SecExternalItemType),
+ SecItemImportExportFlags,
+ POINTER(SecItemImportExportKeyParameters),
+ SecKeychainRef,
+ POINTER(CFArrayRef),
+ ]
+ Security.SecItemImport.restype = OSStatus
+
+ Security.SecCertificateGetTypeID.argtypes = []
+ Security.SecCertificateGetTypeID.restype = CFTypeID
+
+ Security.SecIdentityGetTypeID.argtypes = []
+ Security.SecIdentityGetTypeID.restype = CFTypeID
+
+ Security.SecKeyGetTypeID.argtypes = []
+ Security.SecKeyGetTypeID.restype = CFTypeID
+
+ Security.SecCertificateCreateWithData.argtypes = [
+ CFAllocatorRef,
+ CFDataRef
+ ]
+ Security.SecCertificateCreateWithData.restype = SecCertificateRef
+
+ Security.SecCertificateCopyData.argtypes = [
+ SecCertificateRef
+ ]
+ Security.SecCertificateCopyData.restype = CFDataRef
+
+ Security.SecCopyErrorMessageString.argtypes = [
+ OSStatus,
+ c_void_p
+ ]
+ Security.SecCopyErrorMessageString.restype = CFStringRef
+
+ Security.SecIdentityCreateWithCertificate.argtypes = [
+ CFTypeRef,
+ SecCertificateRef,
+ POINTER(SecIdentityRef)
+ ]
+ Security.SecIdentityCreateWithCertificate.restype = OSStatus
+
+ Security.SecKeychainCreate.argtypes = [
+ c_char_p,
+ c_uint32,
+ c_void_p,
+ Boolean,
+ c_void_p,
+ POINTER(SecKeychainRef)
+ ]
+ Security.SecKeychainCreate.restype = OSStatus
+
+ Security.SecKeychainDelete.argtypes = [
+ SecKeychainRef
+ ]
+ Security.SecKeychainDelete.restype = OSStatus
+
+ Security.SecPKCS12Import.argtypes = [
+ CFDataRef,
+ CFDictionaryRef,
+ POINTER(CFArrayRef)
+ ]
+ Security.SecPKCS12Import.restype = OSStatus
+
+ SSLReadFunc = CFUNCTYPE(OSStatus, SSLConnectionRef, c_void_p, POINTER(c_size_t))
+ SSLWriteFunc = CFUNCTYPE(OSStatus, SSLConnectionRef, POINTER(c_byte), POINTER(c_size_t))
+
+ Security.SSLSetIOFuncs.argtypes = [
+ SSLContextRef,
+ SSLReadFunc,
+ SSLWriteFunc
+ ]
+ Security.SSLSetIOFuncs.restype = OSStatus
+
+ Security.SSLSetPeerID.argtypes = [
+ SSLContextRef,
+ c_char_p,
+ c_size_t
+ ]
+ Security.SSLSetPeerID.restype = OSStatus
+
+ Security.SSLSetCertificate.argtypes = [
+ SSLContextRef,
+ CFArrayRef
+ ]
+ Security.SSLSetCertificate.restype = OSStatus
+
+ Security.SSLSetCertificateAuthorities.argtypes = [
+ SSLContextRef,
+ CFTypeRef,
+ Boolean
+ ]
+ Security.SSLSetCertificateAuthorities.restype = OSStatus
+
+ Security.SSLSetConnection.argtypes = [
+ SSLContextRef,
+ SSLConnectionRef
+ ]
+ Security.SSLSetConnection.restype = OSStatus
+
+ Security.SSLSetPeerDomainName.argtypes = [
+ SSLContextRef,
+ c_char_p,
+ c_size_t
+ ]
+ Security.SSLSetPeerDomainName.restype = OSStatus
+
+ Security.SSLHandshake.argtypes = [
+ SSLContextRef
+ ]
+ Security.SSLHandshake.restype = OSStatus
+
+ Security.SSLRead.argtypes = [
+ SSLContextRef,
+ c_char_p,
+ c_size_t,
+ POINTER(c_size_t)
+ ]
+ Security.SSLRead.restype = OSStatus
+
+ Security.SSLWrite.argtypes = [
+ SSLContextRef,
+ c_char_p,
+ c_size_t,
+ POINTER(c_size_t)
+ ]
+ Security.SSLWrite.restype = OSStatus
+
+ Security.SSLClose.argtypes = [
+ SSLContextRef
+ ]
+ Security.SSLClose.restype = OSStatus
+
+ Security.SSLGetNumberSupportedCiphers.argtypes = [
+ SSLContextRef,
+ POINTER(c_size_t)
+ ]
+ Security.SSLGetNumberSupportedCiphers.restype = OSStatus
+
+ Security.SSLGetSupportedCiphers.argtypes = [
+ SSLContextRef,
+ POINTER(SSLCipherSuite),
+ POINTER(c_size_t)
+ ]
+ Security.SSLGetSupportedCiphers.restype = OSStatus
+
+ Security.SSLSetEnabledCiphers.argtypes = [
+ SSLContextRef,
+ POINTER(SSLCipherSuite),
+ c_size_t
+ ]
+ Security.SSLSetEnabledCiphers.restype = OSStatus
+
+    Security.SSLGetNumberEnabledCiphers.argtypes = [
+ SSLContextRef,
+ POINTER(c_size_t)
+ ]
+ Security.SSLGetNumberEnabledCiphers.restype = OSStatus
+
+ Security.SSLGetEnabledCiphers.argtypes = [
+ SSLContextRef,
+ POINTER(SSLCipherSuite),
+ POINTER(c_size_t)
+ ]
+ Security.SSLGetEnabledCiphers.restype = OSStatus
+
+ Security.SSLGetNegotiatedCipher.argtypes = [
+ SSLContextRef,
+ POINTER(SSLCipherSuite)
+ ]
+ Security.SSLGetNegotiatedCipher.restype = OSStatus
+
+ Security.SSLGetNegotiatedProtocolVersion.argtypes = [
+ SSLContextRef,
+ POINTER(SSLProtocol)
+ ]
+ Security.SSLGetNegotiatedProtocolVersion.restype = OSStatus
+
+ Security.SSLCopyPeerTrust.argtypes = [
+ SSLContextRef,
+ POINTER(SecTrustRef)
+ ]
+ Security.SSLCopyPeerTrust.restype = OSStatus
+
+ Security.SecTrustSetAnchorCertificates.argtypes = [
+ SecTrustRef,
+ CFArrayRef
+ ]
+ Security.SecTrustSetAnchorCertificates.restype = OSStatus
+
+    Security.SecTrustSetAnchorCertificatesOnly.argtypes = [
+ SecTrustRef,
+ Boolean
+ ]
+ Security.SecTrustSetAnchorCertificatesOnly.restype = OSStatus
+
+ Security.SecTrustEvaluate.argtypes = [
+ SecTrustRef,
+ POINTER(SecTrustResultType)
+ ]
+ Security.SecTrustEvaluate.restype = OSStatus
+
+ Security.SecTrustGetCertificateCount.argtypes = [
+ SecTrustRef
+ ]
+ Security.SecTrustGetCertificateCount.restype = CFIndex
+
+ Security.SecTrustGetCertificateAtIndex.argtypes = [
+ SecTrustRef,
+ CFIndex
+ ]
+ Security.SecTrustGetCertificateAtIndex.restype = SecCertificateRef
+
+ Security.SSLCreateContext.argtypes = [
+ CFAllocatorRef,
+ SSLProtocolSide,
+ SSLConnectionType
+ ]
+ Security.SSLCreateContext.restype = SSLContextRef
+
+ Security.SSLSetSessionOption.argtypes = [
+ SSLContextRef,
+ SSLSessionOption,
+ Boolean
+ ]
+ Security.SSLSetSessionOption.restype = OSStatus
+
+ Security.SSLSetProtocolVersionMin.argtypes = [
+ SSLContextRef,
+ SSLProtocol
+ ]
+ Security.SSLSetProtocolVersionMin.restype = OSStatus
+
+ Security.SSLSetProtocolVersionMax.argtypes = [
+ SSLContextRef,
+ SSLProtocol
+ ]
+ Security.SSLSetProtocolVersionMax.restype = OSStatus
+
+ Security.SecCopyErrorMessageString.argtypes = [
+ OSStatus,
+ c_void_p
+ ]
+ Security.SecCopyErrorMessageString.restype = CFStringRef
+
+ Security.SSLReadFunc = SSLReadFunc
+ Security.SSLWriteFunc = SSLWriteFunc
+ Security.SSLContextRef = SSLContextRef
+ Security.SSLProtocol = SSLProtocol
+ Security.SSLCipherSuite = SSLCipherSuite
+ Security.SecIdentityRef = SecIdentityRef
+ Security.SecKeychainRef = SecKeychainRef
+ Security.SecTrustRef = SecTrustRef
+ Security.SecTrustResultType = SecTrustResultType
+ Security.SecExternalFormat = SecExternalFormat
+ Security.OSStatus = OSStatus
+
+ Security.kSecImportExportPassphrase = CFStringRef.in_dll(
+ Security, 'kSecImportExportPassphrase'
+ )
+ Security.kSecImportItemIdentity = CFStringRef.in_dll(
+ Security, 'kSecImportItemIdentity'
+ )
+
+ # CoreFoundation time!
+ CoreFoundation.CFRetain.argtypes = [
+ CFTypeRef
+ ]
+ CoreFoundation.CFRetain.restype = CFTypeRef
+
+ CoreFoundation.CFRelease.argtypes = [
+ CFTypeRef
+ ]
+ CoreFoundation.CFRelease.restype = None
+
+ CoreFoundation.CFGetTypeID.argtypes = [
+ CFTypeRef
+ ]
+ CoreFoundation.CFGetTypeID.restype = CFTypeID
+
+ CoreFoundation.CFStringCreateWithCString.argtypes = [
+ CFAllocatorRef,
+ c_char_p,
+ CFStringEncoding
+ ]
+ CoreFoundation.CFStringCreateWithCString.restype = CFStringRef
+
+ CoreFoundation.CFStringGetCStringPtr.argtypes = [
+ CFStringRef,
+ CFStringEncoding
+ ]
+ CoreFoundation.CFStringGetCStringPtr.restype = c_char_p
+
+ CoreFoundation.CFStringGetCString.argtypes = [
+ CFStringRef,
+ c_char_p,
+ CFIndex,
+ CFStringEncoding
+ ]
+ CoreFoundation.CFStringGetCString.restype = c_bool
+
+ CoreFoundation.CFDataCreate.argtypes = [
+ CFAllocatorRef,
+ c_char_p,
+ CFIndex
+ ]
+ CoreFoundation.CFDataCreate.restype = CFDataRef
+
+ CoreFoundation.CFDataGetLength.argtypes = [
+ CFDataRef
+ ]
+ CoreFoundation.CFDataGetLength.restype = CFIndex
+
+ CoreFoundation.CFDataGetBytePtr.argtypes = [
+ CFDataRef
+ ]
+ CoreFoundation.CFDataGetBytePtr.restype = c_void_p
+
+ CoreFoundation.CFDictionaryCreate.argtypes = [
+ CFAllocatorRef,
+ POINTER(CFTypeRef),
+ POINTER(CFTypeRef),
+ CFIndex,
+ CFDictionaryKeyCallBacks,
+ CFDictionaryValueCallBacks
+ ]
+ CoreFoundation.CFDictionaryCreate.restype = CFDictionaryRef
+
+ CoreFoundation.CFDictionaryGetValue.argtypes = [
+ CFDictionaryRef,
+ CFTypeRef
+ ]
+ CoreFoundation.CFDictionaryGetValue.restype = CFTypeRef
+
+ CoreFoundation.CFArrayCreate.argtypes = [
+ CFAllocatorRef,
+ POINTER(CFTypeRef),
+ CFIndex,
+ CFArrayCallBacks,
+ ]
+ CoreFoundation.CFArrayCreate.restype = CFArrayRef
+
+ CoreFoundation.CFArrayCreateMutable.argtypes = [
+ CFAllocatorRef,
+ CFIndex,
+ CFArrayCallBacks
+ ]
+ CoreFoundation.CFArrayCreateMutable.restype = CFMutableArrayRef
+
+ CoreFoundation.CFArrayAppendValue.argtypes = [
+ CFMutableArrayRef,
+ c_void_p
+ ]
+ CoreFoundation.CFArrayAppendValue.restype = None
+
+ CoreFoundation.CFArrayGetCount.argtypes = [
+ CFArrayRef
+ ]
+ CoreFoundation.CFArrayGetCount.restype = CFIndex
+
+ CoreFoundation.CFArrayGetValueAtIndex.argtypes = [
+ CFArrayRef,
+ CFIndex
+ ]
+ CoreFoundation.CFArrayGetValueAtIndex.restype = c_void_p
+
+ CoreFoundation.kCFAllocatorDefault = CFAllocatorRef.in_dll(
+ CoreFoundation, 'kCFAllocatorDefault'
+ )
+ CoreFoundation.kCFTypeArrayCallBacks = c_void_p.in_dll(CoreFoundation, 'kCFTypeArrayCallBacks')
+ CoreFoundation.kCFTypeDictionaryKeyCallBacks = c_void_p.in_dll(
+ CoreFoundation, 'kCFTypeDictionaryKeyCallBacks'
+ )
+ CoreFoundation.kCFTypeDictionaryValueCallBacks = c_void_p.in_dll(
+ CoreFoundation, 'kCFTypeDictionaryValueCallBacks'
+ )
+
+ CoreFoundation.CFTypeRef = CFTypeRef
+ CoreFoundation.CFArrayRef = CFArrayRef
+ CoreFoundation.CFStringRef = CFStringRef
+ CoreFoundation.CFDictionaryRef = CFDictionaryRef
+
+except AttributeError:
+ raise ImportError('Error initializing ctypes')
+
+
+class CFConst(object):
+ """
+ A class object that acts as essentially a namespace for CoreFoundation
+ constants.
+ """
+ kCFStringEncodingUTF8 = CFStringEncoding(0x08000100)
+
+
+class SecurityConst(object):
+ """
+ A class object that acts as essentially a namespace for Security constants.
+ """
+ kSSLSessionOptionBreakOnServerAuth = 0
+
+ kSSLProtocol2 = 1
+ kSSLProtocol3 = 2
+ kTLSProtocol1 = 4
+ kTLSProtocol11 = 7
+ kTLSProtocol12 = 8
+
+ kSSLClientSide = 1
+ kSSLStreamType = 0
+
+ kSecFormatPEMSequence = 10
+
+ kSecTrustResultInvalid = 0
+ kSecTrustResultProceed = 1
+ # This gap is present on purpose: this was kSecTrustResultConfirm, which
+ # is deprecated.
+ kSecTrustResultDeny = 3
+ kSecTrustResultUnspecified = 4
+ kSecTrustResultRecoverableTrustFailure = 5
+ kSecTrustResultFatalTrustFailure = 6
+ kSecTrustResultOtherError = 7
+
+ errSSLProtocol = -9800
+ errSSLWouldBlock = -9803
+ errSSLClosedGraceful = -9805
+ errSSLClosedNoNotify = -9816
+ errSSLClosedAbort = -9806
+
+ errSSLXCertChainInvalid = -9807
+ errSSLCrypto = -9809
+ errSSLInternal = -9810
+ errSSLCertExpired = -9814
+ errSSLCertNotYetValid = -9815
+ errSSLUnknownRootCert = -9812
+ errSSLNoRootCert = -9813
+ errSSLHostNameMismatch = -9843
+ errSSLPeerHandshakeFail = -9824
+ errSSLPeerUserCancelled = -9839
+ errSSLWeakPeerEphemeralDHKey = -9850
+ errSSLServerAuthCompleted = -9841
+ errSSLRecordOverflow = -9847
+
+ errSecVerifyFailed = -67808
+ errSecNoTrustSettings = -25263
+ errSecItemNotFound = -25300
+ errSecInvalidTrustSettings = -25262
+
+ # Cipher suites. We only pick the ones our default cipher string allows.
+ TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384 = 0xC02C
+ TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 = 0xC030
+ TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256 = 0xC02B
+ TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 = 0xC02F
+ TLS_DHE_DSS_WITH_AES_256_GCM_SHA384 = 0x00A3
+ TLS_DHE_RSA_WITH_AES_256_GCM_SHA384 = 0x009F
+ TLS_DHE_DSS_WITH_AES_128_GCM_SHA256 = 0x00A2
+ TLS_DHE_RSA_WITH_AES_128_GCM_SHA256 = 0x009E
+ TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384 = 0xC024
+ TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384 = 0xC028
+ TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA = 0xC00A
+ TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA = 0xC014
+ TLS_DHE_RSA_WITH_AES_256_CBC_SHA256 = 0x006B
+ TLS_DHE_DSS_WITH_AES_256_CBC_SHA256 = 0x006A
+ TLS_DHE_RSA_WITH_AES_256_CBC_SHA = 0x0039
+ TLS_DHE_DSS_WITH_AES_256_CBC_SHA = 0x0038
+ TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256 = 0xC023
+ TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256 = 0xC027
+ TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA = 0xC009
+ TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA = 0xC013
+ TLS_DHE_RSA_WITH_AES_128_CBC_SHA256 = 0x0067
+ TLS_DHE_DSS_WITH_AES_128_CBC_SHA256 = 0x0040
+ TLS_DHE_RSA_WITH_AES_128_CBC_SHA = 0x0033
+ TLS_DHE_DSS_WITH_AES_128_CBC_SHA = 0x0032
+ TLS_RSA_WITH_AES_256_GCM_SHA384 = 0x009D
+ TLS_RSA_WITH_AES_128_GCM_SHA256 = 0x009C
+ TLS_RSA_WITH_AES_256_CBC_SHA256 = 0x003D
+ TLS_RSA_WITH_AES_128_CBC_SHA256 = 0x003C
+ TLS_RSA_WITH_AES_256_CBC_SHA = 0x0035
+ TLS_RSA_WITH_AES_128_CBC_SHA = 0x002F
diff --git a/collectors/python.d.plugin/python_modules/urllib3/contrib/_securetransport/low_level.py b/collectors/python.d.plugin/python_modules/urllib3/contrib/_securetransport/low_level.py
new file mode 100644
index 000000000..0f79a1372
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/urllib3/contrib/_securetransport/low_level.py
@@ -0,0 +1,344 @@
+# SPDX-License-Identifier: MIT
+"""
+Low-level helpers for the SecureTransport bindings.
+
+These are Python functions that are not directly related to the high-level APIs
+but are necessary to get them to work. They include a whole bunch of low-level
+CoreFoundation messing about and memory management. The concerns in this module
+are almost entirely about trying to avoid memory leaks and providing
+appropriate and useful assistance to the higher-level code.
+"""
+import base64
+import ctypes
+import itertools
+import re
+import os
+import ssl
+import tempfile
+
+from .bindings import Security, CoreFoundation, CFConst
+
+
+# This regular expression is used to grab PEM data out of a PEM bundle.
+_PEM_CERTS_RE = re.compile(
+ b"-----BEGIN CERTIFICATE-----\n(.*?)\n-----END CERTIFICATE-----", re.DOTALL
+)
+
+
+def _cf_data_from_bytes(bytestring):
+ """
+ Given a bytestring, create a CFData object from it. This CFData object must
+ be CFReleased by the caller.
+ """
+ return CoreFoundation.CFDataCreate(
+ CoreFoundation.kCFAllocatorDefault, bytestring, len(bytestring)
+ )
+
+
+def _cf_dictionary_from_tuples(tuples):
+ """
+ Given a list of Python tuples, create an associated CFDictionary.
+ """
+ dictionary_size = len(tuples)
+
+ # We need to get the dictionary keys and values out in the same order.
+ keys = (t[0] for t in tuples)
+ values = (t[1] for t in tuples)
+ cf_keys = (CoreFoundation.CFTypeRef * dictionary_size)(*keys)
+ cf_values = (CoreFoundation.CFTypeRef * dictionary_size)(*values)
+
+ return CoreFoundation.CFDictionaryCreate(
+ CoreFoundation.kCFAllocatorDefault,
+ cf_keys,
+ cf_values,
+ dictionary_size,
+ CoreFoundation.kCFTypeDictionaryKeyCallBacks,
+ CoreFoundation.kCFTypeDictionaryValueCallBacks,
+ )
+
+
+def _cf_string_to_unicode(value):
+ """
+ Creates a Unicode string from a CFString object. Used entirely for error
+ reporting.
+
+ Yes, it annoys me quite a lot that this function is this complex.
+ """
+ value_as_void_p = ctypes.cast(value, ctypes.POINTER(ctypes.c_void_p))
+
+ string = CoreFoundation.CFStringGetCStringPtr(
+ value_as_void_p,
+ CFConst.kCFStringEncodingUTF8
+ )
+ if string is None:
+ buffer = ctypes.create_string_buffer(1024)
+ result = CoreFoundation.CFStringGetCString(
+ value_as_void_p,
+ buffer,
+ 1024,
+ CFConst.kCFStringEncodingUTF8
+ )
+ if not result:
+ raise OSError('Error copying C string from CFStringRef')
+ string = buffer.value
+ if string is not None:
+ string = string.decode('utf-8')
+ return string
+
+
+def _assert_no_error(error, exception_class=None):
+ """
+    Checks the return code and raises an exception if there is an error to
+    report.
+ """
+ if error == 0:
+ return
+
+ cf_error_string = Security.SecCopyErrorMessageString(error, None)
+ output = _cf_string_to_unicode(cf_error_string)
+ CoreFoundation.CFRelease(cf_error_string)
+
+ if output is None or output == u'':
+ output = u'OSStatus %s' % error
+
+ if exception_class is None:
+ exception_class = ssl.SSLError
+
+ raise exception_class(output)
+
+
+def _cert_array_from_pem(pem_bundle):
+ """
+ Given a bundle of certs in PEM format, turns them into a CFArray of certs
+ that can be used to validate a cert chain.
+ """
+ der_certs = [
+ base64.b64decode(match.group(1))
+ for match in _PEM_CERTS_RE.finditer(pem_bundle)
+ ]
+ if not der_certs:
+ raise ssl.SSLError("No root certificates specified")
+
+ cert_array = CoreFoundation.CFArrayCreateMutable(
+ CoreFoundation.kCFAllocatorDefault,
+ 0,
+ ctypes.byref(CoreFoundation.kCFTypeArrayCallBacks)
+ )
+ if not cert_array:
+ raise ssl.SSLError("Unable to allocate memory!")
+
+ try:
+ for der_bytes in der_certs:
+ certdata = _cf_data_from_bytes(der_bytes)
+ if not certdata:
+ raise ssl.SSLError("Unable to allocate memory!")
+ cert = Security.SecCertificateCreateWithData(
+ CoreFoundation.kCFAllocatorDefault, certdata
+ )
+ CoreFoundation.CFRelease(certdata)
+ if not cert:
+ raise ssl.SSLError("Unable to build cert object!")
+
+ CoreFoundation.CFArrayAppendValue(cert_array, cert)
+ CoreFoundation.CFRelease(cert)
+    except Exception:
+        # We need to free the array before the exception bubbles further.
+        # We only want to do that if an error occurs: otherwise, the caller
+        # should free.
+        CoreFoundation.CFRelease(cert_array)
+        raise
+
+ return cert_array
+
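+# A hedged usage sketch: the caller owns the returned array and should release
+# it when done; the bundle path below is hypothetical.
+#
+#     >>> with open('/path/to/ca_bundle.pem', 'rb') as f:       # doctest: +SKIP
+#     ...     trust_anchors = _cert_array_from_pem(f.read())
+#     >>> # ... hand trust_anchors to Security.SecTrustSetAnchorCertificates ...
+#     >>> CoreFoundation.CFRelease(trust_anchors)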
+
+def _is_cert(item):
+ """
+ Returns True if a given CFTypeRef is a certificate.
+ """
+ expected = Security.SecCertificateGetTypeID()
+ return CoreFoundation.CFGetTypeID(item) == expected
+
+
+def _is_identity(item):
+ """
+ Returns True if a given CFTypeRef is an identity.
+ """
+ expected = Security.SecIdentityGetTypeID()
+ return CoreFoundation.CFGetTypeID(item) == expected
+
+
+def _temporary_keychain():
+ """
+ This function creates a temporary Mac keychain that we can use to work with
+ credentials. This keychain uses a one-time password and a temporary file to
+ store the data. We expect to have one keychain per socket. The returned
+ SecKeychainRef must be freed by the caller, including calling
+ SecKeychainDelete.
+
+ Returns a tuple of the SecKeychainRef and the path to the temporary
+ directory that contains it.
+ """
+ # Unfortunately, SecKeychainCreate requires a path to a keychain. This
+ # means we cannot use mkstemp to use a generic temporary file. Instead,
+ # we're going to create a temporary directory and a filename to use there.
+ # This filename will be 8 random bytes expanded into base64. We also need
+ # some random bytes to password-protect the keychain we're creating, so we
+ # ask for 40 random bytes.
+ random_bytes = os.urandom(40)
+ filename = base64.b64encode(random_bytes[:8]).decode('utf-8')
+ password = base64.b64encode(random_bytes[8:]) # Must be valid UTF-8
+ tempdirectory = tempfile.mkdtemp()
+
+ keychain_path = os.path.join(tempdirectory, filename).encode('utf-8')
+
+ # We now want to create the keychain itself.
+ keychain = Security.SecKeychainRef()
+ status = Security.SecKeychainCreate(
+ keychain_path,
+ len(password),
+ password,
+ False,
+ None,
+ ctypes.byref(keychain)
+ )
+ _assert_no_error(status)
+
+ # Having created the keychain, we want to pass it off to the caller.
+ return keychain, tempdirectory
+
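+# A hedged usage sketch of the cleanup the docstring above asks the caller to
+# perform once the keychain is no longer needed:
+#
+#     >>> import shutil
+#     >>> keychain, tempdir = _temporary_keychain()             # doctest: +SKIP
+#     >>> try:
+#     ...     pass  # use the keychain with SecItemImport / SecPKCS12Import
+#     ... finally:
+#     ...     Security.SecKeychainDelete(keychain)
+#     ...     CoreFoundation.CFRelease(keychain)
+#     ...     shutil.rmtree(tempdir)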
+
+def _load_items_from_file(keychain, path):
+ """
+ Given a single file, loads all the trust objects from it into arrays and
+ the keychain.
+ Returns a tuple of lists: the first list is a list of identities, the
+ second a list of certs.
+ """
+ certificates = []
+ identities = []
+ result_array = None
+
+ with open(path, 'rb') as f:
+ raw_filedata = f.read()
+
+ try:
+ filedata = CoreFoundation.CFDataCreate(
+ CoreFoundation.kCFAllocatorDefault,
+ raw_filedata,
+ len(raw_filedata)
+ )
+ result_array = CoreFoundation.CFArrayRef()
+ result = Security.SecItemImport(
+ filedata, # cert data
+ None, # Filename, leaving it out for now
+ None, # What the type of the file is, we don't care
+ None, # what's in the file, we don't care
+ 0, # import flags
+ None, # key params, can include passphrase in the future
+ keychain, # The keychain to insert into
+ ctypes.byref(result_array) # Results
+ )
+ _assert_no_error(result)
+
+ # A CFArray is not very useful to us as an intermediary
+ # representation, so we are going to extract the objects we want
+ # and then free the array. We don't need to keep hold of keys: the
+ # keychain already has them!
+ result_count = CoreFoundation.CFArrayGetCount(result_array)
+ for index in range(result_count):
+ item = CoreFoundation.CFArrayGetValueAtIndex(
+ result_array, index
+ )
+ item = ctypes.cast(item, CoreFoundation.CFTypeRef)
+
+ if _is_cert(item):
+ CoreFoundation.CFRetain(item)
+ certificates.append(item)
+ elif _is_identity(item):
+ CoreFoundation.CFRetain(item)
+ identities.append(item)
+ finally:
+ if result_array:
+ CoreFoundation.CFRelease(result_array)
+
+ CoreFoundation.CFRelease(filedata)
+
+ return (identities, certificates)
+
+
+def _load_client_cert_chain(keychain, *paths):
+ """
+ Load certificates and maybe keys from a number of files. Has the end goal
+ of returning a CFArray containing one SecIdentityRef, and then zero or more
+ SecCertificateRef objects, suitable for use as a client certificate trust
+ chain.
+ """
+ # Ok, the strategy.
+ #
+ # This relies on knowing that macOS will not give you a SecIdentityRef
+ # unless you have imported a key into a keychain. This is a somewhat
+ # artificial limitation of macOS (for example, it doesn't necessarily
+ # affect iOS), but there is nothing inside Security.framework that lets you
+ # get a SecIdentityRef without having a key in a keychain.
+ #
+ # So the policy here is we take all the files and iterate them in order.
+ # Each one will use SecItemImport to have one or more objects loaded from
+ # it. We will also point at a keychain that macOS can use to work with the
+ # private key.
+ #
+ # Once we have all the objects, we'll check what we actually have. If we
+ # already have a SecIdentityRef in hand, fab: we'll use that. Otherwise,
+ # we'll take the first certificate (which we assume to be our leaf) and
+ # ask the keychain to give us a SecIdentityRef with that cert's associated
+ # key.
+ #
+ # We'll then return a CFArray containing the trust chain: one
+ # SecIdentityRef and then zero-or-more SecCertificateRef objects. The
+ # responsibility for freeing this CFArray will be with the caller. This
+ # CFArray must remain alive for the entire connection, so in practice it
+ # will be stored with a single SSLSocket, along with the reference to the
+ # keychain.
+ certificates = []
+ identities = []
+
+ # Filter out bad paths.
+ paths = (path for path in paths if path)
+
+ try:
+ for file_path in paths:
+ new_identities, new_certs = _load_items_from_file(
+ keychain, file_path
+ )
+ identities.extend(new_identities)
+ certificates.extend(new_certs)
+
+ # Ok, we have everything. The question is: do we have an identity? If
+ # not, we want to grab one from the first cert we have.
+ if not identities:
+ new_identity = Security.SecIdentityRef()
+ status = Security.SecIdentityCreateWithCertificate(
+ keychain,
+ certificates[0],
+ ctypes.byref(new_identity)
+ )
+ _assert_no_error(status)
+ identities.append(new_identity)
+
+ # We now want to release the original certificate, as we no longer
+ # need it.
+ CoreFoundation.CFRelease(certificates.pop(0))
+
+ # We now need to build a new CFArray that holds the trust chain.
+ trust_chain = CoreFoundation.CFArrayCreateMutable(
+ CoreFoundation.kCFAllocatorDefault,
+ 0,
+ ctypes.byref(CoreFoundation.kCFTypeArrayCallBacks),
+ )
+ for item in itertools.chain(identities, certificates):
+ # ArrayAppendValue does a CFRetain on the item. That's fine,
+ # because the finally block will release our other refs to them.
+ CoreFoundation.CFArrayAppendValue(trust_chain, item)
+
+ return trust_chain
+ finally:
+ for obj in itertools.chain(identities, certificates):
+ CoreFoundation.CFRelease(obj)
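+
+
+# A hedged usage sketch tying the helpers above together; the certificate and
+# key paths are hypothetical, and the returned chain must be released by the
+# caller once the connection is closed.
+#
+#     >>> keychain, tempdir = _temporary_keychain()             # doctest: +SKIP
+#     >>> chain = _load_client_cert_chain(keychain,
+#     ...                                 '/path/to/client.crt',
+#     ...                                 '/path/to/client.key')
+#     >>> # ... pass `chain` to Security.SSLSetCertificate(context, chain) ...
+#     >>> CoreFoundation.CFRelease(chain)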
diff --git a/collectors/python.d.plugin/python_modules/urllib3/contrib/appengine.py b/collectors/python.d.plugin/python_modules/urllib3/contrib/appengine.py
new file mode 100644
index 000000000..e74589fa8
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/urllib3/contrib/appengine.py
@@ -0,0 +1,297 @@
+# SPDX-License-Identifier: MIT
+"""
+This module provides a pool manager that uses Google App Engine's
+`URLFetch Service <https://cloud.google.com/appengine/docs/python/urlfetch>`_.
+
+Example usage::
+
+ from urllib3 import PoolManager
+ from urllib3.contrib.appengine import AppEngineManager, is_appengine_sandbox
+
+ if is_appengine_sandbox():
+ # AppEngineManager uses AppEngine's URLFetch API behind the scenes
+ http = AppEngineManager()
+ else:
+ # PoolManager uses a socket-level API behind the scenes
+ http = PoolManager()
+
+ r = http.request('GET', 'https://google.com/')
+
+There are `limitations <https://cloud.google.com/appengine/docs/python/\
+urlfetch/#Python_Quotas_and_limits>`_ to the URLFetch service and it may not be
+the best choice for your application. There are three options for using
+urllib3 on Google App Engine:
+
+1. You can use :class:`AppEngineManager` with URLFetch. URLFetch is
+ cost-effective in many circumstances as long as your usage is within the
+ limitations.
+2. You can use a normal :class:`~urllib3.PoolManager` by enabling sockets.
+ Sockets also have `limitations and restrictions
+ <https://cloud.google.com/appengine/docs/python/sockets/\
+ #limitations-and-restrictions>`_ and have a lower free quota than URLFetch.
+ To use sockets, be sure to specify the following in your ``app.yaml``::
+
+ env_variables:
+ GAE_USE_SOCKETS_HTTPLIB : 'true'
+
+3. If you are using `App Engine Flexible
+   <https://cloud.google.com/appengine/docs/flexible/>`_, you can use the
+   standard :class:`PoolManager` without any configuration or special
+   environment variables.
+"""
+
+from __future__ import absolute_import
+import logging
+import os
+import warnings
+from ..packages.six.moves.urllib.parse import urljoin
+
+from ..exceptions import (
+ HTTPError,
+ HTTPWarning,
+ MaxRetryError,
+ ProtocolError,
+ TimeoutError,
+ SSLError
+)
+
+from ..packages.six import BytesIO
+from ..request import RequestMethods
+from ..response import HTTPResponse
+from ..util.timeout import Timeout
+from ..util.retry import Retry
+
+try:
+ from google.appengine.api import urlfetch
+except ImportError:
+ urlfetch = None
+
+
+log = logging.getLogger(__name__)
+
+
+class AppEnginePlatformWarning(HTTPWarning):
+ pass
+
+
+class AppEnginePlatformError(HTTPError):
+ pass
+
+
+class AppEngineManager(RequestMethods):
+ """
+ Connection manager for Google App Engine sandbox applications.
+
+ This manager uses the URLFetch service directly instead of using the
+ emulated httplib, and is subject to URLFetch limitations as described in
+ the App Engine documentation `here
+ <https://cloud.google.com/appengine/docs/python/urlfetch>`_.
+
+    Notably it will raise an :class:`AppEnginePlatformError` if:
+    * URLFetch is not available.
+    * You attempt to use this on App Engine Flexible, as full socket
+      support is available there.
+    * A request size is more than 10 megabytes.
+    * A response size is more than 32 megabytes.
+    * You use an unsupported request method such as OPTIONS.
+
+ Beyond those cases, it will raise normal urllib3 errors.
+ """
+
+ def __init__(self, headers=None, retries=None, validate_certificate=True,
+ urlfetch_retries=True):
+ if not urlfetch:
+ raise AppEnginePlatformError(
+ "URLFetch is not available in this environment.")
+
+ if is_prod_appengine_mvms():
+ raise AppEnginePlatformError(
+ "Use normal urllib3.PoolManager instead of AppEngineManager"
+ "on Managed VMs, as using URLFetch is not necessary in "
+ "this environment.")
+
+ warnings.warn(
+ "urllib3 is using URLFetch on Google App Engine sandbox instead "
+ "of sockets. To use sockets directly instead of URLFetch see "
+ "https://urllib3.readthedocs.io/en/latest/reference/urllib3.contrib.html.",
+ AppEnginePlatformWarning)
+
+ RequestMethods.__init__(self, headers)
+ self.validate_certificate = validate_certificate
+ self.urlfetch_retries = urlfetch_retries
+
+ self.retries = retries or Retry.DEFAULT
+
+ def __enter__(self):
+ return self
+
+ def __exit__(self, exc_type, exc_val, exc_tb):
+ # Return False to re-raise any potential exceptions
+ return False
+
+ def urlopen(self, method, url, body=None, headers=None,
+ retries=None, redirect=True, timeout=Timeout.DEFAULT_TIMEOUT,
+ **response_kw):
+
+ retries = self._get_retries(retries, redirect)
+
+ try:
+ follow_redirects = (
+ redirect and
+ retries.redirect != 0 and
+ retries.total)
+ response = urlfetch.fetch(
+ url,
+ payload=body,
+ method=method,
+ headers=headers or {},
+ allow_truncated=False,
+ follow_redirects=self.urlfetch_retries and follow_redirects,
+ deadline=self._get_absolute_timeout(timeout),
+ validate_certificate=self.validate_certificate,
+ )
+ except urlfetch.DeadlineExceededError as e:
+ raise TimeoutError(self, e)
+
+ except urlfetch.InvalidURLError as e:
+ if 'too large' in str(e):
+ raise AppEnginePlatformError(
+ "URLFetch request too large, URLFetch only "
+ "supports requests up to 10mb in size.", e)
+ raise ProtocolError(e)
+
+ except urlfetch.DownloadError as e:
+ if 'Too many redirects' in str(e):
+ raise MaxRetryError(self, url, reason=e)
+ raise ProtocolError(e)
+
+ except urlfetch.ResponseTooLargeError as e:
+ raise AppEnginePlatformError(
+ "URLFetch response too large, URLFetch only supports"
+ "responses up to 32mb in size.", e)
+
+ except urlfetch.SSLCertificateError as e:
+ raise SSLError(e)
+
+ except urlfetch.InvalidMethodError as e:
+ raise AppEnginePlatformError(
+ "URLFetch does not support method: %s" % method, e)
+
+ http_response = self._urlfetch_response_to_http_response(
+ response, retries=retries, **response_kw)
+
+ # Handle redirect?
+ redirect_location = redirect and http_response.get_redirect_location()
+ if redirect_location:
+ # Check for redirect response
+ if (self.urlfetch_retries and retries.raise_on_redirect):
+ raise MaxRetryError(self, url, "too many redirects")
+ else:
+ if http_response.status == 303:
+ method = 'GET'
+
+ try:
+ retries = retries.increment(method, url, response=http_response, _pool=self)
+ except MaxRetryError:
+ if retries.raise_on_redirect:
+ raise MaxRetryError(self, url, "too many redirects")
+ return http_response
+
+ retries.sleep_for_retry(http_response)
+ log.debug("Redirecting %s -> %s", url, redirect_location)
+ redirect_url = urljoin(url, redirect_location)
+ return self.urlopen(
+ method, redirect_url, body, headers,
+ retries=retries, redirect=redirect,
+ timeout=timeout, **response_kw)
+
+ # Check if we should retry the HTTP response.
+ has_retry_after = bool(http_response.getheader('Retry-After'))
+ if retries.is_retry(method, http_response.status, has_retry_after):
+ retries = retries.increment(
+ method, url, response=http_response, _pool=self)
+ log.debug("Retry: %s", url)
+ retries.sleep(http_response)
+ return self.urlopen(
+ method, url,
+ body=body, headers=headers,
+ retries=retries, redirect=redirect,
+ timeout=timeout, **response_kw)
+
+ return http_response
+
+ def _urlfetch_response_to_http_response(self, urlfetch_resp, **response_kw):
+
+ if is_prod_appengine():
+ # Production GAE handles deflate encoding automatically, but does
+ # not remove the encoding header.
+ content_encoding = urlfetch_resp.headers.get('content-encoding')
+
+ if content_encoding == 'deflate':
+ del urlfetch_resp.headers['content-encoding']
+
+ transfer_encoding = urlfetch_resp.headers.get('transfer-encoding')
+ # We have a full response's content,
+ # so let's make sure we don't report ourselves as chunked data.
+ if transfer_encoding == 'chunked':
+ encodings = transfer_encoding.split(",")
+ encodings.remove('chunked')
+ urlfetch_resp.headers['transfer-encoding'] = ','.join(encodings)
+
+ return HTTPResponse(
+ # In order for decoding to work, we must present the content as
+ # a file-like object.
+ body=BytesIO(urlfetch_resp.content),
+ headers=urlfetch_resp.headers,
+ status=urlfetch_resp.status_code,
+ **response_kw
+ )
+
+ def _get_absolute_timeout(self, timeout):
+ if timeout is Timeout.DEFAULT_TIMEOUT:
+ return None # Defer to URLFetch's default.
+ if isinstance(timeout, Timeout):
+ if timeout._read is not None or timeout._connect is not None:
+ warnings.warn(
+ "URLFetch does not support granular timeout settings, "
+ "reverting to total or default URLFetch timeout.",
+ AppEnginePlatformWarning)
+ return timeout.total
+ return timeout
+
+ def _get_retries(self, retries, redirect):
+ if not isinstance(retries, Retry):
+ retries = Retry.from_int(
+ retries, redirect=redirect, default=self.retries)
+
+ if retries.connect or retries.read or retries.redirect:
+ warnings.warn(
+ "URLFetch only supports total retries and does not "
+ "recognize connect, read, or redirect retry parameters.",
+ AppEnginePlatformWarning)
+
+ return retries
+
+
+def is_appengine():
+ return (is_local_appengine() or
+ is_prod_appengine() or
+ is_prod_appengine_mvms())
+
+
+def is_appengine_sandbox():
+ return is_appengine() and not is_prod_appengine_mvms()
+
+
+def is_local_appengine():
+ return ('APPENGINE_RUNTIME' in os.environ and
+ 'Development/' in os.environ['SERVER_SOFTWARE'])
+
+
+def is_prod_appengine():
+ return ('APPENGINE_RUNTIME' in os.environ and
+ 'Google App Engine/' in os.environ['SERVER_SOFTWARE'] and
+ not is_prod_appengine_mvms())
+
+
+def is_prod_appengine_mvms():
+ return os.environ.get('GAE_VM', False) == 'true'
diff --git a/collectors/python.d.plugin/python_modules/urllib3/contrib/ntlmpool.py b/collectors/python.d.plugin/python_modules/urllib3/contrib/ntlmpool.py
new file mode 100644
index 000000000..3f8c9ebf5
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/urllib3/contrib/ntlmpool.py
@@ -0,0 +1,113 @@
+# SPDX-License-Identifier: MIT
+"""
+NTLM authenticating pool, contributed by erikcederstran
+
+Issue #10, see: http://code.google.com/p/urllib3/issues/detail?id=10
+"""
+from __future__ import absolute_import
+
+from logging import getLogger
+from ntlm import ntlm
+
+from .. import HTTPSConnectionPool
+from ..packages.six.moves.http_client import HTTPSConnection
+
+
+log = getLogger(__name__)
+
+
+class NTLMConnectionPool(HTTPSConnectionPool):
+ """
+ Implements an NTLM authentication version of an urllib3 connection pool
+ """
+
+ scheme = 'https'
+
+ def __init__(self, user, pw, authurl, *args, **kwargs):
+ """
+ authurl is a random URL on the server that is protected by NTLM.
+ user is the Windows user, probably in the DOMAIN\\username format.
+ pw is the password for the user.
+ """
+ super(NTLMConnectionPool, self).__init__(*args, **kwargs)
+ self.authurl = authurl
+ self.rawuser = user
+ user_parts = user.split('\\', 1)
+ self.domain = user_parts[0].upper()
+ self.user = user_parts[1]
+ self.pw = pw
+
+ def _new_conn(self):
+ # Performs the NTLM handshake that secures the connection. The socket
+ # must be kept open while requests are performed.
+ self.num_connections += 1
+ log.debug('Starting NTLM HTTPS connection no. %d: https://%s%s',
+ self.num_connections, self.host, self.authurl)
+
+ headers = {}
+ headers['Connection'] = 'Keep-Alive'
+ req_header = 'Authorization'
+ resp_header = 'www-authenticate'
+
+ conn = HTTPSConnection(host=self.host, port=self.port)
+
+ # Send negotiation message
+ headers[req_header] = (
+ 'NTLM %s' % ntlm.create_NTLM_NEGOTIATE_MESSAGE(self.rawuser))
+ log.debug('Request headers: %s', headers)
+ conn.request('GET', self.authurl, None, headers)
+ res = conn.getresponse()
+ reshdr = dict(res.getheaders())
+ log.debug('Response status: %s %s', res.status, res.reason)
+ log.debug('Response headers: %s', reshdr)
+ log.debug('Response data: %s [...]', res.read(100))
+
+ # Remove the reference to the socket, so that it can not be closed by
+ # the response object (we want to keep the socket open)
+ res.fp = None
+
+ # Server should respond with a challenge message
+ auth_header_values = reshdr[resp_header].split(', ')
+ auth_header_value = None
+ for s in auth_header_values:
+ if s[:5] == 'NTLM ':
+ auth_header_value = s[5:]
+ if auth_header_value is None:
+ raise Exception('Unexpected %s response header: %s' %
+ (resp_header, reshdr[resp_header]))
+
+ # Send authentication message
+ ServerChallenge, NegotiateFlags = \
+ ntlm.parse_NTLM_CHALLENGE_MESSAGE(auth_header_value)
+ auth_msg = ntlm.create_NTLM_AUTHENTICATE_MESSAGE(ServerChallenge,
+ self.user,
+ self.domain,
+ self.pw,
+ NegotiateFlags)
+ headers[req_header] = 'NTLM %s' % auth_msg
+ log.debug('Request headers: %s', headers)
+ conn.request('GET', self.authurl, None, headers)
+ res = conn.getresponse()
+ log.debug('Response status: %s %s', res.status, res.reason)
+ log.debug('Response headers: %s', dict(res.getheaders()))
+ log.debug('Response data: %s [...]', res.read()[:100])
+ if res.status != 200:
+ if res.status == 401:
+ raise Exception('Server rejected request: wrong '
+ 'username or password')
+ raise Exception('Wrong server response: %s %s' %
+ (res.status, res.reason))
+
+ res.fp = None
+ log.debug('Connection established')
+ return conn
+
+ def urlopen(self, method, url, body=None, headers=None, retries=3,
+ redirect=True, assert_same_host=True):
+ if headers is None:
+ headers = {}
+ headers['Connection'] = 'Keep-Alive'
+ return super(NTLMConnectionPool, self).urlopen(method, url, body,
+ headers, retries,
+ redirect,
+ assert_same_host)
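+
+
+# A hedged usage sketch; the host, credentials and URL below are hypothetical:
+#
+#     >>> pool = NTLMConnectionPool('DOMAIN\\someuser', 's3cr3t',
+#     ...                           authurl='/protected/',
+#     ...                           host='intranet.example')
+#     >>> r = pool.urlopen('GET', '/protected/page')    # doctest: +SKIP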
diff --git a/collectors/python.d.plugin/python_modules/urllib3/contrib/pyopenssl.py b/collectors/python.d.plugin/python_modules/urllib3/contrib/pyopenssl.py
new file mode 100644
index 000000000..8d373507d
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/urllib3/contrib/pyopenssl.py
@@ -0,0 +1,458 @@
+# SPDX-License-Identifier: MIT
+"""
+SSL with SNI_-support for Python 2. Follow these instructions if you would
+like to verify SSL certificates in Python 2. Note, the default libraries do
+*not* do certificate checking; you need to do additional work to validate
+certificates yourself.
+
+This needs the following packages installed:
+
+* pyOpenSSL (tested with 16.0.0)
+* cryptography (minimum 1.3.4, from pyopenssl)
+* idna (minimum 2.0, from cryptography)
+
+However, pyopenssl depends on cryptography, which depends on idna, so while we
+use all three directly here we end up having relatively few packages required.
+
+You can install them with the following command:
+
+ pip install pyopenssl cryptography idna
+
+To activate certificate checking, call
+:func:`~urllib3.contrib.pyopenssl.inject_into_urllib3` from your Python code
+before you begin making HTTP requests. This can be done in a ``sitecustomize``
+module, or at any other time before your application begins using ``urllib3``,
+like this::
+
+ try:
+ import urllib3.contrib.pyopenssl
+ urllib3.contrib.pyopenssl.inject_into_urllib3()
+ except ImportError:
+ pass
+
+Now you can use :mod:`urllib3` as you normally would, and it will support SNI
+when the required modules are installed.
+
+Activating this module also has the positive side effect of disabling SSL/TLS
+compression in Python 2 (see `CRIME attack`_).
+
+If you want to configure the default list of supported cipher suites, you can
+set the ``urllib3.contrib.pyopenssl.DEFAULT_SSL_CIPHER_LIST`` variable.
+
+.. _sni: https://en.wikipedia.org/wiki/Server_Name_Indication
+.. _crime attack: https://en.wikipedia.org/wiki/CRIME_(security_exploit)
+"""
+from __future__ import absolute_import
+
+import OpenSSL.SSL
+from cryptography import x509
+from cryptography.hazmat.backends.openssl import backend as openssl_backend
+from cryptography.hazmat.backends.openssl.x509 import _Certificate
+
+from socket import timeout, error as SocketError
+from io import BytesIO
+
+try: # Platform-specific: Python 2
+ from socket import _fileobject
+except ImportError: # Platform-specific: Python 3
+ _fileobject = None
+ from ..packages.backports.makefile import backport_makefile
+
+import logging
+import ssl
+
+try:
+ import six
+except ImportError:
+ from ..packages import six
+
+import sys
+
+from .. import util
+
+__all__ = ['inject_into_urllib3', 'extract_from_urllib3']
+
+# SNI always works.
+HAS_SNI = True
+
+# Map from urllib3 to PyOpenSSL compatible parameter-values.
+_openssl_versions = {
+ ssl.PROTOCOL_SSLv23: OpenSSL.SSL.SSLv23_METHOD,
+ ssl.PROTOCOL_TLSv1: OpenSSL.SSL.TLSv1_METHOD,
+}
+
+if hasattr(ssl, 'PROTOCOL_TLSv1_1') and hasattr(OpenSSL.SSL, 'TLSv1_1_METHOD'):
+ _openssl_versions[ssl.PROTOCOL_TLSv1_1] = OpenSSL.SSL.TLSv1_1_METHOD
+
+if hasattr(ssl, 'PROTOCOL_TLSv1_2') and hasattr(OpenSSL.SSL, 'TLSv1_2_METHOD'):
+ _openssl_versions[ssl.PROTOCOL_TLSv1_2] = OpenSSL.SSL.TLSv1_2_METHOD
+
+try:
+ _openssl_versions.update({ssl.PROTOCOL_SSLv3: OpenSSL.SSL.SSLv3_METHOD})
+except AttributeError:
+ pass
+
+_stdlib_to_openssl_verify = {
+ ssl.CERT_NONE: OpenSSL.SSL.VERIFY_NONE,
+ ssl.CERT_OPTIONAL: OpenSSL.SSL.VERIFY_PEER,
+ ssl.CERT_REQUIRED:
+ OpenSSL.SSL.VERIFY_PEER + OpenSSL.SSL.VERIFY_FAIL_IF_NO_PEER_CERT,
+}
+_openssl_to_stdlib_verify = dict(
+ (v, k) for k, v in _stdlib_to_openssl_verify.items()
+)
+
+# OpenSSL will only write 16K at a time
+SSL_WRITE_BLOCKSIZE = 16384
+
+orig_util_HAS_SNI = util.HAS_SNI
+orig_util_SSLContext = util.ssl_.SSLContext
+
+
+log = logging.getLogger(__name__)
+
+
+def inject_into_urllib3():
+ 'Monkey-patch urllib3 with PyOpenSSL-backed SSL-support.'
+
+ _validate_dependencies_met()
+
+ util.ssl_.SSLContext = PyOpenSSLContext
+ util.HAS_SNI = HAS_SNI
+ util.ssl_.HAS_SNI = HAS_SNI
+ util.IS_PYOPENSSL = True
+ util.ssl_.IS_PYOPENSSL = True
+
+
+def extract_from_urllib3():
+ 'Undo monkey-patching by :func:`inject_into_urllib3`.'
+
+ util.ssl_.SSLContext = orig_util_SSLContext
+ util.HAS_SNI = orig_util_HAS_SNI
+ util.ssl_.HAS_SNI = orig_util_HAS_SNI
+ util.IS_PYOPENSSL = False
+ util.ssl_.IS_PYOPENSSL = False
+
+
+def _validate_dependencies_met():
+ """
+ Verifies that PyOpenSSL's package-level dependencies have been met.
+ Throws `ImportError` if they are not met.
+ """
+ # Method added in `cryptography==1.1`; not available in older versions
+ from cryptography.x509.extensions import Extensions
+ if getattr(Extensions, "get_extension_for_class", None) is None:
+ raise ImportError("'cryptography' module missing required functionality. "
+ "Try upgrading to v1.3.4 or newer.")
+
+ # pyOpenSSL 0.14 and above use cryptography for OpenSSL bindings. The _x509
+ # attribute is only present on those versions.
+ from OpenSSL.crypto import X509
+ x509 = X509()
+ if getattr(x509, "_x509", None) is None:
+ raise ImportError("'pyOpenSSL' module missing required functionality. "
+ "Try upgrading to v0.14 or newer.")
+
+
+def _dnsname_to_stdlib(name):
+ """
+ Converts a dNSName SubjectAlternativeName field to the form used by the
+ standard library on the given Python version.
+
+ Cryptography produces a dNSName as a unicode string that was idna-decoded
+ from ASCII bytes. We need to idna-encode that string to get it back, and
+ then on Python 3 we also need to convert to unicode via UTF-8 (the stdlib
+ uses PyUnicode_FromStringAndSize on it, which decodes via UTF-8).
+ """
+ def idna_encode(name):
+ """
+ Borrowed wholesale from the Python Cryptography Project. It turns out
+ that we can't just safely call `idna.encode`: it can explode for
+ wildcard names. This avoids that problem.
+ """
+ import idna
+
+ for prefix in [u'*.', u'.']:
+ if name.startswith(prefix):
+ name = name[len(prefix):]
+ return prefix.encode('ascii') + idna.encode(name)
+ return idna.encode(name)
+
+ name = idna_encode(name)
+ if sys.version_info >= (3, 0):
+ name = name.decode('utf-8')
+ return name
+
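+# Illustrative examples of the conversion described above (on Python 3 the
+# result is text, on Python 2 it stays as bytes):
+#
+#     >>> _dnsname_to_stdlib(u'example.com')            # doctest: +SKIP
+#     'example.com'
+#     >>> _dnsname_to_stdlib(u'*.example.com')          # doctest: +SKIP
+#     '*.example.com'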
+
+def get_subj_alt_name(peer_cert):
+ """
+    Given a PyOpenSSL certificate, provides all the subject alternative names.
+ """
+ # Pass the cert to cryptography, which has much better APIs for this.
+ # This is technically using private APIs, but should work across all
+ # relevant versions until PyOpenSSL gets something proper for this.
+ cert = _Certificate(openssl_backend, peer_cert._x509)
+
+ # We want to find the SAN extension. Ask Cryptography to locate it (it's
+ # faster than looping in Python)
+ try:
+ ext = cert.extensions.get_extension_for_class(
+ x509.SubjectAlternativeName
+ ).value
+ except x509.ExtensionNotFound:
+ # No such extension, return the empty list.
+ return []
+ except (x509.DuplicateExtension, x509.UnsupportedExtension,
+ x509.UnsupportedGeneralNameType, UnicodeError) as e:
+ # A problem has been found with the quality of the certificate. Assume
+ # no SAN field is present.
+ log.warning(
+ "A problem was encountered with the certificate that prevented "
+ "urllib3 from finding the SubjectAlternativeName field. This can "
+ "affect certificate validation. The error was %s",
+ e,
+ )
+ return []
+
+ # We want to return dNSName and iPAddress fields. We need to cast the IPs
+ # back to strings because the match_hostname function wants them as
+ # strings.
+ # Sadly the DNS names need to be idna encoded and then, on Python 3, UTF-8
+ # decoded. This is pretty frustrating, but that's what the standard library
+ # does with certificates, and so we need to attempt to do the same.
+ names = [
+ ('DNS', _dnsname_to_stdlib(name))
+ for name in ext.get_values_for_type(x509.DNSName)
+ ]
+ names.extend(
+ ('IP Address', str(name))
+ for name in ext.get_values_for_type(x509.IPAddress)
+ )
+
+ return names
+
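+# For illustration (not executed; the names are placeholders): the return
+# value of get_subj_alt_name() has the same shape as the 'subjectAltName'
+# entry of the standard library's getpeercert(), e.g.
+#     [('DNS', 'example.com'), ('IP Address', '192.0.2.1')]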
+
+class WrappedSocket(object):
+    '''API-compatibility wrapper for PyOpenSSL's Connection class.
+
+    Note: _makefile_refs, _drop() and _reuse() are needed for the garbage
+    collector of PyPy.
+ '''
+
+ def __init__(self, connection, socket, suppress_ragged_eofs=True):
+ self.connection = connection
+ self.socket = socket
+ self.suppress_ragged_eofs = suppress_ragged_eofs
+ self._makefile_refs = 0
+ self._closed = False
+
+ def fileno(self):
+ return self.socket.fileno()
+
+ # Copy-pasted from Python 3.5 source code
+ def _decref_socketios(self):
+ if self._makefile_refs > 0:
+ self._makefile_refs -= 1
+ if self._closed:
+ self.close()
+
+ def recv(self, *args, **kwargs):
+ try:
+ data = self.connection.recv(*args, **kwargs)
+ except OpenSSL.SSL.SysCallError as e:
+ if self.suppress_ragged_eofs and e.args == (-1, 'Unexpected EOF'):
+ return b''
+ else:
+ raise SocketError(str(e))
+ except OpenSSL.SSL.ZeroReturnError as e:
+ if self.connection.get_shutdown() == OpenSSL.SSL.RECEIVED_SHUTDOWN:
+ return b''
+ else:
+ raise
+ except OpenSSL.SSL.WantReadError:
+ rd = util.wait_for_read(self.socket, self.socket.gettimeout())
+ if not rd:
+ raise timeout('The read operation timed out')
+ else:
+ return self.recv(*args, **kwargs)
+ else:
+ return data
+
+ def recv_into(self, *args, **kwargs):
+ try:
+ return self.connection.recv_into(*args, **kwargs)
+ except OpenSSL.SSL.SysCallError as e:
+ if self.suppress_ragged_eofs and e.args == (-1, 'Unexpected EOF'):
+ return 0
+ else:
+ raise SocketError(str(e))
+ except OpenSSL.SSL.ZeroReturnError as e:
+ if self.connection.get_shutdown() == OpenSSL.SSL.RECEIVED_SHUTDOWN:
+ return 0
+ else:
+ raise
+ except OpenSSL.SSL.WantReadError:
+ rd = util.wait_for_read(self.socket, self.socket.gettimeout())
+ if not rd:
+ raise timeout('The read operation timed out')
+ else:
+ return self.recv_into(*args, **kwargs)
+
+ def settimeout(self, timeout):
+ return self.socket.settimeout(timeout)
+
+ def _send_until_done(self, data):
+ while True:
+ try:
+ return self.connection.send(data)
+ except OpenSSL.SSL.WantWriteError:
+ wr = util.wait_for_write(self.socket, self.socket.gettimeout())
+ if not wr:
+ raise timeout()
+ continue
+ except OpenSSL.SSL.SysCallError as e:
+ raise SocketError(str(e))
+
+ def sendall(self, data):
+ total_sent = 0
+ while total_sent < len(data):
+ sent = self._send_until_done(data[total_sent:total_sent + SSL_WRITE_BLOCKSIZE])
+ total_sent += sent
+
+ def shutdown(self):
+ # FIXME rethrow compatible exceptions should we ever use this
+ self.connection.shutdown()
+
+ def close(self):
+ if self._makefile_refs < 1:
+ try:
+ self._closed = True
+ return self.connection.close()
+ except OpenSSL.SSL.Error:
+ return
+ else:
+ self._makefile_refs -= 1
+
+ def getpeercert(self, binary_form=False):
+ x509 = self.connection.get_peer_certificate()
+
+ if not x509:
+ return x509
+
+ if binary_form:
+ return OpenSSL.crypto.dump_certificate(
+ OpenSSL.crypto.FILETYPE_ASN1,
+ x509)
+
+ return {
+ 'subject': (
+ (('commonName', x509.get_subject().CN),),
+ ),
+ 'subjectAltName': get_subj_alt_name(x509)
+ }
+
+ def _reuse(self):
+ self._makefile_refs += 1
+
+ def _drop(self):
+ if self._makefile_refs < 1:
+ self.close()
+ else:
+ self._makefile_refs -= 1
+
+
+if _fileobject: # Platform-specific: Python 2
+ def makefile(self, mode, bufsize=-1):
+ self._makefile_refs += 1
+ return _fileobject(self, mode, bufsize, close=True)
+else: # Platform-specific: Python 3
+ makefile = backport_makefile
+
+WrappedSocket.makefile = makefile
+
+
+class PyOpenSSLContext(object):
+ """
+ I am a wrapper class for the PyOpenSSL ``Context`` object. I am responsible
+ for translating the interface of the standard library ``SSLContext`` object
+ to calls into PyOpenSSL.
+ """
+ def __init__(self, protocol):
+ self.protocol = _openssl_versions[protocol]
+ self._ctx = OpenSSL.SSL.Context(self.protocol)
+ self._options = 0
+ self.check_hostname = False
+
+ @property
+ def options(self):
+ return self._options
+
+ @options.setter
+ def options(self, value):
+ self._options = value
+ self._ctx.set_options(value)
+
+ @property
+ def verify_mode(self):
+ return _openssl_to_stdlib_verify[self._ctx.get_verify_mode()]
+
+ @verify_mode.setter
+ def verify_mode(self, value):
+ self._ctx.set_verify(
+ _stdlib_to_openssl_verify[value],
+ _verify_callback
+ )
+
+ def set_default_verify_paths(self):
+ self._ctx.set_default_verify_paths()
+
+ def set_ciphers(self, ciphers):
+ if isinstance(ciphers, six.text_type):
+ ciphers = ciphers.encode('utf-8')
+ self._ctx.set_cipher_list(ciphers)
+
+ def load_verify_locations(self, cafile=None, capath=None, cadata=None):
+ if cafile is not None:
+ cafile = cafile.encode('utf-8')
+ if capath is not None:
+ capath = capath.encode('utf-8')
+ self._ctx.load_verify_locations(cafile, capath)
+ if cadata is not None:
+ self._ctx.load_verify_locations(BytesIO(cadata))
+
+ def load_cert_chain(self, certfile, keyfile=None, password=None):
+ self._ctx.use_certificate_file(certfile)
+ if password is not None:
+ self._ctx.set_passwd_cb(lambda max_length, prompt_twice, userdata: password)
+ self._ctx.use_privatekey_file(keyfile or certfile)
+
+ def wrap_socket(self, sock, server_side=False,
+ do_handshake_on_connect=True, suppress_ragged_eofs=True,
+ server_hostname=None):
+ cnx = OpenSSL.SSL.Connection(self._ctx, sock)
+
+ if isinstance(server_hostname, six.text_type): # Platform-specific: Python 3
+ server_hostname = server_hostname.encode('utf-8')
+
+ if server_hostname is not None:
+ cnx.set_tlsext_host_name(server_hostname)
+
+ cnx.set_connect_state()
+
+ while True:
+ try:
+ cnx.do_handshake()
+ except OpenSSL.SSL.WantReadError:
+ rd = util.wait_for_read(sock, sock.gettimeout())
+ if not rd:
+ raise timeout('select timed out')
+ continue
+ except OpenSSL.SSL.Error as e:
+ raise ssl.SSLError('bad handshake: %r' % e)
+ break
+
+ return WrappedSocket(cnx, sock)
+
+
+def _verify_callback(cnx, x509, err_no, err_depth, return_code):
+ return err_no == 0
diff --git a/collectors/python.d.plugin/python_modules/urllib3/contrib/securetransport.py b/collectors/python.d.plugin/python_modules/urllib3/contrib/securetransport.py
new file mode 100644
index 000000000..fcc30118c
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/urllib3/contrib/securetransport.py
@@ -0,0 +1,808 @@
+# SPDX-License-Identifier: MIT
+"""
+SecureTransport support for urllib3 via ctypes.
+
+This makes platform-native TLS available to urllib3 users on macOS without the
+use of a compiler. This is an important feature because the Python Package
+Index is moving to become a TLSv1.2-or-higher server, and the default OpenSSL
+that ships with macOS is not capable of doing TLSv1.2. The only way to resolve
+this is to give macOS users an alternative solution to the problem, and that
+solution is to use SecureTransport.
+
+We use ctypes here because this solution must not require a compiler. That's
+because pip is not allowed to require a compiler either.
+
+This is not intended to be a seriously long-term solution to this problem.
+The hope is that PEP 543 will eventually solve this issue for us, at which
+point we can retire this contrib module. But in the short term, we need to
+solve the impending tire fire that is Python on Mac without this kind of
+contrib module. So...here we are.
+
+To use this module, simply import and inject it::
+
+ import urllib3.contrib.securetransport
+ urllib3.contrib.securetransport.inject_into_urllib3()
+
+Happy TLSing!
+"""
+from __future__ import absolute_import
+
+import contextlib
+import ctypes
+import errno
+import os.path
+import shutil
+import socket
+import ssl
+import threading
+import weakref
+
+from .. import util
+from ._securetransport.bindings import (
+ Security, SecurityConst, CoreFoundation
+)
+from ._securetransport.low_level import (
+ _assert_no_error, _cert_array_from_pem, _temporary_keychain,
+ _load_client_cert_chain
+)
+
+try: # Platform-specific: Python 2
+ from socket import _fileobject
+except ImportError: # Platform-specific: Python 3
+ _fileobject = None
+ from ..packages.backports.makefile import backport_makefile
+
+try:
+ memoryview(b'')
+except NameError:
+ raise ImportError("SecureTransport only works on Pythons with memoryview")
+
+__all__ = ['inject_into_urllib3', 'extract_from_urllib3']
+
+# SNI always works
+HAS_SNI = True
+
+orig_util_HAS_SNI = util.HAS_SNI
+orig_util_SSLContext = util.ssl_.SSLContext
+
+# This dictionary is used by the read callback to obtain a handle to the
+# calling wrapped socket. This is a pretty silly approach, but for now it'll
+# do. I feel like I should be able to smuggle a handle to the wrapped socket
+# directly in the SSLConnectionRef, but for now this approach will work I
+# guess.
+#
+# We need to lock around this structure for inserts, but we don't do it for
+# reads/writes in the callbacks. The reasoning here goes as follows:
+#
+# 1. It is not possible to call into the callbacks before the dictionary is
+# populated, so once in the callback the id must be in the dictionary.
+# 2. The callbacks don't mutate the dictionary, they only read from it, and
+# so cannot conflict with any of the insertions.
+#
+# This is good: if we had to lock in the callbacks we'd drastically slow down
+# the performance of this code.
+_connection_refs = weakref.WeakValueDictionary()
+_connection_ref_lock = threading.Lock()
+
+# Limit writes to 16kB. This is OpenSSL's limit, but we'll cargo-cult it over
+# for no better reason than we need *a* limit, and this one is right there.
+SSL_WRITE_BLOCKSIZE = 16384
+
+# This is our equivalent of util.ssl_.DEFAULT_CIPHERS, but expanded out to
+# individual cipher suites. We need to do this because this is how
+# SecureTransport wants them.
+CIPHER_SUITES = [
+ SecurityConst.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,
+ SecurityConst.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,
+ SecurityConst.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
+ SecurityConst.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
+ SecurityConst.TLS_DHE_DSS_WITH_AES_256_GCM_SHA384,
+ SecurityConst.TLS_DHE_RSA_WITH_AES_256_GCM_SHA384,
+ SecurityConst.TLS_DHE_DSS_WITH_AES_128_GCM_SHA256,
+ SecurityConst.TLS_DHE_RSA_WITH_AES_128_GCM_SHA256,
+ SecurityConst.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384,
+ SecurityConst.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384,
+ SecurityConst.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,
+ SecurityConst.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,
+ SecurityConst.TLS_DHE_RSA_WITH_AES_256_CBC_SHA256,
+ SecurityConst.TLS_DHE_DSS_WITH_AES_256_CBC_SHA256,
+ SecurityConst.TLS_DHE_RSA_WITH_AES_256_CBC_SHA,
+ SecurityConst.TLS_DHE_DSS_WITH_AES_256_CBC_SHA,
+ SecurityConst.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256,
+ SecurityConst.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256,
+ SecurityConst.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,
+ SecurityConst.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,
+ SecurityConst.TLS_DHE_RSA_WITH_AES_128_CBC_SHA256,
+ SecurityConst.TLS_DHE_DSS_WITH_AES_128_CBC_SHA256,
+ SecurityConst.TLS_DHE_RSA_WITH_AES_128_CBC_SHA,
+ SecurityConst.TLS_DHE_DSS_WITH_AES_128_CBC_SHA,
+ SecurityConst.TLS_RSA_WITH_AES_256_GCM_SHA384,
+ SecurityConst.TLS_RSA_WITH_AES_128_GCM_SHA256,
+ SecurityConst.TLS_RSA_WITH_AES_256_CBC_SHA256,
+ SecurityConst.TLS_RSA_WITH_AES_128_CBC_SHA256,
+ SecurityConst.TLS_RSA_WITH_AES_256_CBC_SHA,
+ SecurityConst.TLS_RSA_WITH_AES_128_CBC_SHA,
+]
+
+# Basically this is simple: for PROTOCOL_SSLv23 we turn it into a low of
+# TLSv1 and a high of TLSv1.2. For everything else, we pin to that version.
+_protocol_to_min_max = {
+ ssl.PROTOCOL_SSLv23: (SecurityConst.kTLSProtocol1, SecurityConst.kTLSProtocol12),
+}
+
+if hasattr(ssl, "PROTOCOL_SSLv2"):
+ _protocol_to_min_max[ssl.PROTOCOL_SSLv2] = (
+ SecurityConst.kSSLProtocol2, SecurityConst.kSSLProtocol2
+ )
+if hasattr(ssl, "PROTOCOL_SSLv3"):
+ _protocol_to_min_max[ssl.PROTOCOL_SSLv3] = (
+ SecurityConst.kSSLProtocol3, SecurityConst.kSSLProtocol3
+ )
+if hasattr(ssl, "PROTOCOL_TLSv1"):
+ _protocol_to_min_max[ssl.PROTOCOL_TLSv1] = (
+ SecurityConst.kTLSProtocol1, SecurityConst.kTLSProtocol1
+ )
+if hasattr(ssl, "PROTOCOL_TLSv1_1"):
+ _protocol_to_min_max[ssl.PROTOCOL_TLSv1_1] = (
+ SecurityConst.kTLSProtocol11, SecurityConst.kTLSProtocol11
+ )
+if hasattr(ssl, "PROTOCOL_TLSv1_2"):
+ _protocol_to_min_max[ssl.PROTOCOL_TLSv1_2] = (
+ SecurityConst.kTLSProtocol12, SecurityConst.kTLSProtocol12
+ )
+if hasattr(ssl, "PROTOCOL_TLS"):
+ _protocol_to_min_max[ssl.PROTOCOL_TLS] = _protocol_to_min_max[ssl.PROTOCOL_SSLv23]
+
+
+def inject_into_urllib3():
+ """
+ Monkey-patch urllib3 with SecureTransport-backed SSL-support.
+ """
+ util.ssl_.SSLContext = SecureTransportContext
+ util.HAS_SNI = HAS_SNI
+ util.ssl_.HAS_SNI = HAS_SNI
+ util.IS_SECURETRANSPORT = True
+ util.ssl_.IS_SECURETRANSPORT = True
+
+
+def extract_from_urllib3():
+ """
+ Undo monkey-patching by :func:`inject_into_urllib3`.
+ """
+ util.ssl_.SSLContext = orig_util_SSLContext
+ util.HAS_SNI = orig_util_HAS_SNI
+ util.ssl_.HAS_SNI = orig_util_HAS_SNI
+ util.IS_SECURETRANSPORT = False
+ util.ssl_.IS_SECURETRANSPORT = False
+
+
+def _read_callback(connection_id, data_buffer, data_length_pointer):
+ """
+ SecureTransport read callback. This is called by ST to request that data
+ be returned from the socket.
+ """
+ wrapped_socket = None
+ try:
+ wrapped_socket = _connection_refs.get(connection_id)
+ if wrapped_socket is None:
+ return SecurityConst.errSSLInternal
+ base_socket = wrapped_socket.socket
+
+ requested_length = data_length_pointer[0]
+
+ timeout = wrapped_socket.gettimeout()
+ error = None
+ read_count = 0
+ buffer = (ctypes.c_char * requested_length).from_address(data_buffer)
+ buffer_view = memoryview(buffer)
+
+ try:
+ while read_count < requested_length:
+ if timeout is None or timeout >= 0:
+ readables = util.wait_for_read([base_socket], timeout)
+ if not readables:
+ raise socket.error(errno.EAGAIN, 'timed out')
+
+ # We need to tell ctypes that we have a buffer that can be
+ # written to. Upsettingly, we do that like this:
+ chunk_size = base_socket.recv_into(
+ buffer_view[read_count:requested_length]
+ )
+ read_count += chunk_size
+ if not chunk_size:
+ if not read_count:
+ return SecurityConst.errSSLClosedGraceful
+ break
+ except (socket.error) as e:
+ error = e.errno
+
+ if error is not None and error != errno.EAGAIN:
+ if error == errno.ECONNRESET:
+ return SecurityConst.errSSLClosedAbort
+ raise
+
+ data_length_pointer[0] = read_count
+
+ if read_count != requested_length:
+ return SecurityConst.errSSLWouldBlock
+
+ return 0
+ except Exception as e:
+ if wrapped_socket is not None:
+ wrapped_socket._exception = e
+ return SecurityConst.errSSLInternal
+
+
+def _write_callback(connection_id, data_buffer, data_length_pointer):
+ """
+ SecureTransport write callback. This is called by ST to request that data
+ actually be sent on the network.
+ """
+ wrapped_socket = None
+ try:
+ wrapped_socket = _connection_refs.get(connection_id)
+ if wrapped_socket is None:
+ return SecurityConst.errSSLInternal
+ base_socket = wrapped_socket.socket
+
+ bytes_to_write = data_length_pointer[0]
+ data = ctypes.string_at(data_buffer, bytes_to_write)
+
+ timeout = wrapped_socket.gettimeout()
+ error = None
+ sent = 0
+
+ try:
+ while sent < bytes_to_write:
+ if timeout is None or timeout >= 0:
+ writables = util.wait_for_write([base_socket], timeout)
+ if not writables:
+ raise socket.error(errno.EAGAIN, 'timed out')
+ chunk_sent = base_socket.send(data)
+ sent += chunk_sent
+
+ # This has some needless copying here, but I'm not sure there's
+ # much value in optimising this data path.
+ data = data[chunk_sent:]
+ except (socket.error) as e:
+ error = e.errno
+
+ if error is not None and error != errno.EAGAIN:
+ if error == errno.ECONNRESET:
+ return SecurityConst.errSSLClosedAbort
+ raise
+
+ data_length_pointer[0] = sent
+ if sent != bytes_to_write:
+ return SecurityConst.errSSLWouldBlock
+
+ return 0
+ except Exception as e:
+ if wrapped_socket is not None:
+ wrapped_socket._exception = e
+ return SecurityConst.errSSLInternal
+
+
+# We need to keep these two objects references alive: if they get GC'd while
+# in use then SecureTransport could attempt to call a function that is in freed
+# memory. That would be...uh...bad. Yeah, that's the word. Bad.
+_read_callback_pointer = Security.SSLReadFunc(_read_callback)
+_write_callback_pointer = Security.SSLWriteFunc(_write_callback)
+
+
+class WrappedSocket(object):
+ """
+ API-compatibility wrapper for Python's OpenSSL wrapped socket object.
+
+ Note: _makefile_refs, _drop(), and _reuse() are needed for the garbage
+ collector of PyPy.
+ """
+ def __init__(self, socket):
+ self.socket = socket
+ self.context = None
+ self._makefile_refs = 0
+ self._closed = False
+ self._exception = None
+ self._keychain = None
+ self._keychain_dir = None
+ self._client_cert_chain = None
+
+ # We save off the previously-configured timeout and then set it to
+ # zero. This is done because we use select and friends to handle the
+ # timeouts, but if we leave the timeout set on the lower socket then
+ # Python will "kindly" call select on that socket again for us. Avoid
+ # that by forcing the timeout to zero.
+ self._timeout = self.socket.gettimeout()
+ self.socket.settimeout(0)
+
+ @contextlib.contextmanager
+ def _raise_on_error(self):
+ """
+ A context manager that can be used to wrap calls that do I/O from
+ SecureTransport. If any of the I/O callbacks hit an exception, this
+ context manager will correctly propagate the exception after the fact.
+ This avoids silently swallowing those exceptions.
+
+ It also correctly forces the socket closed.
+ """
+ self._exception = None
+
+ # We explicitly don't catch around this yield because in the unlikely
+ # event that an exception was hit in the block we don't want to swallow
+ # it.
+ yield
+ if self._exception is not None:
+ exception, self._exception = self._exception, None
+ self.close()
+ raise exception
+
+ def _set_ciphers(self):
+ """
+ Sets up the allowed ciphers. By default this matches the set in
+        util.ssl_.DEFAULT_CIPHERS, at least as supported by macOS. This is
+        hard-coded and cannot be changed at this time, mostly because parsing
+        OpenSSL cipher strings is going to be a freaking nightmare.
+ """
+ ciphers = (Security.SSLCipherSuite * len(CIPHER_SUITES))(*CIPHER_SUITES)
+ result = Security.SSLSetEnabledCiphers(
+ self.context, ciphers, len(CIPHER_SUITES)
+ )
+ _assert_no_error(result)
+
+ def _custom_validate(self, verify, trust_bundle):
+ """
+ Called when we have set custom validation. We do this in two cases:
+ first, when cert validation is entirely disabled; and second, when
+ using a custom trust DB.
+ """
+ # If we disabled cert validation, just say: cool.
+ if not verify:
+ return
+
+ # We want data in memory, so load it up.
+ if os.path.isfile(trust_bundle):
+ with open(trust_bundle, 'rb') as f:
+ trust_bundle = f.read()
+
+ cert_array = None
+ trust = Security.SecTrustRef()
+
+ try:
+ # Get a CFArray that contains the certs we want.
+ cert_array = _cert_array_from_pem(trust_bundle)
+
+ # Ok, now the hard part. We want to get the SecTrustRef that ST has
+ # created for this connection, shove our CAs into it, tell ST to
+ # ignore everything else it knows, and then ask if it can build a
+ # chain. This is a buuuunch of code.
+ result = Security.SSLCopyPeerTrust(
+ self.context, ctypes.byref(trust)
+ )
+ _assert_no_error(result)
+ if not trust:
+ raise ssl.SSLError("Failed to copy trust reference")
+
+ result = Security.SecTrustSetAnchorCertificates(trust, cert_array)
+ _assert_no_error(result)
+
+ result = Security.SecTrustSetAnchorCertificatesOnly(trust, True)
+ _assert_no_error(result)
+
+ trust_result = Security.SecTrustResultType()
+ result = Security.SecTrustEvaluate(
+ trust, ctypes.byref(trust_result)
+ )
+ _assert_no_error(result)
+ finally:
+ if trust:
+ CoreFoundation.CFRelease(trust)
+
+            if cert_array is not None:
+ CoreFoundation.CFRelease(cert_array)
+
+ # Ok, now we can look at what the result was.
+ successes = (
+ SecurityConst.kSecTrustResultUnspecified,
+ SecurityConst.kSecTrustResultProceed
+ )
+ if trust_result.value not in successes:
+ raise ssl.SSLError(
+ "certificate verify failed, error code: %d" %
+ trust_result.value
+ )
+
+ def handshake(self,
+ server_hostname,
+ verify,
+ trust_bundle,
+ min_version,
+ max_version,
+ client_cert,
+ client_key,
+ client_key_passphrase):
+ """
+ Actually performs the TLS handshake. This is run automatically by
+ wrapped socket, and shouldn't be needed in user code.
+ """
+ # First, we do the initial bits of connection setup. We need to create
+ # a context, set its I/O funcs, and set the connection reference.
+ self.context = Security.SSLCreateContext(
+ None, SecurityConst.kSSLClientSide, SecurityConst.kSSLStreamType
+ )
+ result = Security.SSLSetIOFuncs(
+ self.context, _read_callback_pointer, _write_callback_pointer
+ )
+ _assert_no_error(result)
+
+ # Here we need to compute the handle to use. We do this by taking the
+ # id of self modulo 2**31 - 1. If this is already in the dictionary, we
+ # just keep incrementing by one until we find a free space.
+ with _connection_ref_lock:
+ handle = id(self) % 2147483647
+ while handle in _connection_refs:
+ handle = (handle + 1) % 2147483647
+ _connection_refs[handle] = self
+
+ result = Security.SSLSetConnection(self.context, handle)
+ _assert_no_error(result)
+
+ # If we have a server hostname, we should set that too.
+ if server_hostname:
+ if not isinstance(server_hostname, bytes):
+ server_hostname = server_hostname.encode('utf-8')
+
+ result = Security.SSLSetPeerDomainName(
+ self.context, server_hostname, len(server_hostname)
+ )
+ _assert_no_error(result)
+
+ # Setup the ciphers.
+ self._set_ciphers()
+
+ # Set the minimum and maximum TLS versions.
+ result = Security.SSLSetProtocolVersionMin(self.context, min_version)
+ _assert_no_error(result)
+ result = Security.SSLSetProtocolVersionMax(self.context, max_version)
+ _assert_no_error(result)
+
+ # If there's a trust DB, we need to use it. We do that by telling
+ # SecureTransport to break on server auth. We also do that if we don't
+ # want to validate the certs at all: we just won't actually do any
+ # authing in that case.
+ if not verify or trust_bundle is not None:
+ result = Security.SSLSetSessionOption(
+ self.context,
+ SecurityConst.kSSLSessionOptionBreakOnServerAuth,
+ True
+ )
+ _assert_no_error(result)
+
+ # If there's a client cert, we need to use it.
+ if client_cert:
+ self._keychain, self._keychain_dir = _temporary_keychain()
+ self._client_cert_chain = _load_client_cert_chain(
+ self._keychain, client_cert, client_key
+ )
+ result = Security.SSLSetCertificate(
+ self.context, self._client_cert_chain
+ )
+ _assert_no_error(result)
+
+ while True:
+ with self._raise_on_error():
+ result = Security.SSLHandshake(self.context)
+
+ if result == SecurityConst.errSSLWouldBlock:
+ raise socket.timeout("handshake timed out")
+ elif result == SecurityConst.errSSLServerAuthCompleted:
+ self._custom_validate(verify, trust_bundle)
+ continue
+ else:
+ _assert_no_error(result)
+ break
+
+ def fileno(self):
+ return self.socket.fileno()
+
+ # Copy-pasted from Python 3.5 source code
+ def _decref_socketios(self):
+ if self._makefile_refs > 0:
+ self._makefile_refs -= 1
+ if self._closed:
+ self.close()
+
+ def recv(self, bufsiz):
+ buffer = ctypes.create_string_buffer(bufsiz)
+ bytes_read = self.recv_into(buffer, bufsiz)
+ data = buffer[:bytes_read]
+ return data
+
+ def recv_into(self, buffer, nbytes=None):
+ # Read short on EOF.
+ if self._closed:
+ return 0
+
+ if nbytes is None:
+ nbytes = len(buffer)
+
+ buffer = (ctypes.c_char * nbytes).from_buffer(buffer)
+ processed_bytes = ctypes.c_size_t(0)
+
+ with self._raise_on_error():
+ result = Security.SSLRead(
+ self.context, buffer, nbytes, ctypes.byref(processed_bytes)
+ )
+
+ # There are some result codes that we want to treat as "not always
+ # errors". Specifically, those are errSSLWouldBlock,
+ # errSSLClosedGraceful, and errSSLClosedNoNotify.
+ if (result == SecurityConst.errSSLWouldBlock):
+ # If we didn't process any bytes, then this was just a time out.
+ # However, we can get errSSLWouldBlock in situations when we *did*
+ # read some data, and in those cases we should just read "short"
+ # and return.
+ if processed_bytes.value == 0:
+ # Timed out, no data read.
+ raise socket.timeout("recv timed out")
+ elif result in (SecurityConst.errSSLClosedGraceful, SecurityConst.errSSLClosedNoNotify):
+ # The remote peer has closed this connection. We should do so as
+ # well. Note that we don't actually return here because in
+ # principle this could actually be fired along with return data.
+ # It's unlikely though.
+ self.close()
+ else:
+ _assert_no_error(result)
+
+ # Ok, we read and probably succeeded. We should return whatever data
+ # was actually read.
+ return processed_bytes.value
+
+ def settimeout(self, timeout):
+ self._timeout = timeout
+
+ def gettimeout(self):
+ return self._timeout
+
+ def send(self, data):
+ processed_bytes = ctypes.c_size_t(0)
+
+ with self._raise_on_error():
+ result = Security.SSLWrite(
+ self.context, data, len(data), ctypes.byref(processed_bytes)
+ )
+
+ if result == SecurityConst.errSSLWouldBlock and processed_bytes.value == 0:
+ # Timed out
+ raise socket.timeout("send timed out")
+ else:
+ _assert_no_error(result)
+
+ # We sent, and probably succeeded. Tell them how much we sent.
+ return processed_bytes.value
+
+ def sendall(self, data):
+ total_sent = 0
+ while total_sent < len(data):
+ sent = self.send(data[total_sent:total_sent + SSL_WRITE_BLOCKSIZE])
+ total_sent += sent
+
+ def shutdown(self):
+ with self._raise_on_error():
+ Security.SSLClose(self.context)
+
+ def close(self):
+ # TODO: should I do clean shutdown here? Do I have to?
+ if self._makefile_refs < 1:
+ self._closed = True
+ if self.context:
+ CoreFoundation.CFRelease(self.context)
+ self.context = None
+ if self._client_cert_chain:
+ CoreFoundation.CFRelease(self._client_cert_chain)
+ self._client_cert_chain = None
+ if self._keychain:
+ Security.SecKeychainDelete(self._keychain)
+ CoreFoundation.CFRelease(self._keychain)
+ shutil.rmtree(self._keychain_dir)
+ self._keychain = self._keychain_dir = None
+ return self.socket.close()
+ else:
+ self._makefile_refs -= 1
+
+ def getpeercert(self, binary_form=False):
+ # Urgh, annoying.
+ #
+ # Here's how we do this:
+ #
+ # 1. Call SSLCopyPeerTrust to get hold of the trust object for this
+ # connection.
+ # 2. Call SecTrustGetCertificateAtIndex for index 0 to get the leaf.
+ # 3. To get the CN, call SecCertificateCopyCommonName and process that
+ # string so that it's of the appropriate type.
+ # 4. To get the SAN, we need to do something a bit more complex:
+ # a. Call SecCertificateCopyValues to get the data, requesting
+ # kSecOIDSubjectAltName.
+ # b. Mess about with this dictionary to try to get the SANs out.
+ #
+ # This is gross. Really gross. It's going to be a few hundred LoC extra
+ # just to repeat something that SecureTransport can *already do*. So my
+ # operating assumption at this time is that what we want to do is
+ # instead to just flag to urllib3 that it shouldn't do its own hostname
+ # validation when using SecureTransport.
+ if not binary_form:
+ raise ValueError(
+ "SecureTransport only supports dumping binary certs"
+ )
+ trust = Security.SecTrustRef()
+ certdata = None
+ der_bytes = None
+
+ try:
+ # Grab the trust store.
+ result = Security.SSLCopyPeerTrust(
+ self.context, ctypes.byref(trust)
+ )
+ _assert_no_error(result)
+ if not trust:
+ # Probably we haven't done the handshake yet. No biggie.
+ return None
+
+ cert_count = Security.SecTrustGetCertificateCount(trust)
+ if not cert_count:
+ # Also a case that might happen if we haven't handshaked.
+ # Handshook? Handshaken?
+ return None
+
+ leaf = Security.SecTrustGetCertificateAtIndex(trust, 0)
+ assert leaf
+
+ # Ok, now we want the DER bytes.
+ certdata = Security.SecCertificateCopyData(leaf)
+ assert certdata
+
+ data_length = CoreFoundation.CFDataGetLength(certdata)
+ data_buffer = CoreFoundation.CFDataGetBytePtr(certdata)
+ der_bytes = ctypes.string_at(data_buffer, data_length)
+ finally:
+ if certdata:
+ CoreFoundation.CFRelease(certdata)
+ if trust:
+ CoreFoundation.CFRelease(trust)
+
+ return der_bytes
+
+ def _reuse(self):
+ self._makefile_refs += 1
+
+ def _drop(self):
+ if self._makefile_refs < 1:
+ self.close()
+ else:
+ self._makefile_refs -= 1
+
+
+if _fileobject: # Platform-specific: Python 2
+ def makefile(self, mode, bufsize=-1):
+ self._makefile_refs += 1
+ return _fileobject(self, mode, bufsize, close=True)
+else: # Platform-specific: Python 3
+ def makefile(self, mode="r", buffering=None, *args, **kwargs):
+ # We disable buffering with SecureTransport because it conflicts with
+ # the buffering that ST does internally (see issue #1153 for more).
+ buffering = 0
+ return backport_makefile(self, mode, buffering, *args, **kwargs)
+
+WrappedSocket.makefile = makefile
+
+
+class SecureTransportContext(object):
+ """
+ I am a wrapper class for the SecureTransport library, to translate the
+ interface of the standard library ``SSLContext`` object to calls into
+ SecureTransport.
+ """
+ def __init__(self, protocol):
+ self._min_version, self._max_version = _protocol_to_min_max[protocol]
+ self._options = 0
+ self._verify = False
+ self._trust_bundle = None
+ self._client_cert = None
+ self._client_key = None
+ self._client_key_passphrase = None
+
+ @property
+ def check_hostname(self):
+ """
+ SecureTransport cannot have its hostname checking disabled. For more,
+ see the comment on getpeercert() in this file.
+ """
+ return True
+
+ @check_hostname.setter
+ def check_hostname(self, value):
+ """
+ SecureTransport cannot have its hostname checking disabled. For more,
+ see the comment on getpeercert() in this file.
+ """
+ pass
+
+ @property
+ def options(self):
+ # TODO: Well, crap.
+ #
+ # So this is the bit of the code that is the most likely to cause us
+ # trouble. Essentially we need to enumerate all of the SSL options that
+ # users might want to use and try to see if we can sensibly translate
+ # them, or whether we should just ignore them.
+ return self._options
+
+ @options.setter
+ def options(self, value):
+ # TODO: Update in line with above.
+ self._options = value
+
+ @property
+ def verify_mode(self):
+ return ssl.CERT_REQUIRED if self._verify else ssl.CERT_NONE
+
+ @verify_mode.setter
+ def verify_mode(self, value):
+ self._verify = True if value == ssl.CERT_REQUIRED else False
+
+ def set_default_verify_paths(self):
+ # So, this has to do something a bit weird. Specifically, what it does
+ # is nothing.
+ #
+ # This means that, if we had previously had load_verify_locations
+ # called, this does not undo that. We need to do that because it turns
+ # out that the rest of the urllib3 code will attempt to load the
+        # default verify paths if it hasn't been told about any paths, even
+        # if the context itself was given paths sometime earlier. We resolve
+        # that by just ignoring it.
+ pass
+
+ def load_default_certs(self):
+ return self.set_default_verify_paths()
+
+ def set_ciphers(self, ciphers):
+ # For now, we just require the default cipher string.
+ if ciphers != util.ssl_.DEFAULT_CIPHERS:
+ raise ValueError(
+ "SecureTransport doesn't support custom cipher strings"
+ )
+
+ def load_verify_locations(self, cafile=None, capath=None, cadata=None):
+ # OK, we only really support cadata and cafile.
+ if capath is not None:
+ raise ValueError(
+ "SecureTransport does not support cert directories"
+ )
+
+ self._trust_bundle = cafile or cadata
+
+ def load_cert_chain(self, certfile, keyfile=None, password=None):
+ self._client_cert = certfile
+ self._client_key = keyfile
+        self._client_key_passphrase = password
+
+ def wrap_socket(self, sock, server_side=False,
+ do_handshake_on_connect=True, suppress_ragged_eofs=True,
+ server_hostname=None):
+ # So, what do we do here? Firstly, we assert some properties. This is a
+ # stripped down shim, so there is some functionality we don't support.
+ # See PEP 543 for the real deal.
+ assert not server_side
+ assert do_handshake_on_connect
+ assert suppress_ragged_eofs
+
+ # Ok, we're good to go. Now we want to create the wrapped socket object
+ # and store it in the appropriate place.
+ wrapped_socket = WrappedSocket(sock)
+
+ # Now we can handshake
+ wrapped_socket.handshake(
+ server_hostname, self._verify, self._trust_bundle,
+ self._min_version, self._max_version, self._client_cert,
+ self._client_key, self._client_key_passphrase
+ )
+ return wrapped_socket
diff --git a/collectors/python.d.plugin/python_modules/urllib3/contrib/socks.py b/collectors/python.d.plugin/python_modules/urllib3/contrib/socks.py
new file mode 100644
index 000000000..1cb79285b
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/urllib3/contrib/socks.py
@@ -0,0 +1,189 @@
+# -*- coding: utf-8 -*-
+# SPDX-License-Identifier: MIT
+"""
+This module contains provisional support for SOCKS proxies from within
+urllib3. This module supports SOCKS4 (specifically the SOCKS4A variant) and
+SOCKS5. To enable its functionality, either install PySocks or install this
+module with the ``socks`` extra.
+
+The SOCKS implementation supports the full range of urllib3 features. It also
+supports the following SOCKS features:
+
+- SOCKS4
+- SOCKS4a
+- SOCKS5
+- Usernames and passwords for the SOCKS proxy
+
+Known Limitations:
+
+- Currently PySocks does not support contacting remote websites via literal
+ IPv6 addresses. Any such connection attempt will fail. You must use a domain
+ name.
+- Currently PySocks does not support IPv6 connections to the SOCKS proxy. Any
+ such connection attempt will fail.
+"""
+from __future__ import absolute_import
+
+try:
+ import socks
+except ImportError:
+ import warnings
+ from ..exceptions import DependencyWarning
+
+ warnings.warn((
+ 'SOCKS support in urllib3 requires the installation of optional '
+ 'dependencies: specifically, PySocks. For more information, see '
+ 'https://urllib3.readthedocs.io/en/latest/contrib.html#socks-proxies'
+ ),
+ DependencyWarning
+ )
+ raise
+
+from socket import error as SocketError, timeout as SocketTimeout
+
+from ..connection import (
+ HTTPConnection, HTTPSConnection
+)
+from ..connectionpool import (
+ HTTPConnectionPool, HTTPSConnectionPool
+)
+from ..exceptions import ConnectTimeoutError, NewConnectionError
+from ..poolmanager import PoolManager
+from ..util.url import parse_url
+
+try:
+ import ssl
+except ImportError:
+ ssl = None
+
+
+class SOCKSConnection(HTTPConnection):
+ """
+ A plain-text HTTP connection that connects via a SOCKS proxy.
+ """
+ def __init__(self, *args, **kwargs):
+ self._socks_options = kwargs.pop('_socks_options')
+ super(SOCKSConnection, self).__init__(*args, **kwargs)
+
+ def _new_conn(self):
+ """
+ Establish a new connection via the SOCKS proxy.
+ """
+ extra_kw = {}
+ if self.source_address:
+ extra_kw['source_address'] = self.source_address
+
+ if self.socket_options:
+ extra_kw['socket_options'] = self.socket_options
+
+ try:
+ conn = socks.create_connection(
+ (self.host, self.port),
+ proxy_type=self._socks_options['socks_version'],
+ proxy_addr=self._socks_options['proxy_host'],
+ proxy_port=self._socks_options['proxy_port'],
+ proxy_username=self._socks_options['username'],
+ proxy_password=self._socks_options['password'],
+ proxy_rdns=self._socks_options['rdns'],
+ timeout=self.timeout,
+ **extra_kw
+ )
+
+ except SocketTimeout as e:
+ raise ConnectTimeoutError(
+ self, "Connection to %s timed out. (connect timeout=%s)" %
+ (self.host, self.timeout))
+
+ except socks.ProxyError as e:
+ # This is fragile as hell, but it seems to be the only way to raise
+ # useful errors here.
+ if e.socket_err:
+ error = e.socket_err
+ if isinstance(error, SocketTimeout):
+ raise ConnectTimeoutError(
+ self,
+ "Connection to %s timed out. (connect timeout=%s)" %
+ (self.host, self.timeout)
+ )
+ else:
+ raise NewConnectionError(
+ self,
+ "Failed to establish a new connection: %s" % error
+ )
+ else:
+ raise NewConnectionError(
+ self,
+ "Failed to establish a new connection: %s" % e
+ )
+
+ except SocketError as e: # Defensive: PySocks should catch all these.
+ raise NewConnectionError(
+ self, "Failed to establish a new connection: %s" % e)
+
+ return conn
+
+
+# We don't need to duplicate the Verified/Unverified distinction from
+# urllib3/connection.py here because the HTTPSConnection will already have been
+# correctly set to either the Verified or Unverified form by that module. This
+# means the SOCKSHTTPSConnection will automatically be the correct type.
+class SOCKSHTTPSConnection(SOCKSConnection, HTTPSConnection):
+ pass
+
+
+class SOCKSHTTPConnectionPool(HTTPConnectionPool):
+ ConnectionCls = SOCKSConnection
+
+
+class SOCKSHTTPSConnectionPool(HTTPSConnectionPool):
+ ConnectionCls = SOCKSHTTPSConnection
+
+
+class SOCKSProxyManager(PoolManager):
+ """
+ A version of the urllib3 ProxyManager that routes connections via the
+ defined SOCKS proxy.
+ """
+ pool_classes_by_scheme = {
+ 'http': SOCKSHTTPConnectionPool,
+ 'https': SOCKSHTTPSConnectionPool,
+ }
+
+ def __init__(self, proxy_url, username=None, password=None,
+ num_pools=10, headers=None, **connection_pool_kw):
+ parsed = parse_url(proxy_url)
+
+ if parsed.scheme == 'socks5':
+ socks_version = socks.PROXY_TYPE_SOCKS5
+ rdns = False
+ elif parsed.scheme == 'socks5h':
+ socks_version = socks.PROXY_TYPE_SOCKS5
+ rdns = True
+ elif parsed.scheme == 'socks4':
+ socks_version = socks.PROXY_TYPE_SOCKS4
+ rdns = False
+ elif parsed.scheme == 'socks4a':
+ socks_version = socks.PROXY_TYPE_SOCKS4
+ rdns = True
+ else:
+ raise ValueError(
+ "Unable to determine SOCKS version from %s" % proxy_url
+ )
+
+ self.proxy_url = proxy_url
+
+ socks_options = {
+ 'socks_version': socks_version,
+ 'proxy_host': parsed.host,
+ 'proxy_port': parsed.port,
+ 'username': username,
+ 'password': password,
+ 'rdns': rdns
+ }
+ connection_pool_kw['_socks_options'] = socks_options
+
+ super(SOCKSProxyManager, self).__init__(
+ num_pools, headers, **connection_pool_kw
+ )
+
+ self.pool_classes_by_scheme = SOCKSProxyManager.pool_classes_by_scheme
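+
+
+# Example usage (a minimal sketch, not executed here; requires PySocks, and
+# the proxy address below is a placeholder):
+#
+#     from urllib3.contrib.socks import SOCKSProxyManager
+#
+#     proxy = SOCKSProxyManager('socks5h://127.0.0.1:1080/')
+#     response = proxy.request('GET', 'http://example.com/')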
diff --git a/collectors/python.d.plugin/python_modules/urllib3/exceptions.py b/collectors/python.d.plugin/python_modules/urllib3/exceptions.py
new file mode 100644
index 000000000..a71cabe06
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/urllib3/exceptions.py
@@ -0,0 +1,247 @@
+# SPDX-License-Identifier: MIT
+from __future__ import absolute_import
+from .packages.six.moves.http_client import (
+ IncompleteRead as httplib_IncompleteRead
+)
+# Base Exceptions
+
+
+class HTTPError(Exception):
+ "Base exception used by this module."
+ pass
+
+
+class HTTPWarning(Warning):
+ "Base warning used by this module."
+ pass
+
+
+class PoolError(HTTPError):
+ "Base exception for errors caused within a pool."
+ def __init__(self, pool, message):
+ self.pool = pool
+ HTTPError.__init__(self, "%s: %s" % (pool, message))
+
+ def __reduce__(self):
+ # For pickling purposes.
+ return self.__class__, (None, None)
+
+
+class RequestError(PoolError):
+ "Base exception for PoolErrors that have associated URLs."
+ def __init__(self, pool, url, message):
+ self.url = url
+ PoolError.__init__(self, pool, message)
+
+ def __reduce__(self):
+ # For pickling purposes.
+ return self.__class__, (None, self.url, None)
+
+
+class SSLError(HTTPError):
+ "Raised when SSL certificate fails in an HTTPS connection."
+ pass
+
+
+class ProxyError(HTTPError):
+ "Raised when the connection to a proxy fails."
+ pass
+
+
+class DecodeError(HTTPError):
+ "Raised when automatic decoding based on Content-Type fails."
+ pass
+
+
+class ProtocolError(HTTPError):
+ "Raised when something unexpected happens mid-request/response."
+ pass
+
+
+#: Renamed to ProtocolError but aliased for backwards compatibility.
+ConnectionError = ProtocolError
+
+
+# Leaf Exceptions
+
+class MaxRetryError(RequestError):
+ """Raised when the maximum number of retries is exceeded.
+
+ :param pool: The connection pool
+ :type pool: :class:`~urllib3.connectionpool.HTTPConnectionPool`
+ :param string url: The requested Url
+ :param exceptions.Exception reason: The underlying error
+
+ """
+
+ def __init__(self, pool, url, reason=None):
+ self.reason = reason
+
+ message = "Max retries exceeded with url: %s (Caused by %r)" % (
+ url, reason)
+
+ RequestError.__init__(self, pool, url, message)
+
+
+class HostChangedError(RequestError):
+ "Raised when an existing pool gets a request for a foreign host."
+
+ def __init__(self, pool, url, retries=3):
+ message = "Tried to open a foreign host with url: %s" % url
+ RequestError.__init__(self, pool, url, message)
+ self.retries = retries
+
+
+class TimeoutStateError(HTTPError):
+ """ Raised when passing an invalid state to a timeout """
+ pass
+
+
+class TimeoutError(HTTPError):
+ """ Raised when a socket timeout error occurs.
+
+ Catching this error will catch both :exc:`ReadTimeoutErrors
+ <ReadTimeoutError>` and :exc:`ConnectTimeoutErrors <ConnectTimeoutError>`.
+ """
+ pass
+
+
+class ReadTimeoutError(TimeoutError, RequestError):
+ "Raised when a socket timeout occurs while receiving data from a server"
+ pass
+
+
+# This timeout error does not have a URL attached and needs to inherit from the
+# base HTTPError
+class ConnectTimeoutError(TimeoutError):
+ "Raised when a socket timeout occurs while connecting to a server"
+ pass
+
+
+class NewConnectionError(ConnectTimeoutError, PoolError):
+ "Raised when we fail to establish a new connection. Usually ECONNREFUSED."
+ pass
+
+
+class EmptyPoolError(PoolError):
+ "Raised when a pool runs out of connections and no more are allowed."
+ pass
+
+
+class ClosedPoolError(PoolError):
+ "Raised when a request enters a pool after the pool has been closed."
+ pass
+
+
+class LocationValueError(ValueError, HTTPError):
+ "Raised when there is something wrong with a given URL input."
+ pass
+
+
+class LocationParseError(LocationValueError):
+ "Raised when get_host or similar fails to parse the URL input."
+
+ def __init__(self, location):
+ message = "Failed to parse: %s" % location
+ HTTPError.__init__(self, message)
+
+ self.location = location
+
+
+class ResponseError(HTTPError):
+ "Used as a container for an error reason supplied in a MaxRetryError."
+ GENERIC_ERROR = 'too many error responses'
+ SPECIFIC_ERROR = 'too many {status_code} error responses'
+
+
+class SecurityWarning(HTTPWarning):
+    "Warned when performing security-reducing actions"
+ pass
+
+
+class SubjectAltNameWarning(SecurityWarning):
+ "Warned when connecting to a host with a certificate missing a SAN."
+ pass
+
+
+class InsecureRequestWarning(SecurityWarning):
+ "Warned when making an unverified HTTPS request."
+ pass
+
+
+class SystemTimeWarning(SecurityWarning):
+ "Warned when system time is suspected to be wrong"
+ pass
+
+
+class InsecurePlatformWarning(SecurityWarning):
+ "Warned when certain SSL configuration is not available on a platform."
+ pass
+
+
+class SNIMissingWarning(HTTPWarning):
+    "Warned when making an HTTPS request without SNI available."
+ pass
+
+
+class DependencyWarning(HTTPWarning):
+ """
+ Warned when an attempt is made to import a module with missing optional
+ dependencies.
+ """
+ pass
+
+
+class ResponseNotChunked(ProtocolError, ValueError):
+ "Response needs to be chunked in order to read it as chunks."
+ pass
+
+
+class BodyNotHttplibCompatible(HTTPError):
+ """
+ Body should be httplib.HTTPResponse like (have an fp attribute which
+ returns raw chunks) for read_chunked().
+ """
+ pass
+
+
+class IncompleteRead(HTTPError, httplib_IncompleteRead):
+ """
+ Response length doesn't match expected Content-Length
+
+ Subclass of http_client.IncompleteRead to allow int value
+ for `partial` to avoid creating large objects on streamed
+ reads.
+ """
+ def __init__(self, partial, expected):
+ super(IncompleteRead, self).__init__(partial, expected)
+
+ def __repr__(self):
+ return ('IncompleteRead(%i bytes read, '
+ '%i more expected)' % (self.partial, self.expected))
+
+
+class InvalidHeader(HTTPError):
+ "The header provided was somehow invalid."
+ pass
+
+
+class ProxySchemeUnknown(AssertionError, ValueError):
+ "ProxyManager does not support the supplied scheme"
+ # TODO(t-8ch): Stop inheriting from AssertionError in v2.0.
+
+ def __init__(self, scheme):
+ message = "Not supported proxy scheme %s" % scheme
+ super(ProxySchemeUnknown, self).__init__(message)
+
+
+class HeaderParsingError(HTTPError):
+ "Raised by assert_header_parsing, but we convert it to a log.warning statement."
+ def __init__(self, defects, unparsed_data):
+ message = '%s, unparsed data: %r' % (defects or 'Unknown', unparsed_data)
+ super(HeaderParsingError, self).__init__(message)
+
+
+class UnrewindableBodyError(HTTPError):
+ "urllib3 encountered an error when trying to rewind a body"
+ pass
diff --git a/collectors/python.d.plugin/python_modules/urllib3/fields.py b/collectors/python.d.plugin/python_modules/urllib3/fields.py
new file mode 100644
index 000000000..de7577b74
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/urllib3/fields.py
@@ -0,0 +1,179 @@
+# SPDX-License-Identifier: MIT
+from __future__ import absolute_import
+import email.utils
+import mimetypes
+
+from .packages import six
+
+
+def guess_content_type(filename, default='application/octet-stream'):
+ """
+ Guess the "Content-Type" of a file.
+
+ :param filename:
+ The filename to guess the "Content-Type" of using :mod:`mimetypes`.
+ :param default:
+ If no "Content-Type" can be guessed, default to `default`.
+ """
+ if filename:
+ return mimetypes.guess_type(filename)[0] or default
+ return default
+
+
+def format_header_param(name, value):
+ """
+ Helper function to format and quote a single header parameter.
+
+ Particularly useful for header parameters which might contain
+ non-ASCII values, like file names. This follows RFC 2231, as
+ suggested by RFC 2388 Section 4.4.
+
+ :param name:
+ The name of the parameter, a string expected to be ASCII only.
+ :param value:
+ The value of the parameter, provided as a unicode string.
+ """
+ if not any(ch in value for ch in '"\\\r\n'):
+ result = '%s="%s"' % (name, value)
+ try:
+ result.encode('ascii')
+ except (UnicodeEncodeError, UnicodeDecodeError):
+ pass
+ else:
+ return result
+ if not six.PY3 and isinstance(value, six.text_type): # Python 2:
+ value = value.encode('utf-8')
+ value = email.utils.encode_rfc2231(value, 'utf-8')
+ value = '%s*=%s' % (name, value)
+ return value
+
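+# For illustration (not executed; the filename is a placeholder):
+# format_header_param('filename', u'report.txt') returns
+# 'filename="report.txt"'; a value containing '"', '\\' or newlines is
+# emitted RFC 2231 style instead, e.g. "filename*=utf-8''...".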
+
+class RequestField(object):
+ """
+ A data container for request body parameters.
+
+ :param name:
+ The name of this request field.
+ :param data:
+ The data/value body.
+ :param filename:
+ An optional filename of the request field.
+ :param headers:
+ An optional dict-like object of headers to initially use for the field.
+ """
+ def __init__(self, name, data, filename=None, headers=None):
+ self._name = name
+ self._filename = filename
+ self.data = data
+ self.headers = {}
+ if headers:
+ self.headers = dict(headers)
+
+ @classmethod
+ def from_tuples(cls, fieldname, value):
+ """
+ A :class:`~urllib3.fields.RequestField` factory from old-style tuple parameters.
+
+ Supports constructing :class:`~urllib3.fields.RequestField` from
+ parameter of key/value strings AND key/filetuple. A filetuple is a
+ (filename, data, MIME type) tuple where the MIME type is optional.
+ For example::
+
+ 'foo': 'bar',
+ 'fakefile': ('foofile.txt', 'contents of foofile'),
+ 'realfile': ('barfile.txt', open('realfile').read()),
+ 'typedfile': ('bazfile.bin', open('bazfile').read(), 'image/jpeg'),
+ 'nonamefile': 'contents of nonamefile field',
+
+ Field names and filenames must be unicode.
+ """
+ if isinstance(value, tuple):
+ if len(value) == 3:
+ filename, data, content_type = value
+ else:
+ filename, data = value
+ content_type = guess_content_type(filename)
+ else:
+ filename = None
+ content_type = None
+ data = value
+
+ request_param = cls(fieldname, data, filename=filename)
+ request_param.make_multipart(content_type=content_type)
+
+ return request_param
+
+ def _render_part(self, name, value):
+ """
+ Overridable helper function to format a single header parameter.
+
+ :param name:
+ The name of the parameter, a string expected to be ASCII only.
+ :param value:
+ The value of the parameter, provided as a unicode string.
+ """
+ return format_header_param(name, value)
+
+ def _render_parts(self, header_parts):
+ """
+ Helper function to format and quote a single header.
+
+ Useful for single headers that are composed of multiple items. E.g.,
+ 'Content-Disposition' fields.
+
+ :param header_parts:
+            A sequence of (k, v) tuples or a :class:`dict` of (k, v) to format
+ as `k1="v1"; k2="v2"; ...`.
+ """
+ parts = []
+ iterable = header_parts
+ if isinstance(header_parts, dict):
+ iterable = header_parts.items()
+
+ for name, value in iterable:
+ if value is not None:
+ parts.append(self._render_part(name, value))
+
+ return '; '.join(parts)
+
+ def render_headers(self):
+ """
+ Renders the headers for this request field.
+ """
+ lines = []
+
+ sort_keys = ['Content-Disposition', 'Content-Type', 'Content-Location']
+ for sort_key in sort_keys:
+ if self.headers.get(sort_key, False):
+ lines.append('%s: %s' % (sort_key, self.headers[sort_key]))
+
+ for header_name, header_value in self.headers.items():
+ if header_name not in sort_keys:
+ if header_value:
+ lines.append('%s: %s' % (header_name, header_value))
+
+ lines.append('\r\n')
+ return '\r\n'.join(lines)
+
+ def make_multipart(self, content_disposition=None, content_type=None,
+ content_location=None):
+ """
+ Makes this request field into a multipart request field.
+
+        This method sets the "Content-Disposition", "Content-Type" and
+        "Content-Location" headers on the request field.
+
+ :param content_type:
+ The 'Content-Type' of the request body.
+ :param content_location:
+ The 'Content-Location' of the request body.
+
+ """
+ self.headers['Content-Disposition'] = content_disposition or 'form-data'
+ self.headers['Content-Disposition'] += '; '.join([
+ '', self._render_parts(
+ (('name', self._name), ('filename', self._filename))
+ )
+ ])
+ self.headers['Content-Type'] = content_type
+ self.headers['Content-Location'] = content_location
diff --git a/collectors/python.d.plugin/python_modules/urllib3/filepost.py b/collectors/python.d.plugin/python_modules/urllib3/filepost.py
new file mode 100644
index 000000000..3febc9cfe
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/urllib3/filepost.py
@@ -0,0 +1,95 @@
+# SPDX-License-Identifier: MIT
+from __future__ import absolute_import
+import codecs
+
+from uuid import uuid4
+from io import BytesIO
+
+from .packages import six
+from .packages.six import b
+from .fields import RequestField
+
+writer = codecs.lookup('utf-8')[3]
+
+
+def choose_boundary():
+ """
+ Our embarrassingly-simple replacement for mimetools.choose_boundary.
+ """
+ return uuid4().hex
+
+
+def iter_field_objects(fields):
+ """
+ Iterate over fields.
+
+ Supports list of (k, v) tuples and dicts, and lists of
+ :class:`~urllib3.fields.RequestField`.
+
+ """
+ if isinstance(fields, dict):
+ i = six.iteritems(fields)
+ else:
+ i = iter(fields)
+
+ for field in i:
+ if isinstance(field, RequestField):
+ yield field
+ else:
+ yield RequestField.from_tuples(*field)
+
+
+def iter_fields(fields):
+ """
+ .. deprecated:: 1.6
+
+ Iterate over fields.
+
+ The addition of :class:`~urllib3.fields.RequestField` makes this function
+ obsolete. Instead, use :func:`iter_field_objects`, which returns
+ :class:`~urllib3.fields.RequestField` objects.
+
+ Supports list of (k, v) tuples and dicts.
+ """
+ if isinstance(fields, dict):
+ return ((k, v) for k, v in six.iteritems(fields))
+
+ return ((k, v) for k, v in fields)
+
+
+def encode_multipart_formdata(fields, boundary=None):
+ """
+ Encode a dictionary of ``fields`` using the multipart/form-data MIME format.
+
+ :param fields:
+ Dictionary of fields or list of (key, :class:`~urllib3.fields.RequestField`).
+
+ :param boundary:
+ If not specified, then a random boundary will be generated using
+ :func:`mimetools.choose_boundary`.
+ """
+ body = BytesIO()
+ if boundary is None:
+ boundary = choose_boundary()
+
+ for field in iter_field_objects(fields):
+ body.write(b('--%s\r\n' % (boundary)))
+
+ writer(body).write(field.render_headers())
+ data = field.data
+
+ if isinstance(data, int):
+ data = str(data) # Backwards compatibility
+
+ if isinstance(data, six.text_type):
+ writer(body).write(data)
+ else:
+ body.write(data)
+
+ body.write(b'\r\n')
+
+ body.write(b('--%s--\r\n' % (boundary)))
+
+ content_type = str('multipart/form-data; boundary=%s' % boundary)
+
+ return body.getvalue(), content_type
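+
+
+# Example usage (a minimal sketch, not executed here; field names, filename
+# and contents are placeholders):
+#
+#     body, content_type = encode_multipart_formdata({
+#         'field': 'value',
+#         'upload': ('report.txt', 'file contents', 'text/plain'),
+#     })
+#     # body is the encoded payload as bytes; content_type is e.g.
+#     # 'multipart/form-data; boundary=<random hex>'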
diff --git a/collectors/python.d.plugin/python_modules/urllib3/packages/__init__.py b/collectors/python.d.plugin/python_modules/urllib3/packages/__init__.py
new file mode 100644
index 000000000..170e974c1
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/urllib3/packages/__init__.py
@@ -0,0 +1,5 @@
+from __future__ import absolute_import
+
+from . import ssl_match_hostname
+
+__all__ = ('ssl_match_hostname', )
diff --git a/collectors/python.d.plugin/python_modules/urllib3/packages/backports/__init__.py b/collectors/python.d.plugin/python_modules/urllib3/packages/backports/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/urllib3/packages/backports/__init__.py
diff --git a/collectors/python.d.plugin/python_modules/urllib3/packages/backports/makefile.py b/collectors/python.d.plugin/python_modules/urllib3/packages/backports/makefile.py
new file mode 100644
index 000000000..8ab122f8b
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/urllib3/packages/backports/makefile.py
@@ -0,0 +1,54 @@
+# -*- coding: utf-8 -*-
+# SPDX-License-Identifier: MIT
+"""
+backports.makefile
+~~~~~~~~~~~~~~~~~~
+
+Backports the Python 3 ``socket.makefile`` method for use with anything that
+wants to create a "fake" socket object.
+"""
+import io
+
+from socket import SocketIO
+
+
+def backport_makefile(self, mode="r", buffering=None, encoding=None,
+ errors=None, newline=None):
+ """
+ Backport of ``socket.makefile`` from Python 3.5.
+ """
+ if not set(mode) <= set(["r", "w", "b"]):
+ raise ValueError(
+ "invalid mode %r (only r, w, b allowed)" % (mode,)
+ )
+ writing = "w" in mode
+ reading = "r" in mode or not writing
+ assert reading or writing
+ binary = "b" in mode
+ rawmode = ""
+ if reading:
+ rawmode += "r"
+ if writing:
+ rawmode += "w"
+ raw = SocketIO(self, rawmode)
+ self._makefile_refs += 1
+ if buffering is None:
+ buffering = -1
+ if buffering < 0:
+ buffering = io.DEFAULT_BUFFER_SIZE
+ if buffering == 0:
+ if not binary:
+ raise ValueError("unbuffered streams must be binary")
+ return raw
+ if reading and writing:
+ buffer = io.BufferedRWPair(raw, raw, buffering)
+ elif reading:
+ buffer = io.BufferedReader(raw, buffering)
+ else:
+ assert writing
+ buffer = io.BufferedWriter(raw, buffering)
+ if binary:
+ return buffer
+ text = io.TextIOWrapper(buffer, encoding, errors, newline)
+ text.mode = mode
+ return text
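+
+
+# For illustration (not executed): on Python 3 the PyOpenSSL and
+# SecureTransport contrib modules above attach this function as
+# WrappedSocket.makefile (the SecureTransport variant forces buffering=0),
+# giving their wrapped sockets the Python 3 socket.makefile() interface.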
diff --git a/collectors/python.d.plugin/python_modules/urllib3/packages/ordered_dict.py b/collectors/python.d.plugin/python_modules/urllib3/packages/ordered_dict.py
new file mode 100644
index 000000000..9f7c0e6b8
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/urllib3/packages/ordered_dict.py
@@ -0,0 +1,260 @@
+# Backport of OrderedDict() class that runs on Python 2.4, 2.5, 2.6, 2.7 and pypy.
+# Passes Python2.7's test suite and incorporates all the latest updates.
+# Copyright 2009 Raymond Hettinger, released under the MIT License.
+# http://code.activestate.com/recipes/576693/
+# SPDX-License-Identifier: MIT
+try:
+ from thread import get_ident as _get_ident
+except ImportError:
+ from dummy_thread import get_ident as _get_ident
+
+try:
+ from _abcoll import KeysView, ValuesView, ItemsView
+except ImportError:
+ pass
+
+
+class OrderedDict(dict):
+ 'Dictionary that remembers insertion order'
+ # An inherited dict maps keys to values.
+ # The inherited dict provides __getitem__, __len__, __contains__, and get.
+ # The remaining methods are order-aware.
+ # Big-O running times for all methods are the same as for regular dictionaries.
+
+ # The internal self.__map dictionary maps keys to links in a doubly linked list.
+ # The circular doubly linked list starts and ends with a sentinel element.
+ # The sentinel element never gets deleted (this simplifies the algorithm).
+ # Each link is stored as a list of length three: [PREV, NEXT, KEY].
+
+ def __init__(self, *args, **kwds):
+ '''Initialize an ordered dictionary. Signature is the same as for
+ regular dictionaries, but keyword arguments are not recommended
+ because their insertion order is arbitrary.
+
+ '''
+ if len(args) > 1:
+ raise TypeError('expected at most 1 arguments, got %d' % len(args))
+ try:
+ self.__root
+ except AttributeError:
+ self.__root = root = [] # sentinel node
+ root[:] = [root, root, None]
+ self.__map = {}
+ self.__update(*args, **kwds)
+
+ def __setitem__(self, key, value, dict_setitem=dict.__setitem__):
+ 'od.__setitem__(i, y) <==> od[i]=y'
+ # Setting a new item creates a new link which goes at the end of the linked
+ # list, and the inherited dictionary is updated with the new key/value pair.
+ if key not in self:
+ root = self.__root
+ last = root[0]
+ last[1] = root[0] = self.__map[key] = [last, root, key]
+ dict_setitem(self, key, value)
+
+ def __delitem__(self, key, dict_delitem=dict.__delitem__):
+ 'od.__delitem__(y) <==> del od[y]'
+ # Deleting an existing item uses self.__map to find the link which is
+ # then removed by updating the links in the predecessor and successor nodes.
+ dict_delitem(self, key)
+ link_prev, link_next, key = self.__map.pop(key)
+ link_prev[1] = link_next
+ link_next[0] = link_prev
+
+ def __iter__(self):
+ 'od.__iter__() <==> iter(od)'
+ root = self.__root
+ curr = root[1]
+ while curr is not root:
+ yield curr[2]
+ curr = curr[1]
+
+ def __reversed__(self):
+ 'od.__reversed__() <==> reversed(od)'
+ root = self.__root
+ curr = root[0]
+ while curr is not root:
+ yield curr[2]
+ curr = curr[0]
+
+ def clear(self):
+ 'od.clear() -> None. Remove all items from od.'
+ try:
+ for node in self.__map.itervalues():
+ del node[:]
+ root = self.__root
+ root[:] = [root, root, None]
+ self.__map.clear()
+ except AttributeError:
+ pass
+ dict.clear(self)
+
+ def popitem(self, last=True):
+ '''od.popitem() -> (k, v), return and remove a (key, value) pair.
+ Pairs are returned in LIFO order if last is true or FIFO order if false.
+
+ '''
+ if not self:
+ raise KeyError('dictionary is empty')
+ root = self.__root
+ if last:
+ link = root[0]
+ link_prev = link[0]
+ link_prev[1] = root
+ root[0] = link_prev
+ else:
+ link = root[1]
+ link_next = link[1]
+ root[1] = link_next
+ link_next[0] = root
+ key = link[2]
+ del self.__map[key]
+ value = dict.pop(self, key)
+ return key, value
+
+ # -- the following methods do not depend on the internal structure --
+
+ def keys(self):
+ 'od.keys() -> list of keys in od'
+ return list(self)
+
+ def values(self):
+ 'od.values() -> list of values in od'
+ return [self[key] for key in self]
+
+ def items(self):
+ 'od.items() -> list of (key, value) pairs in od'
+ return [(key, self[key]) for key in self]
+
+ def iterkeys(self):
+ 'od.iterkeys() -> an iterator over the keys in od'
+ return iter(self)
+
+ def itervalues(self):
+ 'od.itervalues() -> an iterator over the values in od'
+ for k in self:
+ yield self[k]
+
+ def iteritems(self):
+ 'od.iteritems() -> an iterator over the (key, value) items in od'
+ for k in self:
+ yield (k, self[k])
+
+ def update(*args, **kwds):
+ '''od.update(E, **F) -> None. Update od from dict/iterable E and F.
+
+ If E is a dict instance, does: for k in E: od[k] = E[k]
+ If E has a .keys() method, does: for k in E.keys(): od[k] = E[k]
+ Or if E is an iterable of items, does: for k, v in E: od[k] = v
+ In either case, this is followed by: for k, v in F.items(): od[k] = v
+
+ '''
+ if len(args) > 2:
+ raise TypeError('update() takes at most 2 positional '
+ 'arguments (%d given)' % (len(args),))
+ elif not args:
+ raise TypeError('update() takes at least 1 argument (0 given)')
+ self = args[0]
+ # Make progressively weaker assumptions about "other"
+ other = ()
+ if len(args) == 2:
+ other = args[1]
+ if isinstance(other, dict):
+ for key in other:
+ self[key] = other[key]
+ elif hasattr(other, 'keys'):
+ for key in other.keys():
+ self[key] = other[key]
+ else:
+ for key, value in other:
+ self[key] = value
+ for key, value in kwds.items():
+ self[key] = value
+
+ __update = update # let subclasses override update without breaking __init__
+
+ __marker = object()
+
+ def pop(self, key, default=__marker):
+ '''od.pop(k[,d]) -> v, remove specified key and return the corresponding value.
+ If key is not found, d is returned if given, otherwise KeyError is raised.
+
+ '''
+ if key in self:
+ result = self[key]
+ del self[key]
+ return result
+ if default is self.__marker:
+ raise KeyError(key)
+ return default
+
+ def setdefault(self, key, default=None):
+ 'od.setdefault(k[,d]) -> od.get(k,d), also set od[k]=d if k not in od'
+ if key in self:
+ return self[key]
+ self[key] = default
+ return default
+
+ def __repr__(self, _repr_running={}):
+ 'od.__repr__() <==> repr(od)'
+ call_key = id(self), _get_ident()
+ if call_key in _repr_running:
+ return '...'
+ _repr_running[call_key] = 1
+ try:
+ if not self:
+ return '%s()' % (self.__class__.__name__,)
+ return '%s(%r)' % (self.__class__.__name__, self.items())
+ finally:
+ del _repr_running[call_key]
+
+ def __reduce__(self):
+ 'Return state information for pickling'
+ items = [[k, self[k]] for k in self]
+ inst_dict = vars(self).copy()
+ for k in vars(OrderedDict()):
+ inst_dict.pop(k, None)
+ if inst_dict:
+ return (self.__class__, (items,), inst_dict)
+ return self.__class__, (items,)
+
+ def copy(self):
+ 'od.copy() -> a shallow copy of od'
+ return self.__class__(self)
+
+ @classmethod
+ def fromkeys(cls, iterable, value=None):
+ '''OD.fromkeys(S[, v]) -> New ordered dictionary with keys from S
+ and values equal to v (which defaults to None).
+
+ '''
+ d = cls()
+ for key in iterable:
+ d[key] = value
+ return d
+
+ def __eq__(self, other):
+ '''od.__eq__(y) <==> od==y. Comparison to another OD is order-sensitive
+ while comparison to a regular mapping is order-insensitive.
+
+ '''
+ if isinstance(other, OrderedDict):
+ return len(self)==len(other) and self.items() == other.items()
+ return dict.__eq__(self, other)
+
+ def __ne__(self, other):
+ return not self == other
+
+ # -- the following methods are only used in Python 2.7 --
+
+ def viewkeys(self):
+ "od.viewkeys() -> a set-like object providing a view on od's keys"
+ return KeysView(self)
+
+ def viewvalues(self):
+ "od.viewvalues() -> an object providing a view on od's values"
+ return ValuesView(self)
+
+ def viewitems(self):
+ "od.viewitems() -> a set-like object providing a view on od's items"
+ return ItemsView(self)
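
A short usage sketch of the backported class above (the keys and values are illustrative):

>>> from urllib3.packages.ordered_dict import OrderedDict
>>> od = OrderedDict([('a', 1), ('b', 2), ('c', 3)])
>>> od.keys()
['a', 'b', 'c']
>>> od.popitem(last=False)                     # FIFO order when last is false
('a', 1)
>>> od == OrderedDict([('c', 3), ('b', 2)])    # order-sensitive against another OrderedDict
False
>>> od == {'b': 2, 'c': 3}                     # order-insensitive against a plain dict
True
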
diff --git a/collectors/python.d.plugin/python_modules/urllib3/packages/six.py b/collectors/python.d.plugin/python_modules/urllib3/packages/six.py
new file mode 100644
index 000000000..31df5012b
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/urllib3/packages/six.py
@@ -0,0 +1,852 @@
+"""Utilities for writing code that runs on Python 2 and 3"""
+
+# Copyright (c) 2010-2015 Benjamin Peterson
+#
+# SPDX-License-Identifier: MIT
+
+from __future__ import absolute_import
+
+import functools
+import itertools
+import operator
+import sys
+import types
+
+__author__ = "Benjamin Peterson <benjamin@python.org>"
+__version__ = "1.10.0"
+
+
+# Useful for very coarse version differentiation.
+PY2 = sys.version_info[0] == 2
+PY3 = sys.version_info[0] == 3
+PY34 = sys.version_info[0:2] >= (3, 4)
+
+if PY3:
+ string_types = str,
+ integer_types = int,
+ class_types = type,
+ text_type = str
+ binary_type = bytes
+
+ MAXSIZE = sys.maxsize
+else:
+ string_types = basestring,
+ integer_types = (int, long)
+ class_types = (type, types.ClassType)
+ text_type = unicode
+ binary_type = str
+
+ if sys.platform.startswith("java"):
+ # Jython always uses 32 bits.
+ MAXSIZE = int((1 << 31) - 1)
+ else:
+ # It's possible to have sizeof(long) != sizeof(Py_ssize_t).
+ class X(object):
+
+ def __len__(self):
+ return 1 << 31
+ try:
+ len(X())
+ except OverflowError:
+ # 32-bit
+ MAXSIZE = int((1 << 31) - 1)
+ else:
+ # 64-bit
+ MAXSIZE = int((1 << 63) - 1)
+ del X
+
+
+def _add_doc(func, doc):
+ """Add documentation to a function."""
+ func.__doc__ = doc
+
+
+def _import_module(name):
+ """Import module, returning the module after the last dot."""
+ __import__(name)
+ return sys.modules[name]
+
+
+class _LazyDescr(object):
+
+ def __init__(self, name):
+ self.name = name
+
+ def __get__(self, obj, tp):
+ result = self._resolve()
+ setattr(obj, self.name, result) # Invokes __set__.
+ try:
+ # This is a bit ugly, but it avoids running this again by
+ # removing this descriptor.
+ delattr(obj.__class__, self.name)
+ except AttributeError:
+ pass
+ return result
+
+
+class MovedModule(_LazyDescr):
+
+ def __init__(self, name, old, new=None):
+ super(MovedModule, self).__init__(name)
+ if PY3:
+ if new is None:
+ new = name
+ self.mod = new
+ else:
+ self.mod = old
+
+ def _resolve(self):
+ return _import_module(self.mod)
+
+ def __getattr__(self, attr):
+ _module = self._resolve()
+ value = getattr(_module, attr)
+ setattr(self, attr, value)
+ return value
+
+
+class _LazyModule(types.ModuleType):
+
+ def __init__(self, name):
+ super(_LazyModule, self).__init__(name)
+ self.__doc__ = self.__class__.__doc__
+
+ def __dir__(self):
+ attrs = ["__doc__", "__name__"]
+ attrs += [attr.name for attr in self._moved_attributes]
+ return attrs
+
+ # Subclasses should override this
+ _moved_attributes = []
+
+
+class MovedAttribute(_LazyDescr):
+
+ def __init__(self, name, old_mod, new_mod, old_attr=None, new_attr=None):
+ super(MovedAttribute, self).__init__(name)
+ if PY3:
+ if new_mod is None:
+ new_mod = name
+ self.mod = new_mod
+ if new_attr is None:
+ if old_attr is None:
+ new_attr = name
+ else:
+ new_attr = old_attr
+ self.attr = new_attr
+ else:
+ self.mod = old_mod
+ if old_attr is None:
+ old_attr = name
+ self.attr = old_attr
+
+ def _resolve(self):
+ module = _import_module(self.mod)
+ return getattr(module, self.attr)
+
+
+class _SixMetaPathImporter(object):
+
+ """
+ A meta path importer to import six.moves and its submodules.
+
+ This class implements a PEP302 finder and loader. It should be compatible
+ with Python 2.5 and all existing versions of Python 3.
+ """
+
+ def __init__(self, six_module_name):
+ self.name = six_module_name
+ self.known_modules = {}
+
+ def _add_module(self, mod, *fullnames):
+ for fullname in fullnames:
+ self.known_modules[self.name + "." + fullname] = mod
+
+ def _get_module(self, fullname):
+ return self.known_modules[self.name + "." + fullname]
+
+ def find_module(self, fullname, path=None):
+ if fullname in self.known_modules:
+ return self
+ return None
+
+ def __get_module(self, fullname):
+ try:
+ return self.known_modules[fullname]
+ except KeyError:
+ raise ImportError("This loader does not know module " + fullname)
+
+ def load_module(self, fullname):
+ try:
+ # in case of a reload
+ return sys.modules[fullname]
+ except KeyError:
+ pass
+ mod = self.__get_module(fullname)
+ if isinstance(mod, MovedModule):
+ mod = mod._resolve()
+ else:
+ mod.__loader__ = self
+ sys.modules[fullname] = mod
+ return mod
+
+ def is_package(self, fullname):
+ """
+ Return true if the named module is a package.
+
+ We need this method to get correct spec objects with
+ Python 3.4 (see PEP451)
+ """
+ return hasattr(self.__get_module(fullname), "__path__")
+
+ def get_code(self, fullname):
+ """Return None
+
+ Required if is_package() is implemented"""
+ self.__get_module(fullname) # eventually raises ImportError
+ return None
+ get_source = get_code # same as get_code
+
+_importer = _SixMetaPathImporter(__name__)
+
+
+class _MovedItems(_LazyModule):
+
+ """Lazy loading of moved objects"""
+ __path__ = [] # mark as package
+
+
+_moved_attributes = [
+ MovedAttribute("cStringIO", "cStringIO", "io", "StringIO"),
+ MovedAttribute("filter", "itertools", "builtins", "ifilter", "filter"),
+ MovedAttribute("filterfalse", "itertools", "itertools", "ifilterfalse", "filterfalse"),
+ MovedAttribute("input", "__builtin__", "builtins", "raw_input", "input"),
+ MovedAttribute("intern", "__builtin__", "sys"),
+ MovedAttribute("map", "itertools", "builtins", "imap", "map"),
+ MovedAttribute("getcwd", "os", "os", "getcwdu", "getcwd"),
+ MovedAttribute("getcwdb", "os", "os", "getcwd", "getcwdb"),
+ MovedAttribute("range", "__builtin__", "builtins", "xrange", "range"),
+ MovedAttribute("reload_module", "__builtin__", "importlib" if PY34 else "imp", "reload"),
+ MovedAttribute("reduce", "__builtin__", "functools"),
+ MovedAttribute("shlex_quote", "pipes", "shlex", "quote"),
+ MovedAttribute("StringIO", "StringIO", "io"),
+ MovedAttribute("UserDict", "UserDict", "collections"),
+ MovedAttribute("UserList", "UserList", "collections"),
+ MovedAttribute("UserString", "UserString", "collections"),
+ MovedAttribute("xrange", "__builtin__", "builtins", "xrange", "range"),
+ MovedAttribute("zip", "itertools", "builtins", "izip", "zip"),
+ MovedAttribute("zip_longest", "itertools", "itertools", "izip_longest", "zip_longest"),
+ MovedModule("builtins", "__builtin__"),
+ MovedModule("configparser", "ConfigParser"),
+ MovedModule("copyreg", "copy_reg"),
+ MovedModule("dbm_gnu", "gdbm", "dbm.gnu"),
+ MovedModule("_dummy_thread", "dummy_thread", "_dummy_thread"),
+ MovedModule("http_cookiejar", "cookielib", "http.cookiejar"),
+ MovedModule("http_cookies", "Cookie", "http.cookies"),
+ MovedModule("html_entities", "htmlentitydefs", "html.entities"),
+ MovedModule("html_parser", "HTMLParser", "html.parser"),
+ MovedModule("http_client", "httplib", "http.client"),
+ MovedModule("email_mime_multipart", "email.MIMEMultipart", "email.mime.multipart"),
+ MovedModule("email_mime_nonmultipart", "email.MIMENonMultipart", "email.mime.nonmultipart"),
+ MovedModule("email_mime_text", "email.MIMEText", "email.mime.text"),
+ MovedModule("email_mime_base", "email.MIMEBase", "email.mime.base"),
+ MovedModule("BaseHTTPServer", "BaseHTTPServer", "http.server"),
+ MovedModule("CGIHTTPServer", "CGIHTTPServer", "http.server"),
+ MovedModule("SimpleHTTPServer", "SimpleHTTPServer", "http.server"),
+ MovedModule("cPickle", "cPickle", "pickle"),
+ MovedModule("queue", "Queue"),
+ MovedModule("reprlib", "repr"),
+ MovedModule("socketserver", "SocketServer"),
+ MovedModule("_thread", "thread", "_thread"),
+ MovedModule("tkinter", "Tkinter"),
+ MovedModule("tkinter_dialog", "Dialog", "tkinter.dialog"),
+ MovedModule("tkinter_filedialog", "FileDialog", "tkinter.filedialog"),
+ MovedModule("tkinter_scrolledtext", "ScrolledText", "tkinter.scrolledtext"),
+ MovedModule("tkinter_simpledialog", "SimpleDialog", "tkinter.simpledialog"),
+ MovedModule("tkinter_tix", "Tix", "tkinter.tix"),
+ MovedModule("tkinter_ttk", "ttk", "tkinter.ttk"),
+ MovedModule("tkinter_constants", "Tkconstants", "tkinter.constants"),
+ MovedModule("tkinter_dnd", "Tkdnd", "tkinter.dnd"),
+ MovedModule("tkinter_colorchooser", "tkColorChooser",
+ "tkinter.colorchooser"),
+ MovedModule("tkinter_commondialog", "tkCommonDialog",
+ "tkinter.commondialog"),
+ MovedModule("tkinter_tkfiledialog", "tkFileDialog", "tkinter.filedialog"),
+ MovedModule("tkinter_font", "tkFont", "tkinter.font"),
+ MovedModule("tkinter_messagebox", "tkMessageBox", "tkinter.messagebox"),
+ MovedModule("tkinter_tksimpledialog", "tkSimpleDialog",
+ "tkinter.simpledialog"),
+ MovedModule("urllib_parse", __name__ + ".moves.urllib_parse", "urllib.parse"),
+ MovedModule("urllib_error", __name__ + ".moves.urllib_error", "urllib.error"),
+ MovedModule("urllib", __name__ + ".moves.urllib", __name__ + ".moves.urllib"),
+ MovedModule("urllib_robotparser", "robotparser", "urllib.robotparser"),
+ MovedModule("xmlrpc_client", "xmlrpclib", "xmlrpc.client"),
+ MovedModule("xmlrpc_server", "SimpleXMLRPCServer", "xmlrpc.server"),
+]
+# Add windows specific modules.
+if sys.platform == "win32":
+ _moved_attributes += [
+ MovedModule("winreg", "_winreg"),
+ ]
+
+for attr in _moved_attributes:
+ setattr(_MovedItems, attr.name, attr)
+ if isinstance(attr, MovedModule):
+ _importer._add_module(attr, "moves." + attr.name)
+del attr
+
+_MovedItems._moved_attributes = _moved_attributes
+
+moves = _MovedItems(__name__ + ".moves")
+_importer._add_module(moves, "moves")
+
+
+class Module_six_moves_urllib_parse(_LazyModule):
+
+ """Lazy loading of moved objects in six.moves.urllib_parse"""
+
+
+_urllib_parse_moved_attributes = [
+ MovedAttribute("ParseResult", "urlparse", "urllib.parse"),
+ MovedAttribute("SplitResult", "urlparse", "urllib.parse"),
+ MovedAttribute("parse_qs", "urlparse", "urllib.parse"),
+ MovedAttribute("parse_qsl", "urlparse", "urllib.parse"),
+ MovedAttribute("urldefrag", "urlparse", "urllib.parse"),
+ MovedAttribute("urljoin", "urlparse", "urllib.parse"),
+ MovedAttribute("urlparse", "urlparse", "urllib.parse"),
+ MovedAttribute("urlsplit", "urlparse", "urllib.parse"),
+ MovedAttribute("urlunparse", "urlparse", "urllib.parse"),
+ MovedAttribute("urlunsplit", "urlparse", "urllib.parse"),
+ MovedAttribute("quote", "urllib", "urllib.parse"),
+ MovedAttribute("quote_plus", "urllib", "urllib.parse"),
+ MovedAttribute("unquote", "urllib", "urllib.parse"),
+ MovedAttribute("unquote_plus", "urllib", "urllib.parse"),
+ MovedAttribute("urlencode", "urllib", "urllib.parse"),
+ MovedAttribute("splitquery", "urllib", "urllib.parse"),
+ MovedAttribute("splittag", "urllib", "urllib.parse"),
+ MovedAttribute("splituser", "urllib", "urllib.parse"),
+ MovedAttribute("uses_fragment", "urlparse", "urllib.parse"),
+ MovedAttribute("uses_netloc", "urlparse", "urllib.parse"),
+ MovedAttribute("uses_params", "urlparse", "urllib.parse"),
+ MovedAttribute("uses_query", "urlparse", "urllib.parse"),
+ MovedAttribute("uses_relative", "urlparse", "urllib.parse"),
+]
+for attr in _urllib_parse_moved_attributes:
+ setattr(Module_six_moves_urllib_parse, attr.name, attr)
+del attr
+
+Module_six_moves_urllib_parse._moved_attributes = _urllib_parse_moved_attributes
+
+_importer._add_module(Module_six_moves_urllib_parse(__name__ + ".moves.urllib_parse"),
+ "moves.urllib_parse", "moves.urllib.parse")
+
+
+class Module_six_moves_urllib_error(_LazyModule):
+
+ """Lazy loading of moved objects in six.moves.urllib_error"""
+
+
+_urllib_error_moved_attributes = [
+ MovedAttribute("URLError", "urllib2", "urllib.error"),
+ MovedAttribute("HTTPError", "urllib2", "urllib.error"),
+ MovedAttribute("ContentTooShortError", "urllib", "urllib.error"),
+]
+for attr in _urllib_error_moved_attributes:
+ setattr(Module_six_moves_urllib_error, attr.name, attr)
+del attr
+
+Module_six_moves_urllib_error._moved_attributes = _urllib_error_moved_attributes
+
+_importer._add_module(Module_six_moves_urllib_error(__name__ + ".moves.urllib.error"),
+ "moves.urllib_error", "moves.urllib.error")
+
+
+class Module_six_moves_urllib_request(_LazyModule):
+
+ """Lazy loading of moved objects in six.moves.urllib_request"""
+
+
+_urllib_request_moved_attributes = [
+ MovedAttribute("urlopen", "urllib2", "urllib.request"),
+ MovedAttribute("install_opener", "urllib2", "urllib.request"),
+ MovedAttribute("build_opener", "urllib2", "urllib.request"),
+ MovedAttribute("pathname2url", "urllib", "urllib.request"),
+ MovedAttribute("url2pathname", "urllib", "urllib.request"),
+ MovedAttribute("getproxies", "urllib", "urllib.request"),
+ MovedAttribute("Request", "urllib2", "urllib.request"),
+ MovedAttribute("OpenerDirector", "urllib2", "urllib.request"),
+ MovedAttribute("HTTPDefaultErrorHandler", "urllib2", "urllib.request"),
+ MovedAttribute("HTTPRedirectHandler", "urllib2", "urllib.request"),
+ MovedAttribute("HTTPCookieProcessor", "urllib2", "urllib.request"),
+ MovedAttribute("ProxyHandler", "urllib2", "urllib.request"),
+ MovedAttribute("BaseHandler", "urllib2", "urllib.request"),
+ MovedAttribute("HTTPPasswordMgr", "urllib2", "urllib.request"),
+ MovedAttribute("HTTPPasswordMgrWithDefaultRealm", "urllib2", "urllib.request"),
+ MovedAttribute("AbstractBasicAuthHandler", "urllib2", "urllib.request"),
+ MovedAttribute("HTTPBasicAuthHandler", "urllib2", "urllib.request"),
+ MovedAttribute("ProxyBasicAuthHandler", "urllib2", "urllib.request"),
+ MovedAttribute("AbstractDigestAuthHandler", "urllib2", "urllib.request"),
+ MovedAttribute("HTTPDigestAuthHandler", "urllib2", "urllib.request"),
+ MovedAttribute("ProxyDigestAuthHandler", "urllib2", "urllib.request"),
+ MovedAttribute("HTTPHandler", "urllib2", "urllib.request"),
+ MovedAttribute("HTTPSHandler", "urllib2", "urllib.request"),
+ MovedAttribute("FileHandler", "urllib2", "urllib.request"),
+ MovedAttribute("FTPHandler", "urllib2", "urllib.request"),
+ MovedAttribute("CacheFTPHandler", "urllib2", "urllib.request"),
+ MovedAttribute("UnknownHandler", "urllib2", "urllib.request"),
+ MovedAttribute("HTTPErrorProcessor", "urllib2", "urllib.request"),
+ MovedAttribute("urlretrieve", "urllib", "urllib.request"),
+ MovedAttribute("urlcleanup", "urllib", "urllib.request"),
+ MovedAttribute("URLopener", "urllib", "urllib.request"),
+ MovedAttribute("FancyURLopener", "urllib", "urllib.request"),
+ MovedAttribute("proxy_bypass", "urllib", "urllib.request"),
+]
+for attr in _urllib_request_moved_attributes:
+ setattr(Module_six_moves_urllib_request, attr.name, attr)
+del attr
+
+Module_six_moves_urllib_request._moved_attributes = _urllib_request_moved_attributes
+
+_importer._add_module(Module_six_moves_urllib_request(__name__ + ".moves.urllib.request"),
+ "moves.urllib_request", "moves.urllib.request")
+
+
+class Module_six_moves_urllib_response(_LazyModule):
+
+ """Lazy loading of moved objects in six.moves.urllib_response"""
+
+
+_urllib_response_moved_attributes = [
+ MovedAttribute("addbase", "urllib", "urllib.response"),
+ MovedAttribute("addclosehook", "urllib", "urllib.response"),
+ MovedAttribute("addinfo", "urllib", "urllib.response"),
+ MovedAttribute("addinfourl", "urllib", "urllib.response"),
+]
+for attr in _urllib_response_moved_attributes:
+ setattr(Module_six_moves_urllib_response, attr.name, attr)
+del attr
+
+Module_six_moves_urllib_response._moved_attributes = _urllib_response_moved_attributes
+
+_importer._add_module(Module_six_moves_urllib_response(__name__ + ".moves.urllib.response"),
+ "moves.urllib_response", "moves.urllib.response")
+
+
+class Module_six_moves_urllib_robotparser(_LazyModule):
+
+ """Lazy loading of moved objects in six.moves.urllib_robotparser"""
+
+
+_urllib_robotparser_moved_attributes = [
+ MovedAttribute("RobotFileParser", "robotparser", "urllib.robotparser"),
+]
+for attr in _urllib_robotparser_moved_attributes:
+ setattr(Module_six_moves_urllib_robotparser, attr.name, attr)
+del attr
+
+Module_six_moves_urllib_robotparser._moved_attributes = _urllib_robotparser_moved_attributes
+
+_importer._add_module(Module_six_moves_urllib_robotparser(__name__ + ".moves.urllib.robotparser"),
+ "moves.urllib_robotparser", "moves.urllib.robotparser")
+
+
+class Module_six_moves_urllib(types.ModuleType):
+
+ """Create a six.moves.urllib namespace that resembles the Python 3 namespace"""
+ __path__ = [] # mark as package
+ parse = _importer._get_module("moves.urllib_parse")
+ error = _importer._get_module("moves.urllib_error")
+ request = _importer._get_module("moves.urllib_request")
+ response = _importer._get_module("moves.urllib_response")
+ robotparser = _importer._get_module("moves.urllib_robotparser")
+
+ def __dir__(self):
+ return ['parse', 'error', 'request', 'response', 'robotparser']
+
+_importer._add_module(Module_six_moves_urllib(__name__ + ".moves.urllib"),
+ "moves.urllib")
+
+
+def add_move(move):
+ """Add an item to six.moves."""
+ setattr(_MovedItems, move.name, move)
+
+
+def remove_move(name):
+ """Remove item from six.moves."""
+ try:
+ delattr(_MovedItems, name)
+ except AttributeError:
+ try:
+ del moves.__dict__[name]
+ except KeyError:
+ raise AttributeError("no such move, %r" % (name,))
+
+
+if PY3:
+ _meth_func = "__func__"
+ _meth_self = "__self__"
+
+ _func_closure = "__closure__"
+ _func_code = "__code__"
+ _func_defaults = "__defaults__"
+ _func_globals = "__globals__"
+else:
+ _meth_func = "im_func"
+ _meth_self = "im_self"
+
+ _func_closure = "func_closure"
+ _func_code = "func_code"
+ _func_defaults = "func_defaults"
+ _func_globals = "func_globals"
+
+
+try:
+ advance_iterator = next
+except NameError:
+ def advance_iterator(it):
+ return it.next()
+next = advance_iterator
+
+
+try:
+ callable = callable
+except NameError:
+ def callable(obj):
+ return any("__call__" in klass.__dict__ for klass in type(obj).__mro__)
+
+
+if PY3:
+ def get_unbound_function(unbound):
+ return unbound
+
+ create_bound_method = types.MethodType
+
+ def create_unbound_method(func, cls):
+ return func
+
+ Iterator = object
+else:
+ def get_unbound_function(unbound):
+ return unbound.im_func
+
+ def create_bound_method(func, obj):
+ return types.MethodType(func, obj, obj.__class__)
+
+ def create_unbound_method(func, cls):
+ return types.MethodType(func, None, cls)
+
+ class Iterator(object):
+
+ def next(self):
+ return type(self).__next__(self)
+
+ callable = callable
+_add_doc(get_unbound_function,
+ """Get the function out of a possibly unbound function""")
+
+
+get_method_function = operator.attrgetter(_meth_func)
+get_method_self = operator.attrgetter(_meth_self)
+get_function_closure = operator.attrgetter(_func_closure)
+get_function_code = operator.attrgetter(_func_code)
+get_function_defaults = operator.attrgetter(_func_defaults)
+get_function_globals = operator.attrgetter(_func_globals)
+
+
+if PY3:
+ def iterkeys(d, **kw):
+ return iter(d.keys(**kw))
+
+ def itervalues(d, **kw):
+ return iter(d.values(**kw))
+
+ def iteritems(d, **kw):
+ return iter(d.items(**kw))
+
+ def iterlists(d, **kw):
+ return iter(d.lists(**kw))
+
+ viewkeys = operator.methodcaller("keys")
+
+ viewvalues = operator.methodcaller("values")
+
+ viewitems = operator.methodcaller("items")
+else:
+ def iterkeys(d, **kw):
+ return d.iterkeys(**kw)
+
+ def itervalues(d, **kw):
+ return d.itervalues(**kw)
+
+ def iteritems(d, **kw):
+ return d.iteritems(**kw)
+
+ def iterlists(d, **kw):
+ return d.iterlists(**kw)
+
+ viewkeys = operator.methodcaller("viewkeys")
+
+ viewvalues = operator.methodcaller("viewvalues")
+
+ viewitems = operator.methodcaller("viewitems")
+
+_add_doc(iterkeys, "Return an iterator over the keys of a dictionary.")
+_add_doc(itervalues, "Return an iterator over the values of a dictionary.")
+_add_doc(iteritems,
+ "Return an iterator over the (key, value) pairs of a dictionary.")
+_add_doc(iterlists,
+ "Return an iterator over the (key, [values]) pairs of a dictionary.")
+
+
+if PY3:
+ def b(s):
+ return s.encode("latin-1")
+
+ def u(s):
+ return s
+ unichr = chr
+ import struct
+ int2byte = struct.Struct(">B").pack
+ del struct
+ byte2int = operator.itemgetter(0)
+ indexbytes = operator.getitem
+ iterbytes = iter
+ import io
+ StringIO = io.StringIO
+ BytesIO = io.BytesIO
+ _assertCountEqual = "assertCountEqual"
+ if sys.version_info[1] <= 1:
+ _assertRaisesRegex = "assertRaisesRegexp"
+ _assertRegex = "assertRegexpMatches"
+ else:
+ _assertRaisesRegex = "assertRaisesRegex"
+ _assertRegex = "assertRegex"
+else:
+ def b(s):
+ return s
+ # Workaround for standalone backslash
+
+ def u(s):
+ return unicode(s.replace(r'\\', r'\\\\'), "unicode_escape")
+ unichr = unichr
+ int2byte = chr
+
+ def byte2int(bs):
+ return ord(bs[0])
+
+ def indexbytes(buf, i):
+ return ord(buf[i])
+ iterbytes = functools.partial(itertools.imap, ord)
+ import StringIO
+ StringIO = BytesIO = StringIO.StringIO
+ _assertCountEqual = "assertItemsEqual"
+ _assertRaisesRegex = "assertRaisesRegexp"
+ _assertRegex = "assertRegexpMatches"
+_add_doc(b, """Byte literal""")
+_add_doc(u, """Text literal""")
+
+
+def assertCountEqual(self, *args, **kwargs):
+ return getattr(self, _assertCountEqual)(*args, **kwargs)
+
+
+def assertRaisesRegex(self, *args, **kwargs):
+ return getattr(self, _assertRaisesRegex)(*args, **kwargs)
+
+
+def assertRegex(self, *args, **kwargs):
+ return getattr(self, _assertRegex)(*args, **kwargs)
+
+
+if PY3:
+ exec_ = getattr(moves.builtins, "exec")
+
+ def reraise(tp, value, tb=None):
+ if value is None:
+ value = tp()
+ if value.__traceback__ is not tb:
+ raise value.with_traceback(tb)
+ raise value
+
+else:
+ def exec_(_code_, _globs_=None, _locs_=None):
+ """Execute code in a namespace."""
+ if _globs_ is None:
+ frame = sys._getframe(1)
+ _globs_ = frame.f_globals
+ if _locs_ is None:
+ _locs_ = frame.f_locals
+ del frame
+ elif _locs_ is None:
+ _locs_ = _globs_
+ exec("""exec _code_ in _globs_, _locs_""")
+
+ exec_("""def reraise(tp, value, tb=None):
+ raise tp, value, tb
+""")
+
+
+if sys.version_info[:2] == (3, 2):
+ exec_("""def raise_from(value, from_value):
+ if from_value is None:
+ raise value
+ raise value from from_value
+""")
+elif sys.version_info[:2] > (3, 2):
+ exec_("""def raise_from(value, from_value):
+ raise value from from_value
+""")
+else:
+ def raise_from(value, from_value):
+ raise value
+
+
+print_ = getattr(moves.builtins, "print", None)
+if print_ is None:
+ def print_(*args, **kwargs):
+ """The new-style print function for Python 2.4 and 2.5."""
+ fp = kwargs.pop("file", sys.stdout)
+ if fp is None:
+ return
+
+ def write(data):
+ if not isinstance(data, basestring):
+ data = str(data)
+ # If the file has an encoding, encode unicode with it.
+ if (isinstance(fp, file) and
+ isinstance(data, unicode) and
+ fp.encoding is not None):
+ errors = getattr(fp, "errors", None)
+ if errors is None:
+ errors = "strict"
+ data = data.encode(fp.encoding, errors)
+ fp.write(data)
+ want_unicode = False
+ sep = kwargs.pop("sep", None)
+ if sep is not None:
+ if isinstance(sep, unicode):
+ want_unicode = True
+ elif not isinstance(sep, str):
+ raise TypeError("sep must be None or a string")
+ end = kwargs.pop("end", None)
+ if end is not None:
+ if isinstance(end, unicode):
+ want_unicode = True
+ elif not isinstance(end, str):
+ raise TypeError("end must be None or a string")
+ if kwargs:
+ raise TypeError("invalid keyword arguments to print()")
+ if not want_unicode:
+ for arg in args:
+ if isinstance(arg, unicode):
+ want_unicode = True
+ break
+ if want_unicode:
+ newline = unicode("\n")
+ space = unicode(" ")
+ else:
+ newline = "\n"
+ space = " "
+ if sep is None:
+ sep = space
+ if end is None:
+ end = newline
+ for i, arg in enumerate(args):
+ if i:
+ write(sep)
+ write(arg)
+ write(end)
+if sys.version_info[:2] < (3, 3):
+ _print = print_
+
+ def print_(*args, **kwargs):
+ fp = kwargs.get("file", sys.stdout)
+ flush = kwargs.pop("flush", False)
+ _print(*args, **kwargs)
+ if flush and fp is not None:
+ fp.flush()
+
+_add_doc(reraise, """Reraise an exception.""")
+
+if sys.version_info[0:2] < (3, 4):
+ def wraps(wrapped, assigned=functools.WRAPPER_ASSIGNMENTS,
+ updated=functools.WRAPPER_UPDATES):
+ def wrapper(f):
+ f = functools.wraps(wrapped, assigned, updated)(f)
+ f.__wrapped__ = wrapped
+ return f
+ return wrapper
+else:
+ wraps = functools.wraps
+
+
+def with_metaclass(meta, *bases):
+ """Create a base class with a metaclass."""
+ # This requires a bit of explanation: the basic idea is to make a dummy
+ # metaclass for one level of class instantiation that replaces itself with
+ # the actual metaclass.
+ class metaclass(meta):
+
+ def __new__(cls, name, this_bases, d):
+ return meta(name, bases, d)
+ return type.__new__(metaclass, 'temporary_class', (), {})
+
+
+def add_metaclass(metaclass):
+ """Class decorator for creating a class with a metaclass."""
+ def wrapper(cls):
+ orig_vars = cls.__dict__.copy()
+ slots = orig_vars.get('__slots__')
+ if slots is not None:
+ if isinstance(slots, str):
+ slots = [slots]
+ for slots_var in slots:
+ orig_vars.pop(slots_var)
+ orig_vars.pop('__dict__', None)
+ orig_vars.pop('__weakref__', None)
+ return metaclass(cls.__name__, cls.__bases__, orig_vars)
+ return wrapper
+
+
+def python_2_unicode_compatible(klass):
+ """
+ A decorator that defines __unicode__ and __str__ methods under Python 2.
+ Under Python 3 it does nothing.
+
+ To support Python 2 and 3 with a single code base, define a __str__ method
+ returning text and apply this decorator to the class.
+ """
+ if PY2:
+ if '__str__' not in klass.__dict__:
+ raise ValueError("@python_2_unicode_compatible cannot be applied "
+ "to %s because it doesn't define __str__()." %
+ klass.__name__)
+ klass.__unicode__ = klass.__str__
+ klass.__str__ = lambda self: self.__unicode__().encode('utf-8')
+ return klass
+
+
+# Complete the moves implementation.
+# This code is at the end of this module to speed up module loading.
+# Turn this module into a package.
+__path__ = [] # required for PEP 302 and PEP 451
+__package__ = __name__ # see PEP 366 @ReservedAssignment
+if globals().get("__spec__") is not None:
+ __spec__.submodule_search_locations = [] # PEP 451 @UndefinedVariable
+# Remove other six meta path importers, since they cause problems. This can
+# happen if six is removed from sys.modules and then reloaded. (Setuptools does
+# this for some reason.)
+if sys.meta_path:
+ for i, importer in enumerate(sys.meta_path):
+ # Here's some real nastiness: Another "instance" of the six module might
+ # be floating around. Therefore, we can't use isinstance() to check for
+ # the six meta path importer, since the other six instance will have
+ # inserted an importer with different class.
+ if (type(importer).__name__ == "_SixMetaPathImporter" and
+ importer.name == __name__):
+ del sys.meta_path[i]
+ break
+ del i, importer
+# Finally, add the importer to the meta path import hook.
+sys.meta_path.append(_importer)
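
A few representative uses of the compatibility layer above (vendored here as urllib3.packages.six; the values shown assume a Python 3 interpreter):

>>> from urllib3.packages import six
>>> from urllib3.packages.six.moves.urllib.parse import urlencode
>>> six.PY3
True
>>> six.b('--boundary--')                  # byte-literal helper used by filepost.py
b'--boundary--'
>>> urlencode({'chart': 'system.cpu'})
'chart=system.cpu'
>>> list(six.iteritems({'status': 200}))   # uniform dict iteration across Python 2 and 3
[('status', 200)]
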
diff --git a/collectors/python.d.plugin/python_modules/urllib3/packages/ssl_match_hostname/__init__.py b/collectors/python.d.plugin/python_modules/urllib3/packages/ssl_match_hostname/__init__.py
new file mode 100644
index 000000000..2aeeeff91
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/urllib3/packages/ssl_match_hostname/__init__.py
@@ -0,0 +1,20 @@
+# SPDX-License-Identifier: MIT
+import sys
+
+try:
+ # Our match_hostname function is the same as 3.5's, so we only want to
+ # import the match_hostname function if it's at least that good.
+ if sys.version_info < (3, 5):
+ raise ImportError("Fallback to vendored code")
+
+ from ssl import CertificateError, match_hostname
+except ImportError:
+ try:
+ # Backport of the function from a pypi module
+ from backports.ssl_match_hostname import CertificateError, match_hostname
+ except ImportError:
+ # Our vendored copy
+ from ._implementation import CertificateError, match_hostname
+
+# Not needed, but documenting what we provide.
+__all__ = ('CertificateError', 'match_hostname')
diff --git a/collectors/python.d.plugin/python_modules/urllib3/packages/ssl_match_hostname/_implementation.py b/collectors/python.d.plugin/python_modules/urllib3/packages/ssl_match_hostname/_implementation.py
new file mode 100644
index 000000000..647e081da
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/urllib3/packages/ssl_match_hostname/_implementation.py
@@ -0,0 +1,156 @@
+"""The match_hostname() function from Python 3.3.3, essential when using SSL."""
+
+# SPDX-License-Identifier: Python-2.0
+
+import re
+import sys
+
+# ipaddress has been backported to 2.6+ in pypi. If it is installed on the
+# system, use it to handle IPAddress ServerAltnames (this was added in
+# python-3.5) otherwise only do DNS matching. This allows
+# backports.ssl_match_hostname to continue to be used all the way back to
+# python-2.4.
+try:
+ import ipaddress
+except ImportError:
+ ipaddress = None
+
+__version__ = '3.5.0.1'
+
+
+class CertificateError(ValueError):
+ pass
+
+
+def _dnsname_match(dn, hostname, max_wildcards=1):
+ """Matching according to RFC 6125, section 6.4.3
+
+ http://tools.ietf.org/html/rfc6125#section-6.4.3
+ """
+ pats = []
+ if not dn:
+ return False
+
+ # Ported from python3-syntax:
+ # leftmost, *remainder = dn.split(r'.')
+ parts = dn.split(r'.')
+ leftmost = parts[0]
+ remainder = parts[1:]
+
+ wildcards = leftmost.count('*')
+ if wildcards > max_wildcards:
+ # Issue #17980: avoid denials of service by refusing more
+ # than one wildcard per fragment. A survey of established
+ # policy among SSL implementations showed it to be a
+ # reasonable choice.
+ raise CertificateError(
+ "too many wildcards in certificate DNS name: " + repr(dn))
+
+ # speed up common case w/o wildcards
+ if not wildcards:
+ return dn.lower() == hostname.lower()
+
+ # RFC 6125, section 6.4.3, subitem 1.
+ # The client SHOULD NOT attempt to match a presented identifier in which
+ # the wildcard character comprises a label other than the left-most label.
+ if leftmost == '*':
+ # When '*' is a fragment by itself, it matches a non-empty dotless
+ # fragment.
+ pats.append('[^.]+')
+ elif leftmost.startswith('xn--') or hostname.startswith('xn--'):
+ # RFC 6125, section 6.4.3, subitem 3.
+ # The client SHOULD NOT attempt to match a presented identifier
+ # where the wildcard character is embedded within an A-label or
+ # U-label of an internationalized domain name.
+ pats.append(re.escape(leftmost))
+ else:
+ # Otherwise, '*' matches any dotless string, e.g. www*
+ pats.append(re.escape(leftmost).replace(r'\*', '[^.]*'))
+
+ # add the remaining fragments, ignore any wildcards
+ for frag in remainder:
+ pats.append(re.escape(frag))
+
+ pat = re.compile(r'\A' + r'\.'.join(pats) + r'\Z', re.IGNORECASE)
+ return pat.match(hostname)
+
+
+def _to_unicode(obj):
+ if isinstance(obj, str) and sys.version_info < (3,):
+ obj = unicode(obj, encoding='ascii', errors='strict')
+ return obj
+
+def _ipaddress_match(ipname, host_ip):
+ """Exact matching of IP addresses.
+
+ RFC 6125 explicitly doesn't define an algorithm for this
+ (section 1.7.2 - "Out of Scope").
+ """
+ # OpenSSL may add a trailing newline to a subjectAltName's IP address
+ # Divergence from upstream: ipaddress can't handle byte str
+ ip = ipaddress.ip_address(_to_unicode(ipname).rstrip())
+ return ip == host_ip
+
+
+def match_hostname(cert, hostname):
+ """Verify that *cert* (in decoded format as returned by
+ SSLSocket.getpeercert()) matches the *hostname*. RFC 2818 and RFC 6125
+ rules are followed, but IP addresses are not accepted for *hostname*.
+
+ CertificateError is raised on failure. On success, the function
+ returns nothing.
+ """
+ if not cert:
+ raise ValueError("empty or no certificate, match_hostname needs a "
+ "SSL socket or SSL context with either "
+ "CERT_OPTIONAL or CERT_REQUIRED")
+ try:
+ # Divergence from upstream: ipaddress can't handle byte str
+ host_ip = ipaddress.ip_address(_to_unicode(hostname))
+ except ValueError:
+ # Not an IP address (common case)
+ host_ip = None
+ except UnicodeError:
+ # Divergence from upstream: Have to deal with ipaddress not taking
+ # byte strings. addresses should be all ascii, so we consider it not
+ # an ipaddress in this case
+ host_ip = None
+ except AttributeError:
+ # Divergence from upstream: Make ipaddress library optional
+ if ipaddress is None:
+ host_ip = None
+ else:
+ raise
+ dnsnames = []
+ san = cert.get('subjectAltName', ())
+ for key, value in san:
+ if key == 'DNS':
+ if host_ip is None and _dnsname_match(value, hostname):
+ return
+ dnsnames.append(value)
+ elif key == 'IP Address':
+ if host_ip is not None and _ipaddress_match(value, host_ip):
+ return
+ dnsnames.append(value)
+ if not dnsnames:
+ # The subject is only checked when there is no dNSName entry
+ # in subjectAltName
+ for sub in cert.get('subject', ()):
+ for key, value in sub:
+ # XXX according to RFC 2818, the most specific Common Name
+ # must be used.
+ if key == 'commonName':
+ if _dnsname_match(value, hostname):
+ return
+ dnsnames.append(value)
+ if len(dnsnames) > 1:
+ raise CertificateError("hostname %r "
+ "doesn't match either of %s"
+ % (hostname, ', '.join(map(repr, dnsnames))))
+ elif len(dnsnames) == 1:
+ raise CertificateError("hostname %r "
+ "doesn't match %r"
+ % (hostname, dnsnames[0]))
+ else:
+ raise CertificateError("no appropriate commonName or "
+ "subjectAltName fields were found")
diff --git a/collectors/python.d.plugin/python_modules/urllib3/poolmanager.py b/collectors/python.d.plugin/python_modules/urllib3/poolmanager.py
new file mode 100644
index 000000000..adea9bc01
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/urllib3/poolmanager.py
@@ -0,0 +1,441 @@
+# SPDX-License-Identifier: MIT
+from __future__ import absolute_import
+import collections
+import functools
+import logging
+
+from ._collections import RecentlyUsedContainer
+from .connectionpool import HTTPConnectionPool, HTTPSConnectionPool
+from .connectionpool import port_by_scheme
+from .exceptions import LocationValueError, MaxRetryError, ProxySchemeUnknown
+from .packages.six.moves.urllib.parse import urljoin
+from .request import RequestMethods
+from .util.url import parse_url
+from .util.retry import Retry
+
+
+__all__ = ['PoolManager', 'ProxyManager', 'proxy_from_url']
+
+
+log = logging.getLogger(__name__)
+
+SSL_KEYWORDS = ('key_file', 'cert_file', 'cert_reqs', 'ca_certs',
+ 'ssl_version', 'ca_cert_dir', 'ssl_context')
+
+# All known keyword arguments that could be provided to the pool manager, its
+# pools, or the underlying connections. This is used to construct a pool key.
+_key_fields = (
+ 'key_scheme', # str
+ 'key_host', # str
+ 'key_port', # int
+ 'key_timeout', # int or float or Timeout
+ 'key_retries', # int or Retry
+ 'key_strict', # bool
+ 'key_block', # bool
+ 'key_source_address', # str
+ 'key_key_file', # str
+ 'key_cert_file', # str
+ 'key_cert_reqs', # str
+ 'key_ca_certs', # str
+ 'key_ssl_version', # str
+ 'key_ca_cert_dir', # str
+ 'key_ssl_context', # instance of ssl.SSLContext or urllib3.util.ssl_.SSLContext
+ 'key_maxsize', # int
+ 'key_headers', # dict
+ 'key__proxy', # parsed proxy url
+ 'key__proxy_headers', # dict
+ 'key_socket_options', # list of (level (int), optname (int), value (int or str)) tuples
+ 'key__socks_options', # dict
+ 'key_assert_hostname', # bool or string
+ 'key_assert_fingerprint', # str
+)
+
+#: The namedtuple class used to construct keys for the connection pool.
+#: All custom key schemes should include the fields in this key at a minimum.
+PoolKey = collections.namedtuple('PoolKey', _key_fields)
+
+
+def _default_key_normalizer(key_class, request_context):
+ """
+ Create a pool key out of a request context dictionary.
+
+ According to RFC 3986, both the scheme and host are case-insensitive.
+ Therefore, this function normalizes both before constructing the pool
+ key for an HTTPS request. If you wish to change this behaviour, provide
+ alternate callables to ``key_fn_by_scheme``.
+
+ :param key_class:
+ The class to use when constructing the key. This should be a namedtuple
+ with the ``scheme`` and ``host`` keys at a minimum.
+ :type key_class: namedtuple
+ :param request_context:
+ A dictionary-like object that contains the context for a request.
+ :type request_context: dict
+
+ :return: A namedtuple that can be used as a connection pool key.
+ :rtype: PoolKey
+ """
+ # Since we mutate the dictionary, make a copy first
+ context = request_context.copy()
+ context['scheme'] = context['scheme'].lower()
+ context['host'] = context['host'].lower()
+
+ # These are both dictionaries and need to be transformed into frozensets
+ for key in ('headers', '_proxy_headers', '_socks_options'):
+ if key in context and context[key] is not None:
+ context[key] = frozenset(context[key].items())
+
+ # The socket_options key may be a list and needs to be transformed into a
+ # tuple.
+ socket_opts = context.get('socket_options')
+ if socket_opts is not None:
+ context['socket_options'] = tuple(socket_opts)
+
+ # Map the kwargs to the names in the namedtuple - this is necessary since
+ # namedtuples can't have fields starting with '_'.
+ for key in list(context.keys()):
+ context['key_' + key] = context.pop(key)
+
+ # Default to ``None`` for keys missing from the context
+ for field in key_class._fields:
+ if field not in context:
+ context[field] = None
+
+ return key_class(**context)
+
+
+#: A dictionary that maps a scheme to a callable that creates a pool key.
+#: This can be used to alter the way pool keys are constructed, if desired.
+#: Each PoolManager makes a copy of this dictionary so they can be configured
+#: globally here, or individually on the instance.
+key_fn_by_scheme = {
+ 'http': functools.partial(_default_key_normalizer, PoolKey),
+ 'https': functools.partial(_default_key_normalizer, PoolKey),
+}
+
+pool_classes_by_scheme = {
+ 'http': HTTPConnectionPool,
+ 'https': HTTPSConnectionPool,
+}
+
+
+class PoolManager(RequestMethods):
+ """
+ Allows for arbitrary requests while transparently keeping track of
+ necessary connection pools for you.
+
+ :param num_pools:
+ Number of connection pools to cache before discarding the least
+ recently used pool.
+
+ :param headers:
+ Headers to include with all requests, unless other headers are given
+ explicitly.
+
+ :param \\**connection_pool_kw:
+ Additional parameters are used to create fresh
+ :class:`urllib3.connectionpool.ConnectionPool` instances.
+
+ Example::
+
+ >>> manager = PoolManager(num_pools=2)
+ >>> r = manager.request('GET', 'http://google.com/')
+ >>> r = manager.request('GET', 'http://google.com/mail')
+ >>> r = manager.request('GET', 'http://yahoo.com/')
+ >>> len(manager.pools)
+ 2
+
+ """
+
+ proxy = None
+
+ def __init__(self, num_pools=10, headers=None, **connection_pool_kw):
+ RequestMethods.__init__(self, headers)
+ self.connection_pool_kw = connection_pool_kw
+ self.pools = RecentlyUsedContainer(num_pools,
+ dispose_func=lambda p: p.close())
+
+ # Locally set the pool classes and keys so other PoolManagers can
+ # override them.
+ self.pool_classes_by_scheme = pool_classes_by_scheme
+ self.key_fn_by_scheme = key_fn_by_scheme.copy()
+
+ def __enter__(self):
+ return self
+
+ def __exit__(self, exc_type, exc_val, exc_tb):
+ self.clear()
+ # Return False to re-raise any potential exceptions
+ return False
+
+ def _new_pool(self, scheme, host, port, request_context=None):
+ """
+ Create a new :class:`ConnectionPool` based on host, port, scheme, and
+ any additional pool keyword arguments.
+
+ If ``request_context`` is provided, it is provided as keyword arguments
+ to the pool class used. This method is used to actually create the
+ connection pools handed out by :meth:`connection_from_url` and
+ companion methods. It is intended to be overridden for customization.
+ """
+ pool_cls = self.pool_classes_by_scheme[scheme]
+ if request_context is None:
+ request_context = self.connection_pool_kw.copy()
+
+ # Although the context has everything necessary to create the pool,
+ # this function has historically only used the scheme, host, and port
+ # in the positional args. When an API change is acceptable these can
+ # be removed.
+ for key in ('scheme', 'host', 'port'):
+ request_context.pop(key, None)
+
+ if scheme == 'http':
+ for kw in SSL_KEYWORDS:
+ request_context.pop(kw, None)
+
+ return pool_cls(host, port, **request_context)
+
+ def clear(self):
+ """
+ Empty our store of pools and direct them all to close.
+
+ This will not affect in-flight connections, but they will not be
+ re-used after completion.
+ """
+ self.pools.clear()
+
+ def connection_from_host(self, host, port=None, scheme='http', pool_kwargs=None):
+ """
+ Get a :class:`ConnectionPool` based on the host, port, and scheme.
+
+ If ``port`` isn't given, it will be derived from the ``scheme`` using
+ ``urllib3.connectionpool.port_by_scheme``. If ``pool_kwargs`` is
+ provided, it is merged with the instance's ``connection_pool_kw``
+ variable and used to create the new connection pool, if one is
+ needed.
+ """
+
+ if not host:
+ raise LocationValueError("No host specified.")
+
+ request_context = self._merge_pool_kwargs(pool_kwargs)
+ request_context['scheme'] = scheme or 'http'
+ if not port:
+ port = port_by_scheme.get(request_context['scheme'].lower(), 80)
+ request_context['port'] = port
+ request_context['host'] = host
+
+ return self.connection_from_context(request_context)
+
+ def connection_from_context(self, request_context):
+ """
+ Get a :class:`ConnectionPool` based on the request context.
+
+ ``request_context`` must at least contain the ``scheme`` key and its
+ value must be a key in ``key_fn_by_scheme`` instance variable.
+ """
+ scheme = request_context['scheme'].lower()
+ pool_key_constructor = self.key_fn_by_scheme[scheme]
+ pool_key = pool_key_constructor(request_context)
+
+ return self.connection_from_pool_key(pool_key, request_context=request_context)
+
+ def connection_from_pool_key(self, pool_key, request_context=None):
+ """
+ Get a :class:`ConnectionPool` based on the provided pool key.
+
+ ``pool_key`` should be a namedtuple that only contains immutable
+ objects. At a minimum it must have the ``scheme``, ``host``, and
+ ``port`` fields.
+ """
+ with self.pools.lock:
+ # If the scheme, host, or port doesn't match existing open
+ # connections, open a new ConnectionPool.
+ pool = self.pools.get(pool_key)
+ if pool:
+ return pool
+
+ # Make a fresh ConnectionPool of the desired type
+ scheme = request_context['scheme']
+ host = request_context['host']
+ port = request_context['port']
+ pool = self._new_pool(scheme, host, port, request_context=request_context)
+ self.pools[pool_key] = pool
+
+ return pool
+
+ def connection_from_url(self, url, pool_kwargs=None):
+ """
+ Similar to :func:`urllib3.connectionpool.connection_from_url`.
+
+ If ``pool_kwargs`` is not provided and a new pool needs to be
+ constructed, ``self.connection_pool_kw`` is used to initialize
+ the :class:`urllib3.connectionpool.ConnectionPool`. If ``pool_kwargs``
+ is provided, it is used instead. Note that if a new pool does not
+ need to be created for the request, the provided ``pool_kwargs`` are
+ not used.
+ """
+ u = parse_url(url)
+ return self.connection_from_host(u.host, port=u.port, scheme=u.scheme,
+ pool_kwargs=pool_kwargs)
+
+ def _merge_pool_kwargs(self, override):
+ """
+ Merge a dictionary of override values for self.connection_pool_kw.
+
+ This does not modify self.connection_pool_kw and returns a new dict.
+ Any keys in the override dictionary with a value of ``None`` are
+ removed from the merged dictionary.
+ """
+ base_pool_kwargs = self.connection_pool_kw.copy()
+ if override:
+ for key, value in override.items():
+ if value is None:
+ try:
+ del base_pool_kwargs[key]
+ except KeyError:
+ pass
+ else:
+ base_pool_kwargs[key] = value
+ return base_pool_kwargs
+
+ def urlopen(self, method, url, redirect=True, **kw):
+ """
+ Same as :meth:`urllib3.connectionpool.HTTPConnectionPool.urlopen`
+ with custom cross-host redirect logic and only sends the request-uri
+ portion of the ``url``.
+
+ The given ``url`` parameter must be absolute, such that an appropriate
+ :class:`urllib3.connectionpool.ConnectionPool` can be chosen for it.
+ """
+ u = parse_url(url)
+ conn = self.connection_from_host(u.host, port=u.port, scheme=u.scheme)
+
+ kw['assert_same_host'] = False
+ kw['redirect'] = False
+ if 'headers' not in kw:
+ kw['headers'] = self.headers
+
+ if self.proxy is not None and u.scheme == "http":
+ response = conn.urlopen(method, url, **kw)
+ else:
+ response = conn.urlopen(method, u.request_uri, **kw)
+
+ redirect_location = redirect and response.get_redirect_location()
+ if not redirect_location:
+ return response
+
+ # Support relative URLs for redirecting.
+ redirect_location = urljoin(url, redirect_location)
+
+ # RFC 7231, Section 6.4.4
+ if response.status == 303:
+ method = 'GET'
+
+ retries = kw.get('retries')
+ if not isinstance(retries, Retry):
+ retries = Retry.from_int(retries, redirect=redirect)
+
+ try:
+ retries = retries.increment(method, url, response=response, _pool=conn)
+ except MaxRetryError:
+ if retries.raise_on_redirect:
+ raise
+ return response
+
+ kw['retries'] = retries
+ kw['redirect'] = redirect
+
+ log.info("Redirecting %s -> %s", url, redirect_location)
+ return self.urlopen(method, redirect_location, **kw)
+
+
+class ProxyManager(PoolManager):
+ """
+ Behaves just like :class:`PoolManager`, but sends all requests through
+ the defined proxy, using the CONNECT method for HTTPS URLs.
+
+ :param proxy_url:
+ The URL of the proxy to be used.
+
+ :param proxy_headers:
+ A dictionary containing headers that will be sent to the proxy. For
+ plain HTTP requests they are sent with every request, while for
+ HTTPS/CONNECT they are sent only once. This can be used for proxy
+ authentication.
+
+ Example::
+
+ >>> proxy = urllib3.ProxyManager('http://localhost:3128/')
+ >>> r1 = proxy.request('GET', 'http://google.com/')
+ >>> r2 = proxy.request('GET', 'http://httpbin.org/')
+ >>> len(proxy.pools)
+ 1
+ >>> r3 = proxy.request('GET', 'https://httpbin.org/')
+ >>> r4 = proxy.request('GET', 'https://twitter.com/')
+ >>> len(proxy.pools)
+ 3
+
+ """
+
+ def __init__(self, proxy_url, num_pools=10, headers=None,
+ proxy_headers=None, **connection_pool_kw):
+
+ if isinstance(proxy_url, HTTPConnectionPool):
+ proxy_url = '%s://%s:%i' % (proxy_url.scheme, proxy_url.host,
+ proxy_url.port)
+ proxy = parse_url(proxy_url)
+ if not proxy.port:
+ port = port_by_scheme.get(proxy.scheme, 80)
+ proxy = proxy._replace(port=port)
+
+ if proxy.scheme not in ("http", "https"):
+ raise ProxySchemeUnknown(proxy.scheme)
+
+ self.proxy = proxy
+ self.proxy_headers = proxy_headers or {}
+
+ connection_pool_kw['_proxy'] = self.proxy
+ connection_pool_kw['_proxy_headers'] = self.proxy_headers
+
+ super(ProxyManager, self).__init__(
+ num_pools, headers, **connection_pool_kw)
+
+ def connection_from_host(self, host, port=None, scheme='http', pool_kwargs=None):
+ if scheme == "https":
+ return super(ProxyManager, self).connection_from_host(
+ host, port, scheme, pool_kwargs=pool_kwargs)
+
+ return super(ProxyManager, self).connection_from_host(
+ self.proxy.host, self.proxy.port, self.proxy.scheme, pool_kwargs=pool_kwargs)
+
+ def _set_proxy_headers(self, url, headers=None):
+ """
+ Sets headers needed by proxies: specifically, the Accept and Host
+ headers. Only sets headers not provided by the user.
+ """
+ headers_ = {'Accept': '*/*'}
+
+ netloc = parse_url(url).netloc
+ if netloc:
+ headers_['Host'] = netloc
+
+ if headers:
+ headers_.update(headers)
+ return headers_
+
+ def urlopen(self, method, url, redirect=True, **kw):
+ "Same as HTTP(S)ConnectionPool.urlopen, ``url`` must be absolute."
+ u = parse_url(url)
+
+ if u.scheme == "http":
+ # For proxied HTTPS requests, httplib sets the necessary headers
+ # on the CONNECT to the proxy. For HTTP, we'll definitely
+ # need to set 'Host' at the very least.
+ headers = kw.get('headers', self.headers)
+ kw['headers'] = self._set_proxy_headers(url, headers)
+
+ return super(ProxyManager, self).urlopen(method, url, redirect=redirect, **kw)
+
+
+def proxy_from_url(url, **kw):
+ return ProxyManager(proxy_url=url, **kw)
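
A brief sketch of the two managers defined above (the URLs, ports and keyword values are illustrative; the 200 status assumes something is actually listening on the target address):

>>> import urllib3
>>> http = urllib3.PoolManager(num_pools=4, timeout=3.0, retries=2)
>>> r = http.request('GET', 'http://127.0.0.1:19999/api/v1/info')
>>> r.status
200
>>> proxied = urllib3.proxy_from_url('http://localhost:3128/',
...                                  headers={'User-Agent': 'netdata'})
>>> r2 = proxied.request('GET', 'http://example.com/')   # routed through the proxy
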
diff --git a/collectors/python.d.plugin/python_modules/urllib3/request.py b/collectors/python.d.plugin/python_modules/urllib3/request.py
new file mode 100644
index 000000000..f78331975
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/urllib3/request.py
@@ -0,0 +1,149 @@
+# SPDX-License-Identifier: MIT
+from __future__ import absolute_import
+
+from .filepost import encode_multipart_formdata
+from .packages.six.moves.urllib.parse import urlencode
+
+
+__all__ = ['RequestMethods']
+
+
+class RequestMethods(object):
+ """
+ Convenience mixin for classes who implement a :meth:`urlopen` method, such
+ as :class:`~urllib3.connectionpool.HTTPConnectionPool` and
+ :class:`~urllib3.poolmanager.PoolManager`.
+
+ Provides behavior for making common types of HTTP request methods and
+ decides which type of request field encoding to use.
+
+ Specifically,
+
+ :meth:`.request_encode_url` is for sending requests whose fields are
+ encoded in the URL (such as GET, HEAD, DELETE).
+
+ :meth:`.request_encode_body` is for sending requests whose fields are
+ encoded in the *body* of the request using multipart or www-form-urlencoded
+ (such as for POST, PUT, PATCH).
+
+ :meth:`.request` is for making any kind of request; it will look up the
+ appropriate encoding format and use one of the above two methods to make
+ the request.
+
+ Initializer parameters:
+
+ :param headers:
+ Headers to include with all requests, unless other headers are given
+ explicitly.
+ """
+
+ _encode_url_methods = set(['DELETE', 'GET', 'HEAD', 'OPTIONS'])
+
+ def __init__(self, headers=None):
+ self.headers = headers or {}
+
+ def urlopen(self, method, url, body=None, headers=None,
+ encode_multipart=True, multipart_boundary=None,
+ **kw): # Abstract
+ raise NotImplementedError("Classes extending RequestMethods must implement "
+ "their own ``urlopen`` method.")
+
+ def request(self, method, url, fields=None, headers=None, **urlopen_kw):
+ """
+ Make a request using :meth:`urlopen` with the appropriate encoding of
+ ``fields`` based on the ``method`` used.
+
+ This is a convenience method that requires the least amount of manual
+ effort. It can be used in most situations, while still having the
+ option to drop down to more specific methods when necessary, such as
+ :meth:`request_encode_url`, :meth:`request_encode_body`,
+ or even the lowest level :meth:`urlopen`.
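+
+ Example (an illustrative sketch; the host is a placeholder):
+ >>> import urllib3
+ >>> http = urllib3.PoolManager()
+ >>> # GET: fields are URL-encoded into the query string
+ >>> r1 = http.request('GET', 'http://httpbin.org/get', fields={'q': 'netdata'})
+ >>> # POST: fields are encoded into the request body
+ >>> r2 = http.request('POST', 'http://httpbin.org/post', fields={'q': 'netdata'})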
+ """
+ method = method.upper()
+
+ if method in self._encode_url_methods:
+ return self.request_encode_url(method, url, fields=fields,
+ headers=headers,
+ **urlopen_kw)
+ else:
+ return self.request_encode_body(method, url, fields=fields,
+ headers=headers,
+ **urlopen_kw)
+
+ def request_encode_url(self, method, url, fields=None, headers=None,
+ **urlopen_kw):
+ """
+ Make a request using :meth:`urlopen` with the ``fields`` encoded in
+ the url. This is useful for request methods like GET, HEAD, DELETE, etc.
+ """
+ if headers is None:
+ headers = self.headers
+
+ extra_kw = {'headers': headers}
+ extra_kw.update(urlopen_kw)
+
+ if fields:
+ url += '?' + urlencode(fields)
+
+ return self.urlopen(method, url, **extra_kw)
+
+ def request_encode_body(self, method, url, fields=None, headers=None,
+ encode_multipart=True, multipart_boundary=None,
+ **urlopen_kw):
+ """
+ Make a request using :meth:`urlopen` with the ``fields`` encoded in
+ the body. This is useful for request methods like POST, PUT, PATCH, etc.
+
+ When ``encode_multipart=True`` (default), then
+ :meth:`urllib3.filepost.encode_multipart_formdata` is used to encode
+ the payload with the appropriate content type. Otherwise
+ :meth:`urllib.urlencode` is used with the
+ 'application/x-www-form-urlencoded' content type.
+
+ Multipart encoding must be used when posting files, and it's reasonably
+ safe to use it at other times too. However, it may break request
+ signing, such as with OAuth.
+
+ Supports an optional ``fields`` parameter of key/value strings AND
+ key/filetuple. A filetuple is a (filename, data, MIME type) tuple where
+ the MIME type is optional. For example::
+
+ fields = {
+ 'foo': 'bar',
+ 'fakefile': ('foofile.txt', 'contents of foofile'),
+ 'realfile': ('barfile.txt', open('realfile').read()),
+ 'typedfile': ('bazfile.bin', open('bazfile').read(),
+ 'image/jpeg'),
+ 'nonamefile': 'contents of nonamefile field',
+ }
+
+ When uploading a file, providing a filename (the first parameter of the
+ tuple) is optional but recommended to best mimic the behavior of browsers.
+
+ Note that if ``headers`` are supplied, the 'Content-Type' header will
+ be overwritten because it depends on the dynamic random boundary string
+ which is used to compose the body of the request. The random boundary
+ string can be explicitly set with the ``multipart_boundary`` parameter.
+ """
+ if headers is None:
+ headers = self.headers
+
+ extra_kw = {'headers': {}}
+
+ if fields:
+ if 'body' in urlopen_kw:
+ raise TypeError(
+ "request got values for both 'fields' and 'body', can only specify one.")
+
+ if encode_multipart:
+ body, content_type = encode_multipart_formdata(fields, boundary=multipart_boundary)
+ else:
+ body, content_type = urlencode(fields), 'application/x-www-form-urlencoded'
+
+ extra_kw['body'] = body
+ extra_kw['headers'] = {'Content-Type': content_type}
+
+ extra_kw['headers'].update(headers)
+ extra_kw.update(urlopen_kw)
+
+ return self.urlopen(method, url, **extra_kw)
diff --git a/collectors/python.d.plugin/python_modules/urllib3/response.py b/collectors/python.d.plugin/python_modules/urllib3/response.py
new file mode 100644
index 000000000..cf14a3076
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/urllib3/response.py
@@ -0,0 +1,623 @@
+# SPDX-License-Identifier: MIT
+from __future__ import absolute_import
+from contextlib import contextmanager
+import zlib
+import io
+import logging
+from socket import timeout as SocketTimeout
+from socket import error as SocketError
+
+from ._collections import HTTPHeaderDict
+from .exceptions import (
+ BodyNotHttplibCompatible, ProtocolError, DecodeError, ReadTimeoutError,
+ ResponseNotChunked, IncompleteRead, InvalidHeader
+)
+from .packages.six import string_types as basestring, binary_type, PY3
+from .packages.six.moves import http_client as httplib
+from .connection import HTTPException, BaseSSLError
+from .util.response import is_fp_closed, is_response_to_head
+
+log = logging.getLogger(__name__)
+
+
+class DeflateDecoder(object):
+
+ def __init__(self):
+ self._first_try = True
+ self._data = binary_type()
+ self._obj = zlib.decompressobj()
+
+ def __getattr__(self, name):
+ return getattr(self._obj, name)
+
+ def decompress(self, data):
+ if not data:
+ return data
+
+ if not self._first_try:
+ return self._obj.decompress(data)
+
+ self._data += data
+ try:
+ decompressed = self._obj.decompress(data)
+ if decompressed:
+ self._first_try = False
+ self._data = None
+ return decompressed
+ except zlib.error:
+ self._first_try = False
+ self._obj = zlib.decompressobj(-zlib.MAX_WBITS)
+ try:
+ return self.decompress(self._data)
+ finally:
+ self._data = None
+
+
+class GzipDecoder(object):
+
+ def __init__(self):
+ self._obj = zlib.decompressobj(16 + zlib.MAX_WBITS)
+
+ def __getattr__(self, name):
+ return getattr(self._obj, name)
+
+ def decompress(self, data):
+ if not data:
+ return data
+ return self._obj.decompress(data)
+
+
+def _get_decoder(mode):
+ if mode == 'gzip':
+ return GzipDecoder()
+
+ return DeflateDecoder()
+
+
+class HTTPResponse(io.IOBase):
+ """
+ HTTP Response container.
+
+ Backwards-compatible with httplib's HTTPResponse, but the response ``body`` is
+ loaded and decoded on-demand when the ``data`` property is accessed. This
+ class is also compatible with the Python standard library's :mod:`io`
+ module, and can hence be treated as a readable object in the context of that
+ framework.
+
+ Extra parameters for behaviour not present in httplib.HTTPResponse:
+
+ :param preload_content:
+ If True, the response's body will be preloaded during construction.
+
+ :param decode_content:
+ If True, attempts to decode specific content-encodings indicated by
+ the headers (like 'gzip' and 'deflate'). If False, decoding is skipped
+ and the raw data is used instead.
+
+ :param original_response:
+ When this HTTPResponse wrapper is generated from an httplib.HTTPResponse
+ object, it's convenient to include the original for debug purposes. It's
+ otherwise unused.
+
+ :param retries:
+ The retries contains the last :class:`~urllib3.util.retry.Retry` that
+ was used during the request.
+
+ :param enforce_content_length:
+ Enforce content length checking. Body returned by server must match
+ value of Content-Length header, if present. Otherwise, raise error.
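+
+ Example (a minimal sketch; the URL is a placeholder):
+ >>> import urllib3
+ >>> http = urllib3.PoolManager()
+ >>> r = http.request('GET', 'http://httpbin.org/get')
+ >>> status = r.status # e.g. 200 when the placeholder host responds
+ >>> body = r.data # whole decoded body as bytes (preloaded by default)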
+ """
+
+ CONTENT_DECODERS = ['gzip', 'deflate']
+ REDIRECT_STATUSES = [301, 302, 303, 307, 308]
+
+ def __init__(self, body='', headers=None, status=0, version=0, reason=None,
+ strict=0, preload_content=True, decode_content=True,
+ original_response=None, pool=None, connection=None,
+ retries=None, enforce_content_length=False, request_method=None):
+
+ if isinstance(headers, HTTPHeaderDict):
+ self.headers = headers
+ else:
+ self.headers = HTTPHeaderDict(headers)
+ self.status = status
+ self.version = version
+ self.reason = reason
+ self.strict = strict
+ self.decode_content = decode_content
+ self.retries = retries
+ self.enforce_content_length = enforce_content_length
+
+ self._decoder = None
+ self._body = None
+ self._fp = None
+ self._original_response = original_response
+ self._fp_bytes_read = 0
+
+ if body and isinstance(body, (basestring, binary_type)):
+ self._body = body
+
+ self._pool = pool
+ self._connection = connection
+
+ if hasattr(body, 'read'):
+ self._fp = body
+
+ # Are we using the chunked-style of transfer encoding?
+ self.chunked = False
+ self.chunk_left = None
+ tr_enc = self.headers.get('transfer-encoding', '').lower()
+ # Don't incur the penalty of creating a list and then discarding it
+ encodings = (enc.strip() for enc in tr_enc.split(","))
+ if "chunked" in encodings:
+ self.chunked = True
+
+ # Determine length of response
+ self.length_remaining = self._init_length(request_method)
+
+ # If requested, preload the body.
+ if preload_content and not self._body:
+ self._body = self.read(decode_content=decode_content)
+
+ def get_redirect_location(self):
+ """
+ Should we redirect and where to?
+
+ :returns: Truthy redirect location string if we got a redirect status
+ code and valid location. ``None`` if redirect status and no
+ location. ``False`` if not a redirect status code.
+ """
+ if self.status in self.REDIRECT_STATUSES:
+ return self.headers.get('location')
+
+ return False
+
+ def release_conn(self):
+ if not self._pool or not self._connection:
+ return
+
+ self._pool._put_conn(self._connection)
+ self._connection = None
+
+ @property
+ def data(self):
+ # For backwards-compat with urllib3 0.4 and earlier.
+ if self._body:
+ return self._body
+
+ if self._fp:
+ return self.read(cache_content=True)
+
+ @property
+ def connection(self):
+ return self._connection
+
+ def tell(self):
+ """
+ Obtain the number of bytes pulled over the wire so far. May differ from
+ the amount of content returned by :meth:`HTTPResponse.read` if bytes
+ are encoded on the wire (e.g., compressed).
+ """
+ return self._fp_bytes_read
+
+ def _init_length(self, request_method):
+ """
+ Set initial length value for Response content if available.
+ """
+ length = self.headers.get('content-length')
+
+ if length is not None and self.chunked:
+ # This Response will fail with an IncompleteRead if it can't be
+ # received as chunked. This method falls back to attempt reading
+ # the response before raising an exception.
+ log.warning("Received response with both Content-Length and "
+ "Transfer-Encoding set. This is expressly forbidden "
+ "by RFC 7230 sec 3.3.2. Ignoring Content-Length and "
+ "attempting to process response as Transfer-Encoding: "
+ "chunked.")
+ return None
+
+ elif length is not None:
+ try:
+ # RFC 7230 section 3.3.2 specifies multiple content lengths can
+ # be sent in a single Content-Length header
+ # (e.g. Content-Length: 42, 42). This line ensures the values
+ # are all valid ints and that as long as the `set` length is 1,
+ # all values are the same. Otherwise, the header is invalid.
+ lengths = set([int(val) for val in length.split(',')])
+ if len(lengths) > 1:
+ raise InvalidHeader("Content-Length contained multiple "
+ "unmatching values (%s)" % length)
+ length = lengths.pop()
+ except ValueError:
+ length = None
+ else:
+ if length < 0:
+ length = None
+
+ # Convert status to int for comparison
+ # In some cases, httplib returns a status of "_UNKNOWN"
+ try:
+ status = int(self.status)
+ except ValueError:
+ status = 0
+
+ # Check for responses that shouldn't include a body
+ if status in (204, 304) or 100 <= status < 200 or request_method == 'HEAD':
+ length = 0
+
+ return length
+
+ def _init_decoder(self):
+ """
+ Set-up the _decoder attribute if necessary.
+ """
+ # Note: content-encoding value should be case-insensitive, per RFC 7230
+ # Section 3.2
+ content_encoding = self.headers.get('content-encoding', '').lower()
+ if self._decoder is None and content_encoding in self.CONTENT_DECODERS:
+ self._decoder = _get_decoder(content_encoding)
+
+ def _decode(self, data, decode_content, flush_decoder):
+ """
+ Decode the data passed in and potentially flush the decoder.
+ """
+ try:
+ if decode_content and self._decoder:
+ data = self._decoder.decompress(data)
+ except (IOError, zlib.error) as e:
+ content_encoding = self.headers.get('content-encoding', '').lower()
+ raise DecodeError(
+ "Received response with content-encoding: %s, but "
+ "failed to decode it." % content_encoding, e)
+
+ if flush_decoder and decode_content:
+ data += self._flush_decoder()
+
+ return data
+
+ def _flush_decoder(self):
+ """
+ Flushes the decoder. Should only be called if the decoder is actually
+ being used.
+ """
+ if self._decoder:
+ buf = self._decoder.decompress(b'')
+ return buf + self._decoder.flush()
+
+ return b''
+
+ @contextmanager
+ def _error_catcher(self):
+ """
+ Catch low-level python exceptions, instead re-raising urllib3
+ variants, so that low-level exceptions are not leaked in the
+ high-level api.
+
+ On exit, release the connection back to the pool.
+ """
+ clean_exit = False
+
+ try:
+ try:
+ yield
+
+ except SocketTimeout:
+ # FIXME: Ideally we'd like to include the url in the ReadTimeoutError but
+ # there is yet no clean way to get at it from this context.
+ raise ReadTimeoutError(self._pool, None, 'Read timed out.')
+
+ except BaseSSLError as e:
+ # FIXME: Is there a better way to differentiate between SSLErrors?
+ if 'read operation timed out' not in str(e): # Defensive:
+ # This shouldn't happen but just in case we're missing an edge
+ # case, let's avoid swallowing SSL errors.
+ raise
+
+ raise ReadTimeoutError(self._pool, None, 'Read timed out.')
+
+ except (HTTPException, SocketError) as e:
+ # This includes IncompleteRead.
+ raise ProtocolError('Connection broken: %r' % e, e)
+
+ # If no exception is thrown, we should avoid cleaning up
+ # unnecessarily.
+ clean_exit = True
+ finally:
+ # If we didn't terminate cleanly, we need to throw away our
+ # connection.
+ if not clean_exit:
+ # The response may not be closed but we're not going to use it
+ # anymore so close it now to ensure that the connection is
+ # released back to the pool.
+ if self._original_response:
+ self._original_response.close()
+
+ # Closing the response may not actually be sufficient to close
+ # everything, so if we have a hold of the connection close that
+ # too.
+ if self._connection:
+ self._connection.close()
+
+ # If we hold the original response but it's closed now, we should
+ # return the connection back to the pool.
+ if self._original_response and self._original_response.isclosed():
+ self.release_conn()
+
+ def read(self, amt=None, decode_content=None, cache_content=False):
+ """
+ Similar to :meth:`httplib.HTTPResponse.read`, but with two additional
+ parameters: ``decode_content`` and ``cache_content``.
+
+ :param amt:
+ How much of the content to read. If specified, caching is skipped
+ because it doesn't make sense to cache partial content as the full
+ response.
+
+ :param decode_content:
+ If True, will attempt to decode the body based on the
+ 'content-encoding' header.
+
+ :param cache_content:
+ If True, will save the returned data such that the same result is
+ returned regardless of the state of the underlying file object. This
+ is useful if you want the ``.data`` property to continue working
+ after having ``.read()`` the file object. (Overridden if ``amt`` is
+ set.)
+ """
+ self._init_decoder()
+ if decode_content is None:
+ decode_content = self.decode_content
+
+ if self._fp is None:
+ return
+
+ flush_decoder = False
+ data = None
+
+ with self._error_catcher():
+ if amt is None:
+ # cStringIO doesn't like amt=None
+ data = self._fp.read()
+ flush_decoder = True
+ else:
+ cache_content = False
+ data = self._fp.read(amt)
+ if amt != 0 and not data: # Platform-specific: Buggy versions of Python.
+ # Close the connection when no data is returned
+ #
+ # This is redundant to what httplib/http.client _should_
+ # already do. However, versions of python released before
+ # December 15, 2012 (http://bugs.python.org/issue16298) do
+ # not properly close the connection in all cases. There is
+ # no harm in redundantly calling close.
+ self._fp.close()
+ flush_decoder = True
+ if self.enforce_content_length and self.length_remaining not in (0, None):
+ # This is an edge case that httplib failed to cover due
+ # to concerns of backward compatibility. We're
+ # addressing it here to make sure IncompleteRead is
+ # raised during streaming, so all calls with incorrect
+ # Content-Length are caught.
+ raise IncompleteRead(self._fp_bytes_read, self.length_remaining)
+
+ if data:
+ self._fp_bytes_read += len(data)
+ if self.length_remaining is not None:
+ self.length_remaining -= len(data)
+
+ data = self._decode(data, decode_content, flush_decoder)
+
+ if cache_content:
+ self._body = data
+
+ return data
+
+ def stream(self, amt=2**16, decode_content=None):
+ """
+ A generator wrapper for the read() method. A call will block until
+ ``amt`` bytes have been read from the connection or until the
+ connection is closed.
+
+ :param amt:
+ How much of the content to read. The generator will return up to
+ this much data per iteration, but may return less. This is particularly
+ likely when using compressed data. However, the empty string will
+ never be returned.
+
+ :param decode_content:
+ If True, will attempt to decode the body based on the
+ 'content-encoding' header.
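+
+ Example (a minimal streaming sketch; the URL is a placeholder):
+ >>> import urllib3
+ >>> http = urllib3.PoolManager()
+ >>> r = http.request('GET', 'http://httpbin.org/bytes/4096', preload_content=False)
+ >>> for chunk in r.stream(1024):
+ ... process(chunk) # 'process' stands in for any consumer of the data
+ >>> r.release_conn()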
+ """
+ if self.chunked and self.supports_chunked_reads():
+ for line in self.read_chunked(amt, decode_content=decode_content):
+ yield line
+ else:
+ while not is_fp_closed(self._fp):
+ data = self.read(amt=amt, decode_content=decode_content)
+
+ if data:
+ yield data
+
+ @classmethod
+ def from_httplib(ResponseCls, r, **response_kw):
+ """
+ Given an :class:`httplib.HTTPResponse` instance ``r``, return a
+ corresponding :class:`urllib3.response.HTTPResponse` object.
+
+ Remaining parameters are passed to the HTTPResponse constructor, along
+ with ``original_response=r``.
+ """
+ headers = r.msg
+
+ if not isinstance(headers, HTTPHeaderDict):
+ if PY3: # Python 3
+ headers = HTTPHeaderDict(headers.items())
+ else: # Python 2
+ headers = HTTPHeaderDict.from_httplib(headers)
+
+ # HTTPResponse objects in Python 3 don't have a .strict attribute
+ strict = getattr(r, 'strict', 0)
+ resp = ResponseCls(body=r,
+ headers=headers,
+ status=r.status,
+ version=r.version,
+ reason=r.reason,
+ strict=strict,
+ original_response=r,
+ **response_kw)
+ return resp
+
+ # Backwards-compatibility methods for httplib.HTTPResponse
+ def getheaders(self):
+ return self.headers
+
+ def getheader(self, name, default=None):
+ return self.headers.get(name, default)
+
+ # Overrides from io.IOBase
+ def close(self):
+ if not self.closed:
+ self._fp.close()
+
+ if self._connection:
+ self._connection.close()
+
+ @property
+ def closed(self):
+ if self._fp is None:
+ return True
+ elif hasattr(self._fp, 'isclosed'):
+ return self._fp.isclosed()
+ elif hasattr(self._fp, 'closed'):
+ return self._fp.closed
+ else:
+ return True
+
+ def fileno(self):
+ if self._fp is None:
+ raise IOError("HTTPResponse has no file to get a fileno from")
+ elif hasattr(self._fp, "fileno"):
+ return self._fp.fileno()
+ else:
+ raise IOError("The file-like object this HTTPResponse is wrapped "
+ "around has no file descriptor")
+
+ def flush(self):
+ if self._fp is not None and hasattr(self._fp, 'flush'):
+ return self._fp.flush()
+
+ def readable(self):
+ # This method is required for `io` module compatibility.
+ return True
+
+ def readinto(self, b):
+ # This method is required for `io` module compatibility.
+ temp = self.read(len(b))
+ if len(temp) == 0:
+ return 0
+ else:
+ b[:len(temp)] = temp
+ return len(temp)
+
+ def supports_chunked_reads(self):
+ """
+ Checks if the underlying file-like object looks like an
+ httplib.HTTPResponse object. We do this by testing for the fp
+ attribute. If it is present we assume it returns raw chunks as
+ processed by read_chunked().
+ """
+ return hasattr(self._fp, 'fp')
+
+ def _update_chunk_length(self):
+ # First, we'll figure out length of a chunk and then
+ # we'll try to read it from socket.
+ if self.chunk_left is not None:
+ return
+ line = self._fp.fp.readline()
+ line = line.split(b';', 1)[0]
+ try:
+ self.chunk_left = int(line, 16)
+ except ValueError:
+ # Invalid chunked protocol response, abort.
+ self.close()
+ raise httplib.IncompleteRead(line)
+
+ def _handle_chunk(self, amt):
+ returned_chunk = None
+ if amt is None:
+ chunk = self._fp._safe_read(self.chunk_left)
+ returned_chunk = chunk
+ self._fp._safe_read(2) # Toss the CRLF at the end of the chunk.
+ self.chunk_left = None
+ elif amt < self.chunk_left:
+ value = self._fp._safe_read(amt)
+ self.chunk_left = self.chunk_left - amt
+ returned_chunk = value
+ elif amt == self.chunk_left:
+ value = self._fp._safe_read(amt)
+ self._fp._safe_read(2) # Toss the CRLF at the end of the chunk.
+ self.chunk_left = None
+ returned_chunk = value
+ else: # amt > self.chunk_left
+ returned_chunk = self._fp._safe_read(self.chunk_left)
+ self._fp._safe_read(2) # Toss the CRLF at the end of the chunk.
+ self.chunk_left = None
+ return returned_chunk
+
+ def read_chunked(self, amt=None, decode_content=None):
+ """
+ Similar to :meth:`HTTPResponse.read`, but with an additional
+ parameter: ``decode_content``.
+
+ :param decode_content:
+ If True, will attempt to decode the body based on the
+ 'content-encoding' header.
+ """
+ self._init_decoder()
+ # FIXME: Rewrite this method and make it a class with a better structured logic.
+ if not self.chunked:
+ raise ResponseNotChunked(
+ "Response is not chunked. "
+ "Header 'transfer-encoding: chunked' is missing.")
+ if not self.supports_chunked_reads():
+ raise BodyNotHttplibCompatible(
+ "Body should be httplib.HTTPResponse like. "
+ "It should have have an fp attribute which returns raw chunks.")
+
+ # Don't bother reading the body of a HEAD request.
+ if self._original_response and is_response_to_head(self._original_response):
+ self._original_response.close()
+ return
+
+ with self._error_catcher():
+ while True:
+ self._update_chunk_length()
+ if self.chunk_left == 0:
+ break
+ chunk = self._handle_chunk(amt)
+ decoded = self._decode(chunk, decode_content=decode_content,
+ flush_decoder=False)
+ if decoded:
+ yield decoded
+
+ if decode_content:
+ # On CPython and PyPy, we should never need to flush the
+ # decoder. However, on Jython we *might* need to, so
+ # lets defensively do it anyway.
+ decoded = self._flush_decoder()
+ if decoded: # Platform-specific: Jython.
+ yield decoded
+
+ # Chunk content ends with \r\n: discard it.
+ while True:
+ line = self._fp.fp.readline()
+ if not line:
+ # Some sites may not end with '\r\n'.
+ break
+ if line == b'\r\n':
+ break
+
+ # We read everything; close the "file".
+ if self._original_response:
+ self._original_response.close()
diff --git a/collectors/python.d.plugin/python_modules/urllib3/util/__init__.py b/collectors/python.d.plugin/python_modules/urllib3/util/__init__.py
new file mode 100644
index 000000000..bba628d98
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/urllib3/util/__init__.py
@@ -0,0 +1,55 @@
+# SPDX-License-Identifier: MIT
+from __future__ import absolute_import
+# For backwards compatibility, provide imports that used to be here.
+from .connection import is_connection_dropped
+from .request import make_headers
+from .response import is_fp_closed
+from .ssl_ import (
+ SSLContext,
+ HAS_SNI,
+ IS_PYOPENSSL,
+ IS_SECURETRANSPORT,
+ assert_fingerprint,
+ resolve_cert_reqs,
+ resolve_ssl_version,
+ ssl_wrap_socket,
+)
+from .timeout import (
+ current_time,
+ Timeout,
+)
+
+from .retry import Retry
+from .url import (
+ get_host,
+ parse_url,
+ split_first,
+ Url,
+)
+from .wait import (
+ wait_for_read,
+ wait_for_write
+)
+
+__all__ = (
+ 'HAS_SNI',
+ 'IS_PYOPENSSL',
+ 'IS_SECURETRANSPORT',
+ 'SSLContext',
+ 'Retry',
+ 'Timeout',
+ 'Url',
+ 'assert_fingerprint',
+ 'current_time',
+ 'is_connection_dropped',
+ 'is_fp_closed',
+ 'get_host',
+ 'parse_url',
+ 'make_headers',
+ 'resolve_cert_reqs',
+ 'resolve_ssl_version',
+ 'split_first',
+ 'ssl_wrap_socket',
+ 'wait_for_read',
+ 'wait_for_write'
+)
diff --git a/collectors/python.d.plugin/python_modules/urllib3/util/connection.py b/collectors/python.d.plugin/python_modules/urllib3/util/connection.py
new file mode 100644
index 000000000..3bd69e8fa
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/urllib3/util/connection.py
@@ -0,0 +1,131 @@
+# SPDX-License-Identifier: MIT
+from __future__ import absolute_import
+import socket
+from .wait import wait_for_read
+from .selectors import HAS_SELECT, SelectorError
+
+
+def is_connection_dropped(conn): # Platform-specific
+ """
+ Returns True if the connection is dropped and should be closed.
+
+ :param conn:
+ :class:`httplib.HTTPConnection` object.
+
+ Note: For platforms like AppEngine, this will always return ``False`` to
+ let the platform handle connection recycling transparently for us.
+ """
+ sock = getattr(conn, 'sock', False)
+ if sock is False: # Platform-specific: AppEngine
+ return False
+ if sock is None: # Connection already closed (such as by httplib).
+ return True
+
+ if not HAS_SELECT:
+ return False
+
+ try:
+ return bool(wait_for_read(sock, timeout=0.0))
+ except SelectorError:
+ return True
+
+
+# This function is copied from socket.py in the Python 2.7 standard
+# library test suite. Added to its signature is only `socket_options`.
+# One additional modification is that we avoid binding to IPv6 servers
+# discovered in DNS if the system doesn't have IPv6 functionality.
+def create_connection(address, timeout=socket._GLOBAL_DEFAULT_TIMEOUT,
+ source_address=None, socket_options=None):
+ """Connect to *address* and return the socket object.
+
+ Convenience function. Connect to *address* (a 2-tuple ``(host,
+ port)``) and return the socket object. Passing the optional
+ *timeout* parameter will set the timeout on the socket instance
+ before attempting to connect. If no *timeout* is supplied, the
+ global default timeout setting returned by :func:`getdefaulttimeout`
+ is used. If *source_address* is set it must be a tuple of (host, port)
+ for the socket to bind as a source address before making the connection.
+ A host of '' or port 0 tells the OS to use the default.
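+
+ Example (an illustrative sketch; host, port and options are placeholders):
+ >>> opts = [(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)]
+ >>> sock = create_connection(('localhost', 8080), timeout=3.0, socket_options=opts)
+ >>> sock.close()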
+ """
+
+ host, port = address
+ if host.startswith('['):
+ host = host.strip('[]')
+ err = None
+
+ # Using the value from allowed_gai_family() in the context of getaddrinfo lets
+ # us select whether to work with IPv4 DNS records, IPv6 records, or both.
+ # The original create_connection function always returns all records.
+ family = allowed_gai_family()
+
+ for res in socket.getaddrinfo(host, port, family, socket.SOCK_STREAM):
+ af, socktype, proto, canonname, sa = res
+ sock = None
+ try:
+ sock = socket.socket(af, socktype, proto)
+
+ # If provided, set socket level options before connecting.
+ _set_socket_options(sock, socket_options)
+
+ if timeout is not socket._GLOBAL_DEFAULT_TIMEOUT:
+ sock.settimeout(timeout)
+ if source_address:
+ sock.bind(source_address)
+ sock.connect(sa)
+ return sock
+
+ except socket.error as e:
+ err = e
+ if sock is not None:
+ sock.close()
+ sock = None
+
+ if err is not None:
+ raise err
+
+ raise socket.error("getaddrinfo returns an empty list")
+
+
+def _set_socket_options(sock, options):
+ if options is None:
+ return
+
+ for opt in options:
+ sock.setsockopt(*opt)
+
+
+def allowed_gai_family():
+ """This function is designed to work in the context of
+ getaddrinfo, where family=socket.AF_UNSPEC is the default and
+ will perform a DNS search for both IPv6 and IPv4 records."""
+
+ family = socket.AF_INET
+ if HAS_IPV6:
+ family = socket.AF_UNSPEC
+ return family
+
+
+def _has_ipv6(host):
+ """ Returns True if the system can bind an IPv6 address. """
+ sock = None
+ has_ipv6 = False
+
+ if socket.has_ipv6:
+ # has_ipv6 returns true if cPython was compiled with IPv6 support.
+ # It does not tell us if the system has IPv6 support enabled. To
+ # determine that we must bind to an IPv6 address.
+ # https://github.com/shazow/urllib3/pull/611
+ # https://bugs.python.org/issue658327
+ try:
+ sock = socket.socket(socket.AF_INET6)
+ sock.bind((host, 0))
+ has_ipv6 = True
+ except Exception:
+ pass
+
+ if sock:
+ sock.close()
+ return has_ipv6
+
+
+HAS_IPV6 = _has_ipv6('::1')
diff --git a/collectors/python.d.plugin/python_modules/urllib3/util/request.py b/collectors/python.d.plugin/python_modules/urllib3/util/request.py
new file mode 100644
index 000000000..18f27b032
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/urllib3/util/request.py
@@ -0,0 +1,119 @@
+# SPDX-License-Identifier: MIT
+from __future__ import absolute_import
+from base64 import b64encode
+
+from ..packages.six import b, integer_types
+from ..exceptions import UnrewindableBodyError
+
+ACCEPT_ENCODING = 'gzip,deflate'
+_FAILEDTELL = object()
+
+
+def make_headers(keep_alive=None, accept_encoding=None, user_agent=None,
+ basic_auth=None, proxy_basic_auth=None, disable_cache=None):
+ """
+ Shortcuts for generating request headers.
+
+ :param keep_alive:
+ If ``True``, adds 'connection: keep-alive' header.
+
+ :param accept_encoding:
+ Can be a boolean, list, or string.
+ ``True`` translates to 'gzip,deflate'.
+ List will get joined by comma.
+ String will be used as provided.
+
+ :param user_agent:
+ String representing the user-agent you want, such as
+ "python-urllib3/0.6"
+
+ :param basic_auth:
+ Colon-separated username:password string for 'authorization: basic ...'
+ auth header.
+
+ :param proxy_basic_auth:
+ Colon-separated username:password string for 'proxy-authorization: basic ...'
+ auth header.
+
+ :param disable_cache:
+ If ``True``, adds 'cache-control: no-cache' header.
+
+ Example::
+
+ >>> make_headers(keep_alive=True, user_agent="Batman/1.0")
+ {'connection': 'keep-alive', 'user-agent': 'Batman/1.0'}
+ >>> make_headers(accept_encoding=True)
+ {'accept-encoding': 'gzip,deflate'}
+ """
+ headers = {}
+ if accept_encoding:
+ if isinstance(accept_encoding, str):
+ pass
+ elif isinstance(accept_encoding, list):
+ accept_encoding = ','.join(accept_encoding)
+ else:
+ accept_encoding = ACCEPT_ENCODING
+ headers['accept-encoding'] = accept_encoding
+
+ if user_agent:
+ headers['user-agent'] = user_agent
+
+ if keep_alive:
+ headers['connection'] = 'keep-alive'
+
+ if basic_auth:
+ headers['authorization'] = 'Basic ' + \
+ b64encode(b(basic_auth)).decode('utf-8')
+
+ if proxy_basic_auth:
+ headers['proxy-authorization'] = 'Basic ' + \
+ b64encode(b(proxy_basic_auth)).decode('utf-8')
+
+ if disable_cache:
+ headers['cache-control'] = 'no-cache'
+
+ return headers
+
+
+def set_file_position(body, pos):
+ """
+ If a position is provided, move file to that point.
+ Otherwise, we'll attempt to record a position for future use.
+ """
+ if pos is not None:
+ rewind_body(body, pos)
+ elif getattr(body, 'tell', None) is not None:
+ try:
+ pos = body.tell()
+ except (IOError, OSError):
+ # This differentiates from None, allowing us to catch
+ # a failed `tell()` later when trying to rewind the body.
+ pos = _FAILEDTELL
+
+ return pos
+
+
+def rewind_body(body, body_pos):
+ """
+ Attempt to rewind body to a certain position.
+ Primarily used for request redirects and retries.
+
+ :param body:
+ File-like object that supports seek.
+
+ :param int body_pos:
+ Position to seek to in file.
+ """
+ body_seek = getattr(body, 'seek', None)
+ if body_seek is not None and isinstance(body_pos, integer_types):
+ try:
+ body_seek(body_pos)
+ except (IOError, OSError):
+ raise UnrewindableBodyError("An error occurred when rewinding request "
+ "body for redirect/retry.")
+ elif body_pos is _FAILEDTELL:
+ raise UnrewindableBodyError("Unable to record file position for rewinding "
+ "request body during a redirect/retry.")
+ else:
+ raise ValueError("body_pos must be of type integer, "
+ "instead it was %s." % type(body_pos))
diff --git a/collectors/python.d.plugin/python_modules/urllib3/util/response.py b/collectors/python.d.plugin/python_modules/urllib3/util/response.py
new file mode 100644
index 000000000..e4cda93d4
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/urllib3/util/response.py
@@ -0,0 +1,82 @@
+# SPDX-License-Identifier: MIT
+from __future__ import absolute_import
+from ..packages.six.moves import http_client as httplib
+
+from ..exceptions import HeaderParsingError
+
+
+def is_fp_closed(obj):
+ """
+ Checks whether a given file-like object is closed.
+
+ :param obj:
+ The file-like object to check.
+ """
+
+ try:
+ # Check `isclosed()` first, in case Python3 doesn't set `closed`.
+ # GH Issue #928
+ return obj.isclosed()
+ except AttributeError:
+ pass
+
+ try:
+ # Check via the official file-like-object way.
+ return obj.closed
+ except AttributeError:
+ pass
+
+ try:
+ # Check if the object is a container for another file-like object that
+ # gets released on exhaustion (e.g. HTTPResponse).
+ return obj.fp is None
+ except AttributeError:
+ pass
+
+ raise ValueError("Unable to determine whether fp is closed.")
+
+
+def assert_header_parsing(headers):
+ """
+ Asserts whether all headers have been successfully parsed.
+ Extracts encountered errors from the result of parsing headers.
+
+ Only works on Python 3.
+
+ :param headers: Headers to verify.
+ :type headers: `httplib.HTTPMessage`.
+
+ :raises urllib3.exceptions.HeaderParsingError:
+ If parsing errors are found.
+ """
+
+ # This will fail silently if we pass in the wrong kind of parameter.
+ # To make debugging easier add an explicit check.
+ if not isinstance(headers, httplib.HTTPMessage):
+ raise TypeError('expected httplib.Message, got {0}.'.format(
+ type(headers)))
+
+ defects = getattr(headers, 'defects', None)
+ get_payload = getattr(headers, 'get_payload', None)
+
+ unparsed_data = None
+ if get_payload: # Platform-specific: Python 3.
+ unparsed_data = get_payload()
+
+ if defects or unparsed_data:
+ raise HeaderParsingError(defects=defects, unparsed_data=unparsed_data)
+
+
+def is_response_to_head(response):
+ """
+ Checks whether the request of a response has been a HEAD-request.
+ Handles the quirks of AppEngine.
+
+ :param response:
+ :type response: :class:`httplib.HTTPResponse`
+ """
+ # FIXME: Can we do this somehow without accessing private httplib _method?
+ method = response._method
+ if isinstance(method, int): # Platform-specific: Appengine
+ return method == 3
+ return method.upper() == 'HEAD'
diff --git a/collectors/python.d.plugin/python_modules/urllib3/util/retry.py b/collectors/python.d.plugin/python_modules/urllib3/util/retry.py
new file mode 100644
index 000000000..61e63afec
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/urllib3/util/retry.py
@@ -0,0 +1,402 @@
+# SPDX-License-Identifier: MIT
+from __future__ import absolute_import
+import time
+import logging
+from collections import namedtuple
+from itertools import takewhile
+import email
+import re
+
+from ..exceptions import (
+ ConnectTimeoutError,
+ MaxRetryError,
+ ProtocolError,
+ ReadTimeoutError,
+ ResponseError,
+ InvalidHeader,
+)
+from ..packages import six
+
+
+log = logging.getLogger(__name__)
+
+# Data structure for representing the metadata of requests that result in a retry.
+RequestHistory = namedtuple('RequestHistory', ["method", "url", "error",
+ "status", "redirect_location"])
+
+
+class Retry(object):
+ """ Retry configuration.
+
+ Each retry attempt will create a new Retry object with updated values, so
+ they can be safely reused.
+
+ Retries can be defined as a default for a pool::
+
+ retries = Retry(connect=5, read=2, redirect=5)
+ http = PoolManager(retries=retries)
+ response = http.request('GET', 'http://example.com/')
+
+ Or per-request (which overrides the default for the pool)::
+
+ response = http.request('GET', 'http://example.com/', retries=Retry(10))
+
+ Retries can be disabled by passing ``False``::
+
+ response = http.request('GET', 'http://example.com/', retries=False)
+
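+ Retries can also be tuned to specific status codes, with a backoff between
+ attempts (the values below are purely illustrative)::
+
+ retries = Retry(total=5, backoff_factor=0.2,
+ status_forcelist=[500, 502, 503])
+ http = PoolManager(retries=retries)
+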
+ Errors will be wrapped in :class:`~urllib3.exceptions.MaxRetryError` unless
+ retries are disabled, in which case the causing exception will be raised.
+
+ :param int total:
+ Total number of retries to allow. Takes precedence over other counts.
+
+ Set to ``None`` to remove this constraint and fall back on other
+ counts. It's a good idea to set this to some sensibly-high value to
+ account for unexpected edge cases and avoid infinite retry loops.
+
+ Set to ``0`` to fail on the first retry.
+
+ Set to ``False`` to disable and imply ``raise_on_redirect=False``.
+
+ :param int connect:
+ How many connection-related errors to retry on.
+
+ These are errors raised before the request is sent to the remote server,
+ which we assume has not triggered the server to process the request.
+
+ Set to ``0`` to fail on the first retry of this type.
+
+ :param int read:
+ How many times to retry on read errors.
+
+ These errors are raised after the request was sent to the server, so the
+ request may have side-effects.
+
+ Set to ``0`` to fail on the first retry of this type.
+
+ :param int redirect:
+ How many redirects to perform. Limit this to avoid infinite redirect
+ loops.
+
+ A redirect is an HTTP response with a status code 301, 302, 303, 307 or
+ 308.
+
+ Set to ``0`` to fail on the first retry of this type.
+
+ Set to ``False`` to disable and imply ``raise_on_redirect=False``.
+
+ :param int status:
+ How many times to retry on bad status codes.
+
+ These are retries made on responses, where status code matches
+ ``status_forcelist``.
+
+ Set to ``0`` to fail on the first retry of this type.
+
+ :param iterable method_whitelist:
+ Set of uppercased HTTP method verbs that we should retry on.
+
+ By default, we only retry on methods which are considered to be
+ idempotent (multiple requests with the same parameters end with the
+ same state). See :attr:`Retry.DEFAULT_METHOD_WHITELIST`.
+
+ Set to a ``False`` value to retry on any verb.
+
+ :param iterable status_forcelist:
+ A set of integer HTTP status codes that we should force a retry on.
+ A retry is initiated if the request method is in ``method_whitelist``
+ and the response status code is in ``status_forcelist``.
+
+ By default, this is disabled with ``None``.
+
+ :param float backoff_factor:
+ A backoff factor to apply between attempts after the second try
+ (most errors are resolved immediately by a second try without a
+ delay). urllib3 will sleep for::
+
+ {backoff factor} * (2 ^ ({number of total retries} - 1))
+
+ seconds. If the backoff_factor is 0.1, then :func:`.sleep` will sleep
+ for [0.0s, 0.2s, 0.4s, ...] between retries. It will never be longer
+ than :attr:`Retry.BACKOFF_MAX`.
+
+ By default, backoff is disabled (set to 0).
+
+ :param bool raise_on_redirect: Whether, if the number of redirects is
+ exhausted, to raise a MaxRetryError, or to return a response with a
+ response code in the 3xx range.
+
+ :param bool raise_on_status: Similar meaning to ``raise_on_redirect``:
+ whether we should raise an exception, or return a response,
+ if status falls in ``status_forcelist`` range and retries have
+ been exhausted.
+
+ :param tuple history: The history of the request encountered during
+ each call to :meth:`~Retry.increment`. The list is in the order
+ the requests occurred. Each list item is of class :class:`RequestHistory`.
+
+ :param bool respect_retry_after_header:
+ Whether to respect Retry-After header on status codes defined as
+ :attr:`Retry.RETRY_AFTER_STATUS_CODES` or not.
+
+ """
+
+ DEFAULT_METHOD_WHITELIST = frozenset([
+ 'HEAD', 'GET', 'PUT', 'DELETE', 'OPTIONS', 'TRACE'])
+
+ RETRY_AFTER_STATUS_CODES = frozenset([413, 429, 503])
+
+ #: Maximum backoff time.
+ BACKOFF_MAX = 120
+
+ def __init__(self, total=10, connect=None, read=None, redirect=None, status=None,
+ method_whitelist=DEFAULT_METHOD_WHITELIST, status_forcelist=None,
+ backoff_factor=0, raise_on_redirect=True, raise_on_status=True,
+ history=None, respect_retry_after_header=True):
+
+ self.total = total
+ self.connect = connect
+ self.read = read
+ self.status = status
+
+ if redirect is False or total is False:
+ redirect = 0
+ raise_on_redirect = False
+
+ self.redirect = redirect
+ self.status_forcelist = status_forcelist or set()
+ self.method_whitelist = method_whitelist
+ self.backoff_factor = backoff_factor
+ self.raise_on_redirect = raise_on_redirect
+ self.raise_on_status = raise_on_status
+ self.history = history or tuple()
+ self.respect_retry_after_header = respect_retry_after_header
+
+ def new(self, **kw):
+ params = dict(
+ total=self.total,
+ connect=self.connect, read=self.read, redirect=self.redirect, status=self.status,
+ method_whitelist=self.method_whitelist,
+ status_forcelist=self.status_forcelist,
+ backoff_factor=self.backoff_factor,
+ raise_on_redirect=self.raise_on_redirect,
+ raise_on_status=self.raise_on_status,
+ history=self.history,
+ )
+ params.update(kw)
+ return type(self)(**params)
+
+ @classmethod
+ def from_int(cls, retries, redirect=True, default=None):
+ """ Backwards-compatibility for the old retries format."""
+ if retries is None:
+ retries = default if default is not None else cls.DEFAULT
+
+ if isinstance(retries, Retry):
+ return retries
+
+ redirect = bool(redirect) and None
+ new_retries = cls(retries, redirect=redirect)
+ log.debug("Converted retries value: %r -> %r", retries, new_retries)
+ return new_retries
+
+ def get_backoff_time(self):
+ """ Formula for computing the current backoff
+
+ :rtype: float
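+
+ For example, working through the formula above: with ``backoff_factor=0.5``
+ and three consecutive errors recorded in the history, this returns
+ ``min(BACKOFF_MAX, 0.5 * 2 ** (3 - 1)) == 2.0`` seconds.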
+ """
+ # We want to consider only the last consecutive errors sequence (Ignore redirects).
+ consecutive_errors_len = len(list(takewhile(lambda x: x.redirect_location is None,
+ reversed(self.history))))
+ if consecutive_errors_len <= 1:
+ return 0
+
+ backoff_value = self.backoff_factor * (2 ** (consecutive_errors_len - 1))
+ return min(self.BACKOFF_MAX, backoff_value)
+
+ def parse_retry_after(self, retry_after):
+ # Whitespace: https://tools.ietf.org/html/rfc7230#section-3.2.4
+ if re.match(r"^\s*[0-9]+\s*$", retry_after):
+ seconds = int(retry_after)
+ else:
+ retry_date_tuple = email.utils.parsedate(retry_after)
+ if retry_date_tuple is None:
+ raise InvalidHeader("Invalid Retry-After header: %s" % retry_after)
+ retry_date = time.mktime(retry_date_tuple)
+ seconds = retry_date - time.time()
+
+ if seconds < 0:
+ seconds = 0
+
+ return seconds
+
+ def get_retry_after(self, response):
+ """ Get the value of Retry-After in seconds. """
+
+ retry_after = response.getheader("Retry-After")
+
+ if retry_after is None:
+ return None
+
+ return self.parse_retry_after(retry_after)
+
+ def sleep_for_retry(self, response=None):
+ retry_after = self.get_retry_after(response)
+ if retry_after:
+ time.sleep(retry_after)
+ return True
+
+ return False
+
+ def _sleep_backoff(self):
+ backoff = self.get_backoff_time()
+ if backoff <= 0:
+ return
+ time.sleep(backoff)
+
+ def sleep(self, response=None):
+ """ Sleep between retry attempts.
+
+ This method will respect a server's ``Retry-After`` response header
+ and sleep the duration of the time requested. If that is not present, it
+ will use an exponential backoff. By default, the backoff factor is 0 and
+ this method will return immediately.
+ """
+
+ if response:
+ slept = self.sleep_for_retry(response)
+ if slept:
+ return
+
+ self._sleep_backoff()
+
+ def _is_connection_error(self, err):
+ """ Errors when we're fairly sure that the server did not receive the
+ request, so it should be safe to retry.
+ """
+ return isinstance(err, ConnectTimeoutError)
+
+ def _is_read_error(self, err):
+ """ Errors that occur after the request has been started, so we should
+ assume that the server began processing it.
+ """
+ return isinstance(err, (ReadTimeoutError, ProtocolError))
+
+ def _is_method_retryable(self, method):
+ """ Checks if a given HTTP method should be retried upon, depending if
+ it is included on the method whitelist.
+ """
+ if self.method_whitelist and method.upper() not in self.method_whitelist:
+ return False
+
+ return True
+
+ def is_retry(self, method, status_code, has_retry_after=False):
+ """ Is this method/status code retryable? (Based on whitelists and control
+ variables such as the number of total retries to allow, whether to
+ respect the Retry-After header, whether this header is present, and
+ whether the returned status code is on the list of status codes to
+ be retried upon in the presence of the aforementioned header)
+ """
+ if not self._is_method_retryable(method):
+ return False
+
+ if self.status_forcelist and status_code in self.status_forcelist:
+ return True
+
+ return (self.total and self.respect_retry_after_header and
+ has_retry_after and (status_code in self.RETRY_AFTER_STATUS_CODES))
+
+ def is_exhausted(self):
+ """ Are we out of retries? """
+ retry_counts = (self.total, self.connect, self.read, self.redirect, self.status)
+ retry_counts = list(filter(None, retry_counts))
+ if not retry_counts:
+ return False
+
+ return min(retry_counts) < 0
+
+ def increment(self, method=None, url=None, response=None, error=None,
+ _pool=None, _stacktrace=None):
+ """ Return a new Retry object with incremented retry counters.
+
+ :param response: A response object, or None, if the server did not
+ return a response.
+ :type response: :class:`~urllib3.response.HTTPResponse`
+ :param Exception error: An error encountered during the request, or
+ None if the response was received successfully.
+
+ :return: A new ``Retry`` object.
+ """
+ if self.total is False and error:
+ # Disabled, indicate to re-raise the error.
+ raise six.reraise(type(error), error, _stacktrace)
+
+ total = self.total
+ if total is not None:
+ total -= 1
+
+ connect = self.connect
+ read = self.read
+ redirect = self.redirect
+ status_count = self.status
+ cause = 'unknown'
+ status = None
+ redirect_location = None
+
+ if error and self._is_connection_error(error):
+ # Connect retry?
+ if connect is False:
+ raise six.reraise(type(error), error, _stacktrace)
+ elif connect is not None:
+ connect -= 1
+
+ elif error and self._is_read_error(error):
+ # Read retry?
+ if read is False or not self._is_method_retryable(method):
+ raise six.reraise(type(error), error, _stacktrace)
+ elif read is not None:
+ read -= 1
+
+ elif response and response.get_redirect_location():
+ # Redirect retry?
+ if redirect is not None:
+ redirect -= 1
+ cause = 'too many redirects'
+ redirect_location = response.get_redirect_location()
+ status = response.status
+
+ else:
+ # Incrementing because of a server error (e.g. a 500 in
+ # status_forcelist) and the given method is in the whitelist
+ cause = ResponseError.GENERIC_ERROR
+ if response and response.status:
+ if status_count is not None:
+ status_count -= 1
+ cause = ResponseError.SPECIFIC_ERROR.format(
+ status_code=response.status)
+ status = response.status
+
+ history = self.history + (RequestHistory(method, url, error, status, redirect_location),)
+
+ new_retry = self.new(
+ total=total,
+ connect=connect, read=read, redirect=redirect, status=status_count,
+ history=history)
+
+ if new_retry.is_exhausted():
+ raise MaxRetryError(_pool, url, error or ResponseError(cause))
+
+ log.debug("Incremented Retry for (url='%s'): %r", url, new_retry)
+
+ return new_retry
+
+ def __repr__(self):
+ return ('{cls.__name__}(total={self.total}, connect={self.connect}, '
+ 'read={self.read}, redirect={self.redirect}, status={self.status})').format(
+ cls=type(self), self=self)
+
+
+# For backwards compatibility (equivalent to pre-v1.9):
+Retry.DEFAULT = Retry(3)
diff --git a/collectors/python.d.plugin/python_modules/urllib3/util/selectors.py b/collectors/python.d.plugin/python_modules/urllib3/util/selectors.py
new file mode 100644
index 000000000..c0997b1a2
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/urllib3/util/selectors.py
@@ -0,0 +1,582 @@
+# SPDX-License-Identifier: MIT
+# Backport of selectors.py from Python 3.5+ to support Python < 3.4
+# Also has the behavior specified in PEP 475 which is to retry syscalls
+# in the case of an EINTR error. This module is required because selectors34
+# does not follow this behavior and instead returns that no file descriptor
+# events have occurred rather than retry the syscall. The decision to drop
+# support for select.devpoll is made to maintain 100% test coverage.
+
+import errno
+import math
+import select
+import socket
+import sys
+import time
+from collections import namedtuple, Mapping
+
+try:
+ monotonic = time.monotonic
+except (AttributeError, ImportError): # Python < 3.3
+ monotonic = time.time
+
+EVENT_READ = (1 << 0)
+EVENT_WRITE = (1 << 1)
+
+HAS_SELECT = True # Variable that shows whether the platform has a selector.
+_SYSCALL_SENTINEL = object() # Sentinel in case a system call returns None.
+_DEFAULT_SELECTOR = None
+
+
+class SelectorError(Exception):
+ def __init__(self, errcode):
+ super(SelectorError, self).__init__()
+ self.errno = errcode
+
+ def __repr__(self):
+ return "<SelectorError errno={0}>".format(self.errno)
+
+ def __str__(self):
+ return self.__repr__()
+
+
+def _fileobj_to_fd(fileobj):
+ """ Return a file descriptor from a file object. If
+ given an integer will simply return that integer back. """
+ if isinstance(fileobj, int):
+ fd = fileobj
+ else:
+ try:
+ fd = int(fileobj.fileno())
+ except (AttributeError, TypeError, ValueError):
+ raise ValueError("Invalid file object: {0!r}".format(fileobj))
+ if fd < 0:
+ raise ValueError("Invalid file descriptor: {0}".format(fd))
+ return fd
+
+
+# Determine which function to use to wrap system calls because Python 3.5+
+# already handles the case when system calls are interrupted.
+if sys.version_info >= (3, 5):
+ def _syscall_wrapper(func, _, *args, **kwargs):
+ """ This is the short-circuit version of the below logic
+ because in Python 3.5+ all system calls automatically restart
+ and recalculate their timeouts. """
+ try:
+ return func(*args, **kwargs)
+ except (OSError, IOError, select.error) as e:
+ errcode = None
+ if hasattr(e, "errno"):
+ errcode = e.errno
+ raise SelectorError(errcode)
+else:
+ def _syscall_wrapper(func, recalc_timeout, *args, **kwargs):
+ """ Wrapper function for syscalls that could fail due to EINTR.
+ All functions should be retried if there is time left in the timeout
+ in accordance with PEP 475. """
+ timeout = kwargs.get("timeout", None)
+ if timeout is None:
+ expires = None
+ recalc_timeout = False
+ else:
+ timeout = float(timeout)
+ if timeout < 0.0: # Timeout less than 0 treated as no timeout.
+ expires = None
+ else:
+ expires = monotonic() + timeout
+
+ args = list(args)
+ if recalc_timeout and "timeout" not in kwargs:
+ raise ValueError(
+ "Timeout must be in args or kwargs to be recalculated")
+
+ result = _SYSCALL_SENTINEL
+ while result is _SYSCALL_SENTINEL:
+ try:
+ result = func(*args, **kwargs)
+ # OSError is thrown by select.select
+ # IOError is thrown by select.epoll.poll
+ # select.error is thrown by select.poll.poll
+ # Aren't we thankful for Python 3.x rework for exceptions?
+ except (OSError, IOError, select.error) as e:
+ # select.error wasn't a subclass of OSError in the past.
+ errcode = None
+ if hasattr(e, "errno"):
+ errcode = e.errno
+ elif hasattr(e, "args"):
+ errcode = e.args[0]
+
+ # Also test for the Windows equivalent of EINTR.
+ is_interrupt = (errcode == errno.EINTR or (hasattr(errno, "WSAEINTR") and
+ errcode == errno.WSAEINTR))
+
+ if is_interrupt:
+ if expires is not None:
+ current_time = monotonic()
+ if current_time > expires:
+ raise OSError(errno.ETIMEDOUT, "Connection timed out")
+ if recalc_timeout:
+ if "timeout" in kwargs:
+ kwargs["timeout"] = expires - current_time
+ continue
+ if errcode:
+ raise SelectorError(errcode)
+ else:
+ raise
+ return result
+
+
+SelectorKey = namedtuple('SelectorKey', ['fileobj', 'fd', 'events', 'data'])
+
+
+class _SelectorMapping(Mapping):
+ """ Mapping of file objects to selector keys """
+
+ def __init__(self, selector):
+ self._selector = selector
+
+ def __len__(self):
+ return len(self._selector._fd_to_key)
+
+ def __getitem__(self, fileobj):
+ try:
+ fd = self._selector._fileobj_lookup(fileobj)
+ return self._selector._fd_to_key[fd]
+ except KeyError:
+ raise KeyError("{0!r} is not registered.".format(fileobj))
+
+ def __iter__(self):
+ return iter(self._selector._fd_to_key)
+
+
+class BaseSelector(object):
+ """ Abstract Selector class
+
+ A selector supports registering file objects to be monitored
+ for specific I/O events.
+
+ A file object is a file descriptor or any object with a
+ `fileno()` method. An arbitrary object can be attached to the
+ file object which can be used for example to store context info,
+ a callback, etc.
+
+ A selector can use various implementations (select(), poll(), epoll(),
+ and kqueue()) depending on the platform. The 'DefaultSelector' class uses
+ the most efficient implementation for the current platform.
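+
+ A minimal usage sketch with the select()-based subclass defined below
+ (``some_socket`` stands in for any registered file object)::
+
+ sel = SelectSelector()
+ key = sel.register(some_socket, EVENT_READ, data='on_readable')
+ for key, events in sel.select(timeout=1.0):
+ callback = key.data # whatever was attached at register() time
+ sel.close()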
+ """
+ def __init__(self):
+ # Maps file descriptors to keys.
+ self._fd_to_key = {}
+
+ # Read-only mapping returned by get_map()
+ self._map = _SelectorMapping(self)
+
+ def _fileobj_lookup(self, fileobj):
+ """ Return a file descriptor from a file object.
+ This wraps _fileobj_to_fd() to do an exhaustive
+ search in case the object is invalid but we still
+ have it in our map. Used by unregister() so we can
+ unregister an object that was previously registered
+ even if it is closed. It is also used by _SelectorMapping
+ """
+ try:
+ return _fileobj_to_fd(fileobj)
+ except ValueError:
+
+ # Search through all our mapped keys.
+ for key in self._fd_to_key.values():
+ if key.fileobj is fileobj:
+ return key.fd
+
+ # Raise ValueError after all.
+ raise
+
+ def register(self, fileobj, events, data=None):
+ """ Register a file object for a set of events to monitor. """
+ if (not events) or (events & ~(EVENT_READ | EVENT_WRITE)):
+ raise ValueError("Invalid events: {0!r}".format(events))
+
+ key = SelectorKey(fileobj, self._fileobj_lookup(fileobj), events, data)
+
+ if key.fd in self._fd_to_key:
+ raise KeyError("{0!r} (FD {1}) is already registered"
+ .format(fileobj, key.fd))
+
+ self._fd_to_key[key.fd] = key
+ return key
+
+ def unregister(self, fileobj):
+ """ Unregister a file object from being monitored. """
+ try:
+ key = self._fd_to_key.pop(self._fileobj_lookup(fileobj))
+ except KeyError:
+ raise KeyError("{0!r} is not registered".format(fileobj))
+
+ # Getting the fileno of a closed socket on Windows errors with EBADF.
+ except socket.error as e: # Platform-specific: Windows.
+ if e.errno != errno.EBADF:
+ raise
+ else:
+ for key in self._fd_to_key.values():
+ if key.fileobj is fileobj:
+ self._fd_to_key.pop(key.fd)
+ break
+ else:
+ raise KeyError("{0!r} is not registered".format(fileobj))
+ return key
+
+ def modify(self, fileobj, events, data=None):
+ """ Change a registered file object monitored events and data. """
+ # NOTE: Some subclasses optimize this operation even further.
+ try:
+ key = self._fd_to_key[self._fileobj_lookup(fileobj)]
+ except KeyError:
+ raise KeyError("{0!r} is not registered".format(fileobj))
+
+ if events != key.events:
+ self.unregister(fileobj)
+ key = self.register(fileobj, events, data)
+
+ elif data != key.data:
+ # Use a shortcut to update the data.
+ key = key._replace(data=data)
+ self._fd_to_key[key.fd] = key
+
+ return key
+
+ def select(self, timeout=None):
+ """ Perform the actual selection until some monitored file objects
+ are ready or the timeout expires. """
+ raise NotImplementedError()
+
+ def close(self):
+ """ Close the selector. This must be called to ensure that all
+ underlying resources are freed. """
+ self._fd_to_key.clear()
+ self._map = None
+
+ def get_key(self, fileobj):
+ """ Return the key associated with a registered file object. """
+ mapping = self.get_map()
+ if mapping is None:
+ raise RuntimeError("Selector is closed")
+ try:
+ return mapping[fileobj]
+ except KeyError:
+ raise KeyError("{0!r} is not registered".format(fileobj))
+
+ def get_map(self):
+ """ Return a mapping of file objects to selector keys """
+ return self._map
+
+ def _key_from_fd(self, fd):
+ """ Return the key associated to a given file descriptor
+ Return None if it is not found. """
+ try:
+ return self._fd_to_key[fd]
+ except KeyError:
+ return None
+
+ def __enter__(self):
+ return self
+
+ def __exit__(self, *args):
+ self.close()
+
+
+# Almost all platforms have select.select()
+if hasattr(select, "select"):
+ class SelectSelector(BaseSelector):
+ """ Select-based selector. """
+ def __init__(self):
+ super(SelectSelector, self).__init__()
+ self._readers = set()
+ self._writers = set()
+
+ def register(self, fileobj, events, data=None):
+ key = super(SelectSelector, self).register(fileobj, events, data)
+ if events & EVENT_READ:
+ self._readers.add(key.fd)
+ if events & EVENT_WRITE:
+ self._writers.add(key.fd)
+ return key
+
+ def unregister(self, fileobj):
+ key = super(SelectSelector, self).unregister(fileobj)
+ self._readers.discard(key.fd)
+ self._writers.discard(key.fd)
+ return key
+
+ def _select(self, r, w, timeout=None):
+ """ Wrapper for select.select because timeout is a positional arg """
+ return select.select(r, w, [], timeout)
+
+ def select(self, timeout=None):
+ # Selecting on empty lists on Windows errors out.
+ if not len(self._readers) and not len(self._writers):
+ return []
+
+ timeout = None if timeout is None else max(timeout, 0.0)
+ ready = []
+ r, w, _ = _syscall_wrapper(self._select, True, self._readers,
+ self._writers, timeout)
+ r = set(r)
+ w = set(w)
+ for fd in r | w:
+ events = 0
+ if fd in r:
+ events |= EVENT_READ
+ if fd in w:
+ events |= EVENT_WRITE
+
+ key = self._key_from_fd(fd)
+ if key:
+ ready.append((key, events & key.events))
+ return ready
+
+
+if hasattr(select, "poll"):
+ class PollSelector(BaseSelector):
+ """ Poll-based selector """
+ def __init__(self):
+ super(PollSelector, self).__init__()
+ self._poll = select.poll()
+
+ def register(self, fileobj, events, data=None):
+ key = super(PollSelector, self).register(fileobj, events, data)
+ event_mask = 0
+ if events & EVENT_READ:
+ event_mask |= select.POLLIN
+ if events & EVENT_WRITE:
+ event_mask |= select.POLLOUT
+ self._poll.register(key.fd, event_mask)
+ return key
+
+ def unregister(self, fileobj):
+ key = super(PollSelector, self).unregister(fileobj)
+ self._poll.unregister(key.fd)
+ return key
+
+ def _wrap_poll(self, timeout=None):
+ """ Wrapper function for select.poll.poll() so that
+ _syscall_wrapper can work with only seconds. """
+ if timeout is not None:
+ if timeout <= 0:
+ timeout = 0
+ else:
+ # select.poll.poll() has a resolution of 1 millisecond,
+ # round away from zero to wait *at least* timeout seconds.
+ timeout = math.ceil(timeout * 1e3)
+
+ result = self._poll.poll(timeout)
+ return result
+
+ def select(self, timeout=None):
+ ready = []
+ fd_events = _syscall_wrapper(self._wrap_poll, True, timeout=timeout)
+ for fd, event_mask in fd_events:
+ events = 0
+ if event_mask & ~select.POLLIN:
+ events |= EVENT_WRITE
+ if event_mask & ~select.POLLOUT:
+ events |= EVENT_READ
+
+ key = self._key_from_fd(fd)
+ if key:
+ ready.append((key, events & key.events))
+
+ return ready
+
+
+if hasattr(select, "epoll"):
+ class EpollSelector(BaseSelector):
+ """ Epoll-based selector """
+ def __init__(self):
+ super(EpollSelector, self).__init__()
+ self._epoll = select.epoll()
+
+ def fileno(self):
+ return self._epoll.fileno()
+
+ def register(self, fileobj, events, data=None):
+ key = super(EpollSelector, self).register(fileobj, events, data)
+ events_mask = 0
+ if events & EVENT_READ:
+ events_mask |= select.EPOLLIN
+ if events & EVENT_WRITE:
+ events_mask |= select.EPOLLOUT
+ _syscall_wrapper(self._epoll.register, False, key.fd, events_mask)
+ return key
+
+ def unregister(self, fileobj):
+ key = super(EpollSelector, self).unregister(fileobj)
+ try:
+ _syscall_wrapper(self._epoll.unregister, False, key.fd)
+ except SelectorError:
+ # This can occur when the fd was closed since registration.
+ pass
+ return key
+
+ def select(self, timeout=None):
+ if timeout is not None:
+ if timeout <= 0:
+ timeout = 0.0
+ else:
+ # select.epoll.poll() has a resolution of 1 millisecond
+ # but conveniently takes seconds, so we don't need a wrapper
+ # like PollSelector; we only round up to the next millisecond.
+ timeout = math.ceil(timeout * 1e3) * 1e-3
+ timeout = float(timeout)
+ else:
+ timeout = -1.0 # epoll.poll() must have a float.
+
+ # We always want at least 1 to ensure that select can be called
+ # with no file descriptors registered. Otherwise it will fail.
+ max_events = max(len(self._fd_to_key), 1)
+
+ ready = []
+ fd_events = _syscall_wrapper(self._epoll.poll, True,
+ timeout=timeout,
+ maxevents=max_events)
+ for fd, event_mask in fd_events:
+ events = 0
+ if event_mask & ~select.EPOLLIN:
+ events |= EVENT_WRITE
+ if event_mask & ~select.EPOLLOUT:
+ events |= EVENT_READ
+
+ key = self._key_from_fd(fd)
+ if key:
+ ready.append((key, events & key.events))
+ return ready
+
+ def close(self):
+ self._epoll.close()
+ super(EpollSelector, self).close()
+
+
+if hasattr(select, "kqueue"):
+ class KqueueSelector(BaseSelector):
+ """ Kqueue / Kevent-based selector """
+ def __init__(self):
+ super(KqueueSelector, self).__init__()
+ self._kqueue = select.kqueue()
+
+ def fileno(self):
+ return self._kqueue.fileno()
+
+ def register(self, fileobj, events, data=None):
+ key = super(KqueueSelector, self).register(fileobj, events, data)
+ if events & EVENT_READ:
+ kevent = select.kevent(key.fd,
+ select.KQ_FILTER_READ,
+ select.KQ_EV_ADD)
+
+ _syscall_wrapper(self._kqueue.control, False, [kevent], 0, 0)
+
+ if events & EVENT_WRITE:
+ kevent = select.kevent(key.fd,
+ select.KQ_FILTER_WRITE,
+ select.KQ_EV_ADD)
+
+ _syscall_wrapper(self._kqueue.control, False, [kevent], 0, 0)
+
+ return key
+
+ def unregister(self, fileobj):
+ key = super(KqueueSelector, self).unregister(fileobj)
+ if key.events & EVENT_READ:
+ kevent = select.kevent(key.fd,
+ select.KQ_FILTER_READ,
+ select.KQ_EV_DELETE)
+ try:
+ _syscall_wrapper(self._kqueue.control, False, [kevent], 0, 0)
+ except SelectorError:
+ pass
+ if key.events & EVENT_WRITE:
+ kevent = select.kevent(key.fd,
+ select.KQ_FILTER_WRITE,
+ select.KQ_EV_DELETE)
+ try:
+ _syscall_wrapper(self._kqueue.control, False, [kevent], 0, 0)
+ except SelectorError:
+ pass
+
+ return key
+
+ def select(self, timeout=None):
+ if timeout is not None:
+ timeout = max(timeout, 0)
+
+ max_events = len(self._fd_to_key) * 2
+ ready_fds = {}
+
+ kevent_list = _syscall_wrapper(self._kqueue.control, True,
+ None, max_events, timeout)
+
+ for kevent in kevent_list:
+ fd = kevent.ident
+ event_mask = kevent.filter
+ events = 0
+ if event_mask == select.KQ_FILTER_READ:
+ events |= EVENT_READ
+ if event_mask == select.KQ_FILTER_WRITE:
+ events |= EVENT_WRITE
+
+ key = self._key_from_fd(fd)
+ if key:
+ if key.fd not in ready_fds:
+ ready_fds[key.fd] = (key, events & key.events)
+ else:
+ old_events = ready_fds[key.fd][1]
+ ready_fds[key.fd] = (key, (events | old_events) & key.events)
+
+ return list(ready_fds.values())
+
+ def close(self):
+ self._kqueue.close()
+ super(KqueueSelector, self).close()
+
+
+if not hasattr(select, 'select'): # Platform-specific: AppEngine
+ HAS_SELECT = False
+
+
+def _can_allocate(struct):
+ """ Checks that select structs can be allocated by the underlying
+ operating system, not just advertised by the select module. We don't
+ check select() because we'll be hopeful that most platforms that
+ don't have it available will not advertise it. (i.e. GAE) """
+ try:
+ # select.poll() objects won't fail until used.
+ if struct == 'poll':
+ p = select.poll()
+ p.poll(0)
+
+ # All others will fail on allocation.
+ else:
+ getattr(select, struct)().close()
+ return True
+ except (OSError, AttributeError) as e:
+ return False
+
+
+# Choose the best implementation, roughly:
+# kqueue == epoll > poll > select. Devpoll not supported. (See above)
+# select() also can't accept a FD > FD_SETSIZE (usually around 1024)
+def DefaultSelector():
+ """ This function serves as a first call for DefaultSelector to
+ detect if the select module is being monkey-patched incorrectly
+ by eventlet or greenlet, and to preserve proper behavior. """
+ global _DEFAULT_SELECTOR
+ if _DEFAULT_SELECTOR is None:
+ if _can_allocate('kqueue'):
+ _DEFAULT_SELECTOR = KqueueSelector
+ elif _can_allocate('epoll'):
+ _DEFAULT_SELECTOR = EpollSelector
+ elif _can_allocate('poll'):
+ _DEFAULT_SELECTOR = PollSelector
+ elif hasattr(select, 'select'):
+ _DEFAULT_SELECTOR = SelectSelector
+ else: # Platform-specific: AppEngine
+ raise ValueError('Platform does not have a selector')
+ return _DEFAULT_SELECTOR()
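+
+
+# Usage sketch (illustrative; `sock` is assumed to be a connected socket):
+# wait until the socket becomes readable, then service it.
+#
+#   selector = DefaultSelector()
+#   selector.register(sock, EVENT_READ)
+#   for key, events in selector.select(timeout=1.0):
+#       data = key.fileobj.recv(4096)
+#   selector.close()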
diff --git a/collectors/python.d.plugin/python_modules/urllib3/util/ssl_.py b/collectors/python.d.plugin/python_modules/urllib3/util/ssl_.py
new file mode 100644
index 000000000..ece3ec39e
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/urllib3/util/ssl_.py
@@ -0,0 +1,338 @@
+# SPDX-License-Identifier: MIT
+from __future__ import absolute_import
+import errno
+import warnings
+import hmac
+
+from binascii import hexlify, unhexlify
+from hashlib import md5, sha1, sha256
+
+from ..exceptions import SSLError, InsecurePlatformWarning, SNIMissingWarning
+
+
+SSLContext = None
+HAS_SNI = False
+IS_PYOPENSSL = False
+IS_SECURETRANSPORT = False
+
+# Maps the length of a digest to a possible hash function producing this digest
+HASHFUNC_MAP = {
+ 32: md5,
+ 40: sha1,
+ 64: sha256,
+}
+
+
+def _const_compare_digest_backport(a, b):
+ """
+ Compare two digests of equal length in constant time.
+
+ The digests must be of type str/bytes.
+ Returns True if the digests match, and False otherwise.
+ """
+ result = abs(len(a) - len(b))
+ for l, r in zip(bytearray(a), bytearray(b)):
+ result |= l ^ r
+ return result == 0
+
+
+_const_compare_digest = getattr(hmac, 'compare_digest',
+ _const_compare_digest_backport)
+
+
+try: # Test for SSL features
+ import ssl
+ from ssl import wrap_socket, CERT_NONE, PROTOCOL_SSLv23
+ from ssl import HAS_SNI # Has SNI?
+except ImportError:
+ pass
+
+
+try:
+ from ssl import OP_NO_SSLv2, OP_NO_SSLv3, OP_NO_COMPRESSION
+except ImportError:
+ OP_NO_SSLv2, OP_NO_SSLv3 = 0x1000000, 0x2000000
+ OP_NO_COMPRESSION = 0x20000
+
+# A secure default.
+# Sources for more information on TLS ciphers:
+#
+# - https://wiki.mozilla.org/Security/Server_Side_TLS
+# - https://www.ssllabs.com/projects/best-practices/index.html
+# - https://hynek.me/articles/hardening-your-web-servers-ssl-ciphers/
+#
+# The general intent is:
+# - Prefer cipher suites that offer perfect forward secrecy (DHE/ECDHE),
+# - prefer ECDHE over DHE for better performance,
+# - prefer any AES-GCM and ChaCha20 over any AES-CBC for better performance and
+# security,
+# - prefer AES-GCM over ChaCha20 because hardware-accelerated AES is common,
+# - disable NULL authentication, MD5 MACs and DSS for security reasons.
+DEFAULT_CIPHERS = ':'.join([
+ 'ECDH+AESGCM',
+ 'ECDH+CHACHA20',
+ 'DH+AESGCM',
+ 'DH+CHACHA20',
+ 'ECDH+AES256',
+ 'DH+AES256',
+ 'ECDH+AES128',
+ 'DH+AES',
+ 'RSA+AESGCM',
+ 'RSA+AES',
+ '!aNULL',
+ '!eNULL',
+ '!MD5',
+])
+
+try:
+ from ssl import SSLContext # Modern SSL?
+except ImportError:
+ import sys
+
+ class SSLContext(object): # Platform-specific: Python 2 & 3.1
+ supports_set_ciphers = ((2, 7) <= sys.version_info < (3,) or
+ (3, 2) <= sys.version_info)
+
+ def __init__(self, protocol_version):
+ self.protocol = protocol_version
+ # Use default values from a real SSLContext
+ self.check_hostname = False
+ self.verify_mode = ssl.CERT_NONE
+ self.ca_certs = None
+ self.options = 0
+ self.certfile = None
+ self.keyfile = None
+ self.ciphers = None
+
+ def load_cert_chain(self, certfile, keyfile):
+ self.certfile = certfile
+ self.keyfile = keyfile
+
+ def load_verify_locations(self, cafile=None, capath=None):
+ self.ca_certs = cafile
+
+ if capath is not None:
+ raise SSLError("CA directories not supported in older Pythons")
+
+ def set_ciphers(self, cipher_suite):
+ if not self.supports_set_ciphers:
+ raise TypeError(
+ 'Your version of Python does not support setting '
+ 'a custom cipher suite. Please upgrade to Python '
+ '2.7, 3.2, or later if you need this functionality.'
+ )
+ self.ciphers = cipher_suite
+
+ def wrap_socket(self, socket, server_hostname=None, server_side=False):
+ warnings.warn(
+ 'A true SSLContext object is not available. This prevents '
+ 'urllib3 from configuring SSL appropriately and may cause '
+ 'certain SSL connections to fail. You can upgrade to a newer '
+ 'version of Python to solve this. For more information, see '
+ 'https://urllib3.readthedocs.io/en/latest/advanced-usage.html'
+ '#ssl-warnings',
+ InsecurePlatformWarning
+ )
+ kwargs = {
+ 'keyfile': self.keyfile,
+ 'certfile': self.certfile,
+ 'ca_certs': self.ca_certs,
+ 'cert_reqs': self.verify_mode,
+ 'ssl_version': self.protocol,
+ 'server_side': server_side,
+ }
+ if self.supports_set_ciphers: # Platform-specific: Python 2.7+
+ return wrap_socket(socket, ciphers=self.ciphers, **kwargs)
+ else: # Platform-specific: Python 2.6
+ return wrap_socket(socket, **kwargs)
+
+
+def assert_fingerprint(cert, fingerprint):
+ """
+ Checks if given fingerprint matches the supplied certificate.
+
+ :param cert:
+ Certificate as bytes object.
+ :param fingerprint:
+ Fingerprint as string of hexdigits, can be interspersed by colons.
+ """
+
+ fingerprint = fingerprint.replace(':', '').lower()
+ digest_length = len(fingerprint)
+ hashfunc = HASHFUNC_MAP.get(digest_length)
+ if not hashfunc:
+ raise SSLError(
+ 'Fingerprint of invalid length: {0}'.format(fingerprint))
+
+ # We need encode() here for py32; works on py2 and py3.
+ fingerprint_bytes = unhexlify(fingerprint.encode())
+
+ cert_digest = hashfunc(cert).digest()
+
+ if not _const_compare_digest(cert_digest, fingerprint_bytes):
+ raise SSLError('Fingerprints did not match. Expected "{0}", got "{1}".'
+ .format(fingerprint, hexlify(cert_digest)))
+
+
+def resolve_cert_reqs(candidate):
+ """
+ Resolves the argument to a numeric constant, which can be passed to
+ the wrap_socket function/method from the ssl module.
+ Defaults to :data:`ssl.CERT_NONE`.
+ If given a string it is assumed to be the name of the constant in the
+ :mod:`ssl` module or its abbreviation.
+ (So you can specify `REQUIRED` instead of `CERT_REQUIRED`.)
+ If it's neither `None` nor a string we assume it is already the numeric
+ constant which can directly be passed to wrap_socket.
+ """
+ if candidate is None:
+ return CERT_NONE
+
+ if isinstance(candidate, str):
+ res = getattr(ssl, candidate, None)
+ if res is None:
+ res = getattr(ssl, 'CERT_' + candidate)
+ return res
+
+ return candidate
+
+
+def resolve_ssl_version(candidate):
+ """
+ Like resolve_cert_reqs(), but resolves the argument to an ssl.PROTOCOL_* constant.
+ """
+ if candidate is None:
+ return PROTOCOL_SSLv23
+
+ if isinstance(candidate, str):
+ res = getattr(ssl, candidate, None)
+ if res is None:
+ res = getattr(ssl, 'PROTOCOL_' + candidate)
+ return res
+
+ return candidate
+
+
+def create_urllib3_context(ssl_version=None, cert_reqs=None,
+ options=None, ciphers=None):
+ """All arguments have the same meaning as ``ssl_wrap_socket``.
+
+ By default, this function does a lot of the same work that
+ ``ssl.create_default_context`` does on Python 3.4+. It:
+
+ - Disables SSLv2, SSLv3, and compression
+ - Sets a restricted set of server ciphers
+
+ If you wish to enable SSLv3, you can do::
+
+ from urllib3.util import ssl_
+ context = ssl_.create_urllib3_context()
+ context.options &= ~ssl_.OP_NO_SSLv3
+
+ You can do the same to enable compression (substituting ``COMPRESSION``
+ for ``SSLv3`` in the last line above).
+
+ :param ssl_version:
+ The desired protocol version to use. This will default to
+ PROTOCOL_SSLv23 which will negotiate the highest protocol that both
+ the server and your installation of OpenSSL support.
+ :param cert_reqs:
+ Whether to require certificate verification. This defaults to
+ ``ssl.CERT_REQUIRED``.
+ :param options:
+ Specific OpenSSL options. These default to ``ssl.OP_NO_SSLv2``,
+ ``ssl.OP_NO_SSLv3``, ``ssl.OP_NO_COMPRESSION``.
+ :param ciphers:
+ Which cipher suites to allow the server to select.
+ :returns:
+ Constructed SSLContext object with specified options
+ :rtype: SSLContext
+ """
+ context = SSLContext(ssl_version or ssl.PROTOCOL_SSLv23)
+
+ # Setting the default here, as we may have no ssl module on import
+ cert_reqs = ssl.CERT_REQUIRED if cert_reqs is None else cert_reqs
+
+ if options is None:
+ options = 0
+ # SSLv2 is easily broken and is considered harmful and dangerous
+ options |= OP_NO_SSLv2
+ # SSLv3 has several problems and is now dangerous
+ options |= OP_NO_SSLv3
+ # Disable compression to prevent CRIME attacks for OpenSSL 1.0+
+ # (issue #309)
+ options |= OP_NO_COMPRESSION
+
+ context.options |= options
+
+ if getattr(context, 'supports_set_ciphers', True): # Platform-specific: Python 2.6
+ context.set_ciphers(ciphers or DEFAULT_CIPHERS)
+
+ context.verify_mode = cert_reqs
+ if getattr(context, 'check_hostname', None) is not None: # Platform-specific: Python 3.2
+ # We do our own verification, including fingerprints and alternative
+ # hostnames. So disable it here
+ context.check_hostname = False
+ return context
+
+
+def ssl_wrap_socket(sock, keyfile=None, certfile=None, cert_reqs=None,
+ ca_certs=None, server_hostname=None,
+ ssl_version=None, ciphers=None, ssl_context=None,
+ ca_cert_dir=None):
+ """
+ All arguments except for server_hostname, ssl_context, and ca_cert_dir have
+ the same meaning as they do when using :func:`ssl.wrap_socket`.
+
+ :param server_hostname:
+ When SNI is supported, the expected hostname of the certificate
+ :param ssl_context:
+ A pre-made :class:`SSLContext` object. If none is provided, one will
+ be created using :func:`create_urllib3_context`.
+ :param ciphers:
+ A string of ciphers we wish the client to support. This is not
+ supported on Python 2.6 as the ssl module does not support it.
+ :param ca_cert_dir:
+ A directory containing CA certificates in multiple separate files, as
+ supported by OpenSSL's -CApath flag or the capath argument to
+ SSLContext.load_verify_locations().
+ """
+ context = ssl_context
+ if context is None:
+ # Note: This branch of code and all the variables in it are no longer
+ # used by urllib3 itself. We should consider deprecating and removing
+ # this code.
+ context = create_urllib3_context(ssl_version, cert_reqs,
+ ciphers=ciphers)
+
+ if ca_certs or ca_cert_dir:
+ try:
+ context.load_verify_locations(ca_certs, ca_cert_dir)
+ except IOError as e: # Platform-specific: Python 2.6, 2.7, 3.2
+ raise SSLError(e)
+ # Py33 raises FileNotFoundError which subclasses OSError
+ # These are not equivalent unless we check the errno attribute
+ except OSError as e: # Platform-specific: Python 3.3 and beyond
+ if e.errno == errno.ENOENT:
+ raise SSLError(e)
+ raise
+ elif getattr(context, 'load_default_certs', None) is not None:
+ # try to load OS default certs; works well on Windows (requires Python 3.4+)
+ context.load_default_certs()
+
+ if certfile:
+ context.load_cert_chain(certfile, keyfile)
+ if HAS_SNI: # Platform-specific: OpenSSL with enabled SNI
+ return context.wrap_socket(sock, server_hostname=server_hostname)
+
+ warnings.warn(
+ 'An HTTPS request has been made, but the SNI (Subject Name '
+ 'Indication) extension to TLS is not available on this platform. '
+ 'This may cause the server to present an incorrect TLS '
+ 'certificate, which can cause validation failures. You can upgrade to '
+ 'a newer version of Python to solve this. For more information, see '
+ 'https://urllib3.readthedocs.io/en/latest/advanced-usage.html'
+ '#ssl-warnings',
+ SNIMissingWarning
+ )
+ return context.wrap_socket(sock)
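+
+
+# Usage sketch (illustrative): build a verifying context and wrap an already
+# connected socket `sock`; the CA bundle path and hostname are placeholders.
+#
+#   ctx = create_urllib3_context(cert_reqs=ssl.CERT_REQUIRED)
+#   ctx.load_verify_locations(cafile='/path/to/ca-bundle.crt')
+#   tls_sock = ssl_wrap_socket(sock, server_hostname='example.com', ssl_context=ctx)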
diff --git a/collectors/python.d.plugin/python_modules/urllib3/util/timeout.py b/collectors/python.d.plugin/python_modules/urllib3/util/timeout.py
new file mode 100644
index 000000000..4041cf9b9
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/urllib3/util/timeout.py
@@ -0,0 +1,243 @@
+# SPDX-License-Identifier: MIT
+from __future__ import absolute_import
+# The default socket timeout, used by httplib to indicate that no timeout was
+# specified by the user
+from socket import _GLOBAL_DEFAULT_TIMEOUT
+import time
+
+from ..exceptions import TimeoutStateError
+
+# A sentinel value to indicate that no timeout was specified by the user in
+# urllib3
+_Default = object()
+
+
+# Use time.monotonic if available.
+current_time = getattr(time, "monotonic", time.time)
+
+
+class Timeout(object):
+ """ Timeout configuration.
+
+ Timeouts can be defined as a default for a pool::
+
+ timeout = Timeout(connect=2.0, read=7.0)
+ http = PoolManager(timeout=timeout)
+ response = http.request('GET', 'http://example.com/')
+
+ Or per-request (which overrides the default for the pool)::
+
+ response = http.request('GET', 'http://example.com/', timeout=Timeout(10))
+
+ Timeouts can be disabled by setting all the parameters to ``None``::
+
+ no_timeout = Timeout(connect=None, read=None)
+ response = http.request('GET', 'http://example.com/', timeout=no_timeout)
+
+
+ :param total:
+ This combines the connect and read timeouts into one; the read timeout
+ will be set to the time leftover from the connect attempt. In the
+ event that both a connect timeout and a total are specified, or a read
+ timeout and a total are specified, the shorter timeout will be applied.
+
+ Defaults to None.
+
+ :type total: integer, float, or None
+
+ :param connect:
+ The maximum amount of time to wait for a connection attempt to a server
+ to succeed. Omitting the parameter will default the connect timeout to
+ the system default, probably `the global default timeout in socket.py
+ <http://hg.python.org/cpython/file/603b4d593758/Lib/socket.py#l535>`_.
+ None will set an infinite timeout for connection attempts.
+
+ :type connect: integer, float, or None
+
+ :param read:
+ The maximum amount of time to wait between consecutive
+ read operations for a response from the server. Omitting
+ the parameter will default the read timeout to the system
+ default, probably `the global default timeout in socket.py
+ <http://hg.python.org/cpython/file/603b4d593758/Lib/socket.py#l535>`_.
+ None will set an infinite timeout.
+
+ :type read: integer, float, or None
+
+ .. note::
+
+ Many factors can affect the total amount of time for urllib3 to return
+ an HTTP response.
+
+ For example, Python's DNS resolver does not obey the timeout specified
+ on the socket. Other factors that can affect total request time include
+ high CPU load, high swap, the program running at a low priority level,
+ or other behaviors.
+
+ In addition, the read and total timeouts only measure the time between
+ read operations on the socket connecting the client and the server,
+ not the total amount of time for the request to return a complete
+ response. For most requests, the timeout is raised because the server
+ has not sent the first byte in the specified time. This is not always
+ the case; if a server streams one byte every fifteen seconds, a timeout
+ of 20 seconds will not trigger, even though the request will take
+ several minutes to complete.
+
+ If your goal is to cut off any request after a set amount of wall clock
+ time, consider having a second "watcher" thread to cut off a slow
+ request.
+ """
+
+ #: A sentinel object representing the default timeout value
+ DEFAULT_TIMEOUT = _GLOBAL_DEFAULT_TIMEOUT
+
+ def __init__(self, total=None, connect=_Default, read=_Default):
+ self._connect = self._validate_timeout(connect, 'connect')
+ self._read = self._validate_timeout(read, 'read')
+ self.total = self._validate_timeout(total, 'total')
+ self._start_connect = None
+
+ def __str__(self):
+ return '%s(connect=%r, read=%r, total=%r)' % (
+ type(self).__name__, self._connect, self._read, self.total)
+
+ @classmethod
+ def _validate_timeout(cls, value, name):
+ """ Check that a timeout attribute is valid.
+
+ :param value: The timeout value to validate
+ :param name: The name of the timeout attribute to validate. This is
+ used to specify in error messages.
+ :return: The validated and cast version of the given value.
+ :raises ValueError: If it is a numeric value less than or equal to
+ zero, or the type is not an integer, float, or None.
+ """
+ if value is _Default:
+ return cls.DEFAULT_TIMEOUT
+
+ if value is None or value is cls.DEFAULT_TIMEOUT:
+ return value
+
+ if isinstance(value, bool):
+ raise ValueError("Timeout cannot be a boolean value. It must "
+ "be an int, float or None.")
+ try:
+ float(value)
+ except (TypeError, ValueError):
+ raise ValueError("Timeout value %s was %s, but it must be an "
+ "int, float or None." % (name, value))
+
+ try:
+ if value <= 0:
+ raise ValueError("Attempted to set %s timeout to %s, but the "
+ "timeout cannot be set to a value less "
+ "than or equal to 0." % (name, value))
+ except TypeError: # Python 3
+ raise ValueError("Timeout value %s was %s, but it must be an "
+ "int, float or None." % (name, value))
+
+ return value
+
+ @classmethod
+ def from_float(cls, timeout):
+ """ Create a new Timeout from a legacy timeout value.
+
+ The timeout value used by httplib.py sets the same timeout on the
+ connect() and recv() socket requests. This creates a :class:`Timeout`
+ object that sets the individual timeouts to the ``timeout`` value
+ passed to this function.
+
+ :param timeout: The legacy timeout value.
+ :type timeout: integer, float, sentinel default object, or None
+ :return: Timeout object
+ :rtype: :class:`Timeout`
+ """
+ return Timeout(read=timeout, connect=timeout)
+
+ def clone(self):
+ """ Create a copy of the timeout object
+
+ Timeout properties are stored per-pool but each request needs a fresh
+ Timeout object to ensure each one has its own start/stop configured.
+
+ :return: a copy of the timeout object
+ :rtype: :class:`Timeout`
+ """
+ # We can't use copy.deepcopy because that will also create a new object
+ # for _GLOBAL_DEFAULT_TIMEOUT, which socket.py uses as a sentinel to
+ # detect the user default.
+ return Timeout(connect=self._connect, read=self._read,
+ total=self.total)
+
+ def start_connect(self):
+ """ Start the timeout clock, used during a connect() attempt
+
+ :raises urllib3.exceptions.TimeoutStateError: if you attempt
+ to start a timer that has been started already.
+ """
+ if self._start_connect is not None:
+ raise TimeoutStateError("Timeout timer has already been started.")
+ self._start_connect = current_time()
+ return self._start_connect
+
+ def get_connect_duration(self):
+ """ Gets the time elapsed since the call to :meth:`start_connect`.
+
+ :return: Elapsed time.
+ :rtype: float
+ :raises urllib3.exceptions.TimeoutStateError: if you attempt
+ to get duration for a timer that hasn't been started.
+ """
+ if self._start_connect is None:
+ raise TimeoutStateError("Can't get connect duration for timer "
+ "that has not started.")
+ return current_time() - self._start_connect
+
+ @property
+ def connect_timeout(self):
+ """ Get the value to use when setting a connection timeout.
+
+ This will be a positive float or integer, the value None
+ (never timeout), or the default system timeout.
+
+ :return: Connect timeout.
+ :rtype: int, float, :attr:`Timeout.DEFAULT_TIMEOUT` or None
+ """
+ if self.total is None:
+ return self._connect
+
+ if self._connect is None or self._connect is self.DEFAULT_TIMEOUT:
+ return self.total
+
+ return min(self._connect, self.total)
+
+ @property
+ def read_timeout(self):
+ """ Get the value for the read timeout.
+
+ This assumes some time has elapsed in the connection timeout and
+ computes the read timeout appropriately.
+
+ If self.total is set, the read timeout is dependent on the amount of
+ time taken by the connect timeout. If the connection time has not been
+ established, a :exc:`~urllib3.exceptions.TimeoutStateError` will be
+ raised.
+
+ :return: Value to use for the read timeout.
+ :rtype: int, float, :attr:`Timeout.DEFAULT_TIMEOUT` or None
+ :raises urllib3.exceptions.TimeoutStateError: If :meth:`start_connect`
+ has not yet been called on this object.
+ """
+ if (self.total is not None and
+ self.total is not self.DEFAULT_TIMEOUT and
+ self._read is not None and
+ self._read is not self.DEFAULT_TIMEOUT):
+ # In case the connect timeout has not yet been established.
+ if self._start_connect is None:
+ return self._read
+ return max(0, min(self.total - self.get_connect_duration(),
+ self._read))
+ elif self.total is not None and self.total is not self.DEFAULT_TIMEOUT:
+ return max(0, self.total - self.get_connect_duration())
+ else:
+ return self._read
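+
+
+# Usage sketch (illustrative): with both per-phase and total limits set, the
+# effective connect timeout is the smaller of the two.
+#
+#   t = Timeout(connect=5.0, read=5.0, total=7.0)
+#   t.connect_timeout          # -> 5.0
+#   t.start_connect()          # start the clock before connecting
+#   t.get_connect_duration()   # seconds elapsed since start_connect()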
diff --git a/collectors/python.d.plugin/python_modules/urllib3/util/url.py b/collectors/python.d.plugin/python_modules/urllib3/util/url.py
new file mode 100644
index 000000000..99fd6534a
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/urllib3/util/url.py
@@ -0,0 +1,231 @@
+# SPDX-License-Identifier: MIT
+from __future__ import absolute_import
+from collections import namedtuple
+
+from ..exceptions import LocationParseError
+
+
+url_attrs = ['scheme', 'auth', 'host', 'port', 'path', 'query', 'fragment']
+
+# We only want to normalize urls with an HTTP(S) scheme.
+# urllib3 infers URLs without a scheme (None) to be http.
+NORMALIZABLE_SCHEMES = ('http', 'https', None)
+
+
+class Url(namedtuple('Url', url_attrs)):
+ """
+ Datastructure for representing an HTTP URL. Used as a return value for
+ :func:`parse_url`. Both the scheme and host are normalized as they are
+ both case-insensitive according to RFC 3986.
+ """
+ __slots__ = ()
+
+ def __new__(cls, scheme=None, auth=None, host=None, port=None, path=None,
+ query=None, fragment=None):
+ if path and not path.startswith('/'):
+ path = '/' + path
+ if scheme:
+ scheme = scheme.lower()
+ if host and scheme in NORMALIZABLE_SCHEMES:
+ host = host.lower()
+ return super(Url, cls).__new__(cls, scheme, auth, host, port, path,
+ query, fragment)
+
+ @property
+ def hostname(self):
+ """For backwards-compatibility with urlparse. We're nice like that."""
+ return self.host
+
+ @property
+ def request_uri(self):
+ """Absolute path including the query string."""
+ uri = self.path or '/'
+
+ if self.query is not None:
+ uri += '?' + self.query
+
+ return uri
+
+ @property
+ def netloc(self):
+ """Network location including host and port"""
+ if self.port:
+ return '%s:%d' % (self.host, self.port)
+ return self.host
+
+ @property
+ def url(self):
+ """
+ Convert self into a url
+
+ This function should more or less round-trip with :func:`.parse_url`. The
+ returned url may not be exactly the same as the url inputted to
+ :func:`.parse_url`, but it should be equivalent by the RFC (e.g., urls
+ with a blank port will have : removed).
+
+ Example: ::
+
+ >>> U = parse_url('http://google.com/mail/')
+ >>> U.url
+ 'http://google.com/mail/'
+ >>> Url('http', 'username:password', 'host.com', 80,
+ ... '/path', 'query', 'fragment').url
+ 'http://username:password@host.com:80/path?query#fragment'
+ """
+ scheme, auth, host, port, path, query, fragment = self
+ url = ''
+
+ # We use "is not None" we want things to happen with empty strings (or 0 port)
+ if scheme is not None:
+ url += scheme + '://'
+ if auth is not None:
+ url += auth + '@'
+ if host is not None:
+ url += host
+ if port is not None:
+ url += ':' + str(port)
+ if path is not None:
+ url += path
+ if query is not None:
+ url += '?' + query
+ if fragment is not None:
+ url += '#' + fragment
+
+ return url
+
+ def __str__(self):
+ return self.url
+
+
+def split_first(s, delims):
+ """
+ Given a string and an iterable of delimiters, split on the first found
+ delimiter. Return two split parts and the matched delimiter.
+
+ If not found, then the first part is the full input string.
+
+ Example::
+
+ >>> split_first('foo/bar?baz', '?/=')
+ ('foo', 'bar?baz', '/')
+ >>> split_first('foo/bar?baz', '123')
+ ('foo/bar?baz', '', None)
+
+ Scales linearly with number of delims. Not ideal for large number of delims.
+ """
+ min_idx = None
+ min_delim = None
+ for d in delims:
+ idx = s.find(d)
+ if idx < 0:
+ continue
+
+ if min_idx is None or idx < min_idx:
+ min_idx = idx
+ min_delim = d
+
+ if min_idx is None or min_idx < 0:
+ return s, '', None
+
+ return s[:min_idx], s[min_idx + 1:], min_delim
+
+
+def parse_url(url):
+ """
+ Given a url, return a parsed :class:`.Url` namedtuple. Best-effort is
+ performed to parse incomplete urls. Fields not provided will be None.
+
+ Partly backwards-compatible with :mod:`urlparse`.
+
+ Example::
+
+ >>> parse_url('http://google.com/mail/')
+ Url(scheme='http', host='google.com', port=None, path='/mail/', ...)
+ >>> parse_url('google.com:80')
+ Url(scheme=None, host='google.com', port=80, path=None, ...)
+ >>> parse_url('/foo?bar')
+ Url(scheme=None, host=None, port=None, path='/foo', query='bar', ...)
+ """
+
+ # While this code has overlap with stdlib's urlparse, it is much
+ # simplified for our needs and less annoying.
+ # Additionally, this implementation does silly things to be optimal
+ # on CPython.
+
+ if not url:
+ # Empty
+ return Url()
+
+ scheme = None
+ auth = None
+ host = None
+ port = None
+ path = None
+ fragment = None
+ query = None
+
+ # Scheme
+ if '://' in url:
+ scheme, url = url.split('://', 1)
+
+ # Find the earliest Authority Terminator
+ # (http://tools.ietf.org/html/rfc3986#section-3.2)
+ url, path_, delim = split_first(url, ['/', '?', '#'])
+
+ if delim:
+ # Reassemble the path
+ path = delim + path_
+
+ # Auth
+ if '@' in url:
+ # Last '@' denotes end of auth part
+ auth, url = url.rsplit('@', 1)
+
+ # IPv6
+ if url and url[0] == '[':
+ host, url = url.split(']', 1)
+ host += ']'
+
+ # Port
+ if ':' in url:
+ _host, port = url.split(':', 1)
+
+ if not host:
+ host = _host
+
+ if port:
+ # If given, ports must be integers. No whitespace, no plus or
+ # minus prefixes, no non-integer digits such as ^2 (superscript).
+ if not port.isdigit():
+ raise LocationParseError(url)
+ try:
+ port = int(port)
+ except ValueError:
+ raise LocationParseError(url)
+ else:
+ # Blank ports are cool, too. (rfc3986#section-3.2.3)
+ port = None
+
+ elif not host and url:
+ host = url
+
+ if not path:
+ return Url(scheme, auth, host, port, path, query, fragment)
+
+ # Fragment
+ if '#' in path:
+ path, fragment = path.split('#', 1)
+
+ # Query
+ if '?' in path:
+ path, query = path.split('?', 1)
+
+ return Url(scheme, auth, host, port, path, query, fragment)
+
+
+def get_host(url):
+ """
+ Deprecated. Use :func:`parse_url` instead.
+ """
+ p = parse_url(url)
+ return p.scheme or 'http', p.hostname, p.port
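+
+
+# Usage sketch (illustrative):
+#
+#   get_host('http://www.google.com:80/mail/')
+#   # -> ('http', 'www.google.com', 80)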
diff --git a/collectors/python.d.plugin/python_modules/urllib3/util/wait.py b/collectors/python.d.plugin/python_modules/urllib3/util/wait.py
new file mode 100644
index 000000000..21e72979c
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/urllib3/util/wait.py
@@ -0,0 +1,41 @@
+# SPDX-License-Identifier: MIT
+from .selectors import (
+ HAS_SELECT,
+ DefaultSelector,
+ EVENT_READ,
+ EVENT_WRITE
+)
+
+
+def _wait_for_io_events(socks, events, timeout=None):
+ """ Waits for IO events to be available from a list of sockets
+ or optionally a single socket if passed in. Returns a list of
+ sockets that can be interacted with immediately. """
+ if not HAS_SELECT:
+ raise ValueError('Platform does not have a selector')
+ if not isinstance(socks, list):
+ # Probably just a single socket.
+ if hasattr(socks, "fileno"):
+ socks = [socks]
+ # Otherwise it might be a non-list iterable.
+ else:
+ socks = list(socks)
+ with DefaultSelector() as selector:
+ for sock in socks:
+ selector.register(sock, events)
+ return [key[0].fileobj for key in
+ selector.select(timeout) if key[1] & events]
+
+
+def wait_for_read(socks, timeout=None):
+ """ Waits for reading to be available from a list of sockets
+ or optionally a single socket if passed in. Returns a list of
+ sockets that can be read from immediately. """
+ return _wait_for_io_events(socks, EVENT_READ, timeout)
+
+
+def wait_for_write(socks, timeout=None):
+ """ Waits for writing to be available from a list of sockets
+ or optionally a single socket if passed in. Returns a list of
+ sockets that can be written to immediately. """
+ return _wait_for_io_events(socks, EVENT_WRITE, timeout)
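+
+
+# Usage sketch (illustrative): block for up to one second until one of the
+# given connected sockets is readable.
+#
+#   readable = wait_for_read([sock1, sock2], timeout=1.0)
+#   for sock in readable:
+#       data = sock.recv(4096)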
diff --git a/collectors/python.d.plugin/rabbitmq/Makefile.inc b/collectors/python.d.plugin/rabbitmq/Makefile.inc
new file mode 100644
index 000000000..7e67ef512
--- /dev/null
+++ b/collectors/python.d.plugin/rabbitmq/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += rabbitmq/rabbitmq.chart.py
+dist_pythonconfig_DATA += rabbitmq/rabbitmq.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += rabbitmq/README.md rabbitmq/Makefile.inc
+
diff --git a/collectors/python.d.plugin/rabbitmq/README.md b/collectors/python.d.plugin/rabbitmq/README.md
new file mode 100644
index 000000000..22d367c4d
--- /dev/null
+++ b/collectors/python.d.plugin/rabbitmq/README.md
@@ -0,0 +1,56 @@
+# rabbitmq
+
+This module monitors RabbitMQ performance and health metrics.
+
+The following charts are drawn:
+
+1. **Queued Messages**
+ * ready
+ * unacknowledged
+
+2. **Message Rates**
+ * ack
+ * redelivered
+ * deliver
+ * publish
+
+3. **Global Counts**
+ * channels
+ * consumers
+ * connections
+ * queues
+ * exchanges
+
+4. **File Descriptors**
+ * used descriptors
+
+5. **Socket Descriptors**
+ * used descriptors
+
+6. **Erlang processes**
+ * used processes
+
+7. **Erlang run queue**
+ * Erlang run queue
+
+8. **Memory**
+ * free memory in megabytes
+
+9. **Disk Space**
+ * free disk space in gigabytes
+
+### configuration
+
+```yaml
+socket:
+ name : 'local'
+ host : '127.0.0.1'
+ port : 15672
+ user : 'guest'
+ pass : 'guest'
+
+```
+
+When no configuration file is found, the module tries to connect to `localhost:15672`.
+
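+A remote or TLS-enabled management API can be monitored by overriding the
+defaults; the host, credentials and scheme below are illustrative only:
+
+```yaml
+remote:
+ name   : 'remote'
+ host   : 'rabbitmq.example.com'
+ port   : 15672
+ scheme : 'https'
+ user   : 'monitor'
+ pass   : 'secret'
+```
+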
+---
diff --git a/collectors/python.d.plugin/rabbitmq/rabbitmq.chart.py b/collectors/python.d.plugin/rabbitmq/rabbitmq.chart.py
new file mode 100644
index 000000000..8298b4032
--- /dev/null
+++ b/collectors/python.d.plugin/rabbitmq/rabbitmq.chart.py
@@ -0,0 +1,207 @@
+# -*- coding: utf-8 -*-
+# Description: rabbitmq netdata python.d module
+# Author: l2isbad
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from collections import namedtuple
+from json import loads
+from socket import gethostbyname, gaierror
+from threading import Thread
+try:
+ from queue import Queue
+except ImportError:
+ from Queue import Queue
+
+from bases.FrameworkServices.UrlService import UrlService
+
+# default module values (can be overridden per job in `config`)
+update_every = 1
+priority = 60000
+retries = 60
+
+METHODS = namedtuple('METHODS', ['get_data', 'url', 'stats'])
+
+NODE_STATS = [
+ 'fd_used',
+ 'mem_used',
+ 'sockets_used',
+ 'proc_used',
+ 'disk_free',
+ 'run_queue'
+]
+
+OVERVIEW_STATS = [
+ 'object_totals.channels',
+ 'object_totals.consumers',
+ 'object_totals.connections',
+ 'object_totals.queues',
+ 'object_totals.exchanges',
+ 'queue_totals.messages_ready',
+ 'queue_totals.messages_unacknowledged',
+ 'message_stats.ack',
+ 'message_stats.redeliver',
+ 'message_stats.deliver',
+ 'message_stats.publish'
+]
+
+ORDER = [
+ 'queued_messages',
+ 'message_rates',
+ 'global_counts',
+ 'file_descriptors',
+ 'socket_descriptors',
+ 'erlang_processes',
+ 'erlang_run_queue',
+ 'memory',
+ 'disk_space'
+]
+
+CHARTS = {
+ 'file_descriptors': {
+ 'options': [None, 'File Descriptors', 'descriptors', 'overview', 'rabbitmq.file_descriptors', 'line'],
+ 'lines': [
+ ['fd_used', 'used', 'absolute']
+ ]
+ },
+ 'memory': {
+ 'options': [None, 'Memory', 'MB', 'overview', 'rabbitmq.memory', 'line'],
+ 'lines': [
+ ['mem_used', 'used', 'absolute', 1, 1024 << 10]
+ ]
+ },
+ 'disk_space': {
+ 'options': [None, 'Disk Space', 'GB', 'overview', 'rabbitmq.disk_space', 'line'],
+ 'lines': [
+ ['disk_free', 'free', 'absolute', 1, 1024 ** 3]
+ ]
+ },
+ 'socket_descriptors': {
+ 'options': [None, 'Socket Descriptors', 'descriptors', 'overview', 'rabbitmq.sockets', 'line'],
+ 'lines': [
+ ['sockets_used', 'used', 'absolute']
+ ]
+ },
+ 'erlang_processes': {
+ 'options': [None, 'Erlang Processes', 'processes', 'overview', 'rabbitmq.processes', 'line'],
+ 'lines': [
+ ['proc_used', 'used', 'absolute']
+ ]
+ },
+ 'erlang_run_queue': {
+ 'options': [None, 'Erlang Run Queue', 'processes', 'overview', 'rabbitmq.erlang_run_queue', 'line'],
+ 'lines': [
+ ['run_queue', 'length', 'absolute']
+ ]
+ },
+ 'global_counts': {
+ 'options': [None, 'Global Counts', 'counts', 'overview', 'rabbitmq.global_counts', 'line'],
+ 'lines': [
+ ['object_totals_channels', 'channels', 'absolute'],
+ ['object_totals_consumers', 'consumers', 'absolute'],
+ ['object_totals_connections', 'connections', 'absolute'],
+ ['object_totals_queues', 'queues', 'absolute'],
+ ['object_totals_exchanges', 'exchanges', 'absolute']
+ ]
+ },
+ 'queued_messages': {
+ 'options': [None, 'Queued Messages', 'messages', 'overview', 'rabbitmq.queued_messages', 'stacked'],
+ 'lines': [
+ ['queue_totals_messages_ready', 'ready', 'absolute'],
+ ['queue_totals_messages_unacknowledged', 'unacknowledged', 'absolute']
+ ]
+ },
+ 'message_rates': {
+ 'options': [None, 'Message Rates', 'messages/s', 'overview', 'rabbitmq.message_rates', 'stacked'],
+ 'lines': [
+ ['message_stats_ack', 'ack', 'incremental'],
+ ['message_stats_redeliver', 'redeliver', 'incremental'],
+ ['message_stats_deliver', 'deliver', 'incremental'],
+ ['message_stats_publish', 'publish', 'incremental']
+ ]
+ }
+}
+
+
+class Service(UrlService):
+ def __init__(self, configuration=None, name=None):
+ UrlService.__init__(self, configuration=configuration, name=name)
+ self.order = ORDER
+ self.definitions = CHARTS
+ self.host = self.configuration.get('host', '127.0.0.1')
+ self.port = self.configuration.get('port', 15672)
+ self.scheme = self.configuration.get('scheme', 'http')
+
+ def check(self):
+ # We can't start if <host> and <port> are not specified
+ if not (self.host and self.port):
+ self.error('Host is not defined in the module configuration file')
+ return False
+
+ # Hostname -> ip address
+ try:
+ self.host = gethostbyname(self.host)
+ except gaierror as error:
+ self.error(str(error))
+ return False
+
+ # Add handlers (auth, self signed cert accept)
+ self.url = '{scheme}://{host}:{port}/api'.format(scheme=self.scheme,
+ host=self.host,
+ port=self.port)
+ # Add methods
+ api_node = self.url + '/nodes'
+ api_overview = self.url + '/overview'
+ self.methods = [METHODS(get_data=self._get_overview_stats,
+ url=api_node,
+ stats=NODE_STATS),
+ METHODS(get_data=self._get_overview_stats,
+ url=api_overview,
+ stats=OVERVIEW_STATS)]
+ return UrlService.check(self)
+
+ def _get_data(self):
+ threads = list()
+ queue = Queue()
+ result = dict()
+
+ for method in self.methods:
+ th = Thread(target=method.get_data,
+ args=(queue, method.url, method.stats))
+ th.start()
+ threads.append(th)
+
+ for thread in threads:
+ thread.join()
+ result.update(queue.get())
+
+ return result or None
+
+ def _get_overview_stats(self, queue, url, stats):
+ """
+ Format data received from http request
+ :return: dict
+ """
+
+ raw_data = self._get_raw_data(url)
+
+ if not raw_data:
+ return queue.put(dict())
+ data = loads(raw_data)
+ data = data[0] if isinstance(data, list) else data
+
+ to_netdata = fetch_data(raw_data=data, metrics=stats)
+ return queue.put(to_netdata)
+
+
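+# fetch_data() below flattens the dotted metric names (see OVERVIEW_STATS) into
+# underscore-joined chart dimension ids, e.g. (illustrative):
+#   fetch_data({'object_totals': {'queues': 4}}, ['object_totals.queues'])
+#   -> {'object_totals_queues': 4}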
+def fetch_data(raw_data, metrics):
+ data = dict()
+ for metric in metrics:
+ value = raw_data
+ metrics_list = metric.split('.')
+ try:
+ for m in metrics_list:
+ value = value[m]
+ except KeyError:
+ continue
+ data['_'.join(metrics_list)] = value
+ return data
diff --git a/collectors/python.d.plugin/rabbitmq/rabbitmq.conf b/collectors/python.d.plugin/rabbitmq/rabbitmq.conf
new file mode 100644
index 000000000..3f90da8a2
--- /dev/null
+++ b/collectors/python.d.plugin/rabbitmq/rabbitmq.conf
@@ -0,0 +1,82 @@
+# netdata python.d.plugin configuration for rabbitmq
+#
+# This file is in YaML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 1
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# retries sets the number of retries to be made in case of failures.
+# If unset, the default for python.d.plugin is used.
+# Attempts to restore the service are made once every update_every
+# and only if the module has collected values in the past.
+# retries: 60
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# retries: 60 # the JOB's number of restoration attempts
+# autodetection_retry: 0 # the JOB's re-check interval in seconds
+#
+# In addition to the above, the rabbitmq plugin also supports the following:
+#
+# host: 'ipaddress' # Server ip address or hostname. Default: 127.0.0.1
+# port: 'port' # Rabbitmq port. Default: 15672
+# scheme: 'scheme' # http or https. Default: http
+#
+# if the URL is password protected, the following are supported:
+#
+# user: 'username'
+# pass: 'password'
+#
+# ----------------------------------------------------------------------
+# AUTO-DETECTION JOBS
+# only one of them will run (they have the same name)
+#
+local:
+ host: '127.0.0.1'
+ user: 'guest'
+ pass: 'guest'
diff --git a/collectors/python.d.plugin/redis/Makefile.inc b/collectors/python.d.plugin/redis/Makefile.inc
new file mode 100644
index 000000000..6aab08977
--- /dev/null
+++ b/collectors/python.d.plugin/redis/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += redis/redis.chart.py
+dist_pythonconfig_DATA += redis/redis.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += redis/README.md redis/Makefile.inc
+
diff --git a/collectors/python.d.plugin/redis/README.md b/collectors/python.d.plugin/redis/README.md
new file mode 100644
index 000000000..8d21df0ca
--- /dev/null
+++ b/collectors/python.d.plugin/redis/README.md
@@ -0,0 +1,42 @@
+# redis
+
+Get INFO data from a Redis instance.
+
+The following charts are drawn:
+
+1. **Operations** per second
+ * operations
+
+2. **Hit rate** in percent
+ * rate
+
+3. **Memory utilization** in kilobytes
+ * total
+ * lua
+
+4. **Database keys**
+ * lines are created dynamically based on how many databases there are
+
+5. **Clients**
+ * connected
+ * blocked
+
+6. **Slaves**
+ * connected
+
+### configuration
+
+```yaml
+socket:
+ name : 'local'
+ socket : '/var/lib/redis/redis.sock'
+
+localhost:
+ name : 'local'
+ host : 'localhost'
+ port : 6379
+```
+
+When no configuration file is found, the module tries to connect to the TCP/IP socket `localhost:6379`.
+
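+If the instance requires authentication, a password can be supplied with
+`pass` (values below are illustrative):
+
+```yaml
+secured:
+ name : 'local'
+ host : '127.0.0.1'
+ port : 6379
+ pass : 'redis-password'
+```
+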
+---
diff --git a/collectors/python.d.plugin/redis/redis.chart.py b/collectors/python.d.plugin/redis/redis.chart.py
new file mode 100644
index 000000000..37d55ebfe
--- /dev/null
+++ b/collectors/python.d.plugin/redis/redis.chart.py
@@ -0,0 +1,261 @@
+# -*- coding: utf-8 -*-
+# Description: redis netdata python.d module
+# Author: Pawel Krupa (paulfantom)
+# Author: Ilya Mashchenko (l2isbad)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+import re
+
+from copy import deepcopy
+
+from bases.FrameworkServices.SocketService import SocketService
+
+REDIS_ORDER = [
+ 'operations',
+ 'hit_rate',
+ 'memory',
+ 'keys_redis',
+ 'eviction',
+ 'net',
+ 'connections',
+ 'clients',
+ 'slaves',
+ 'persistence',
+ 'bgsave_now',
+ 'bgsave_health',
+ 'uptime',
+]
+
+PIKA_ORDER = [
+ 'operations',
+ 'hit_rate',
+ 'memory',
+ 'keys_pika',
+ 'connections',
+ 'clients',
+ 'slaves',
+ 'uptime',
+]
+
+
+CHARTS = {
+ 'operations': {
+ 'options': [None, 'Operations', 'operations/s', 'operations', 'redis.operations', 'line'],
+ 'lines': [
+ ['total_commands_processed', 'commands', 'incremental'],
+ ['instantaneous_ops_per_sec', 'operations', 'absolute']
+ ]
+ },
+ 'hit_rate': {
+ 'options': [None, 'Hit rate', 'percent', 'hits', 'redis.hit_rate', 'line'],
+ 'lines': [
+ ['hit_rate', 'rate', 'absolute']
+ ]
+ },
+ 'memory': {
+ 'options': [None, 'Memory utilization', 'kilobytes', 'memory', 'redis.memory', 'line'],
+ 'lines': [
+ ['used_memory', 'total', 'absolute', 1, 1024],
+ ['used_memory_lua', 'lua', 'absolute', 1, 1024]
+ ]
+ },
+ 'net': {
+ 'options': [None, 'Bandwidth', 'kilobits/s', 'network', 'redis.net', 'area'],
+ 'lines': [
+ ['total_net_input_bytes', 'in', 'incremental', 8, 1024],
+ ['total_net_output_bytes', 'out', 'incremental', -8, 1024]
+ ]
+ },
+ 'keys_redis': {
+ 'options': [None, 'Keys per Database', 'keys', 'keys', 'redis.keys', 'line'],
+ 'lines': []
+ },
+ 'keys_pika': {
+ 'options': [None, 'Keys', 'keys', 'keys', 'redis.keys', 'line'],
+ 'lines': [
+ ['kv_keys', 'kv', 'absolute'],
+ ['hash_keys', 'hash', 'absolute'],
+ ['list_keys', 'list', 'absolute'],
+ ['zset_keys', 'zset', 'absolute'],
+ ['set_keys', 'set', 'absolute']
+ ]
+ },
+ 'eviction': {
+ 'options': [None, 'Evicted Keys', 'keys', 'keys', 'redis.eviction', 'line'],
+ 'lines': [
+ ['evicted_keys', 'evicted', 'absolute']
+ ]
+ },
+ 'connections': {
+ 'options': [None, 'Connections', 'connections/s', 'connections', 'redis.connections', 'line'],
+ 'lines': [
+ ['total_connections_received', 'received', 'incremental', 1],
+ ['rejected_connections', 'rejected', 'incremental', -1]
+ ]
+ },
+ 'clients': {
+ 'options': [None, 'Clients', 'clients', 'connections', 'redis.clients', 'line'],
+ 'lines': [
+ ['connected_clients', 'connected', 'absolute', 1],
+ ['blocked_clients', 'blocked', 'absolute', -1]
+ ]
+ },
+ 'slaves': {
+ 'options': [None, 'Slaves', 'slaves', 'replication', 'redis.slaves', 'line'],
+ 'lines': [
+ ['connected_slaves', 'connected', 'absolute']
+ ]
+ },
+ 'persistence': {
+ 'options': [None, 'Persistence Changes Since Last Save', 'changes', 'persistence',
+ 'redis.rdb_changes', 'line'],
+ 'lines': [
+ ['rdb_changes_since_last_save', 'changes', 'absolute']
+ ]
+ },
+ 'bgsave_now': {
+ 'options': [None, 'Duration of the RDB Save Operation', 'seconds', 'persistence',
+ 'redis.bgsave_now', 'absolute'],
+ 'lines': [
+ ['rdb_bgsave_in_progress', 'rdb save', 'absolute']
+ ]
+ },
+ 'bgsave_health': {
+ 'options': [None, 'Status of the Last RDB Save Operation', 'status', 'persistence',
+ 'redis.bgsave_health', 'line'],
+ 'lines': [
+ ['rdb_last_bgsave_status', 'rdb save', 'absolute']
+ ]
+ },
+ 'uptime': {
+ 'options': [None, 'Uptime', 'seconds', 'uptime', 'redis.uptime', 'line'],
+ 'lines': [
+ ['uptime_in_seconds', 'uptime', 'absolute']
+ ]
+ }
+}
+
+
+def copy_chart(name):
+ return {name: deepcopy(CHARTS[name])}
+
+
+RE = re.compile(r'\n([a-z_0-9 ]+):(?:keys=)?([^,\r]+)')
+
+
+class Service(SocketService):
+ def __init__(self, configuration=None, name=None):
+ SocketService.__init__(self, configuration=configuration, name=name)
+ self._keep_alive = True
+
+ self.order = list()
+ self.definitions = dict()
+
+ self.host = self.configuration.get('host', 'localhost')
+ self.port = self.configuration.get('port', 6379)
+ self.unix_socket = self.configuration.get('socket')
+ p = self.configuration.get('pass')
+
+ self.auth_request = 'AUTH {0} \r\n'.format(p).encode() if p else None
+ self.request = 'INFO\r\n'.encode()
+ self.bgsave_time = 0
+
+ def do_auth(self):
+ resp = self._get_raw_data(request=self.auth_request)
+ if not resp:
+ return False
+ if resp.strip() != '+OK':
+ self.error('invalid password')
+ return False
+ return True
+
+ def get_raw_and_parse(self):
+ if self.auth_request and not self.do_auth():
+ return None
+
+ resp = self._get_raw_data()
+
+ if not resp:
+ return None
+
+ parsed = RE.findall(resp)
+
+ if not parsed:
+ self.error('response is invalid/empty')
+ return None
+
+ return dict((k.replace(' ', '_'), v) for k, v in parsed)
+
+ def get_data(self):
+ """
+ Get data from socket
+ :return: dict
+ """
+ data = self.get_raw_and_parse()
+
+ if not data:
+ return None
+
+ try:
+ data['hit_rate'] = (
+ (int(data['keyspace_hits']) * 100) / (int(data['keyspace_hits']) + int(data['keyspace_misses']))
+ )
+ except (KeyError, ZeroDivisionError):
+ data['hit_rate'] = 0
+
+ if data.get('redis_version') and data.get('rdb_bgsave_in_progress'):
+ self.get_data_redis_specific(data)
+
+ return data
+
+ def get_data_redis_specific(self, data):
+ if data['rdb_bgsave_in_progress'] != '0':
+ self.bgsave_time += self.update_every
+ else:
+ self.bgsave_time = 0
+
+ data['rdb_last_bgsave_status'] = 0 if data['rdb_last_bgsave_status'] == 'ok' else 1
+ data['rdb_bgsave_in_progress'] = self.bgsave_time
+
+ def check(self):
+ """
+ Parse configuration, check if redis is available, and dynamically create chart lines data
+ :return: boolean
+ """
+ data = self.get_raw_and_parse()
+
+ if not data:
+ return False
+
+ self.order = PIKA_ORDER if data.get('pika_version') else REDIS_ORDER
+
+ for n in self.order:
+ self.definitions.update(copy_chart(n))
+
+ if data.get('redis_version'):
+ for k in data:
+ if k.startswith('db'):
+ self.definitions['keys_redis']['lines'].append([k, None, 'absolute'])
+
+ return True
+
+ def _check_raw_data(self, data):
+ """
+ Check if all data has been gathered from socket.
+ Parse first line containing message length and check against received message
+ :param data: str
+ :return: boolean
+ """
+ length = len(data)
+ supposed = data.split('\n')[0][1:-1]
+ offset = len(supposed) + 4 # 1 dollar sign, 1 new line character + 1 ending sequence '\r\n'
+ if not supposed.isdigit():
+ return True
+ supposed = int(supposed)
+
+ if length - offset >= supposed:
+ self.debug('received full response from redis')
+ return True
+
+ self.debug('waiting more data from redis')
+ return False
diff --git a/collectors/python.d.plugin/redis/redis.conf b/collectors/python.d.plugin/redis/redis.conf
new file mode 100644
index 000000000..6363f6da7
--- /dev/null
+++ b/collectors/python.d.plugin/redis/redis.conf
@@ -0,0 +1,112 @@
+# netdata python.d.plugin configuration for redis
+#
+# This file is in YaML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 1
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# retries sets the number of retries to be made in case of failures.
+# If unset, the default for python.d.plugin is used.
+# Attempts to restore the service are made once every update_every
+# and only if the module has collected values in the past.
+# retries: 60
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# retries: 60 # the JOB's number of restoration attempts
+# autodetection_retry: 0 # the JOB's re-check interval in seconds
+#
+# Additionally to the above, redis also supports the following:
+#
+# socket: 'path/to/redis.sock'
+#
+# or
+# host: 'IP or HOSTNAME' # the host to connect to
+# port: PORT # the port to connect to
+#
+# and
+# pass: 'password' # the redis password to use for AUTH command
+#
+
+# ----------------------------------------------------------------------
+# AUTO-DETECTION JOBS
+# only one of them will run (they have the same name)
+
+socket1:
+ name : 'local'
+ socket : '/tmp/redis.sock'
+ # pass : ''
+
+socket2:
+ name : 'local'
+ socket : '/var/run/redis/redis.sock'
+ # pass : ''
+
+socket3:
+ name : 'local'
+ socket : '/var/lib/redis/redis.sock'
+ # pass : ''
+
+localhost:
+ name : 'local'
+ host : 'localhost'
+ port : 6379
+ # pass : ''
+
+localipv4:
+ name : 'local'
+ host : '127.0.0.1'
+ port : 6379
+ # pass : ''
+
+localipv6:
+ name : 'local'
+ host : '::1'
+ port : 6379
+ # pass : ''
+
diff --git a/collectors/python.d.plugin/rethinkdbs/Makefile.inc b/collectors/python.d.plugin/rethinkdbs/Makefile.inc
new file mode 100644
index 000000000..dec604464
--- /dev/null
+++ b/collectors/python.d.plugin/rethinkdbs/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += rethinkdbs/rethinkdbs.chart.py
+dist_pythonconfig_DATA += rethinkdbs/rethinkdbs.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += rethinkdbs/README.md rethinkdbs/Makefile.inc
+
diff --git a/collectors/python.d.plugin/rethinkdbs/README.md b/collectors/python.d.plugin/rethinkdbs/README.md
new file mode 100644
index 000000000..5d357fa49
--- /dev/null
+++ b/collectors/python.d.plugin/rethinkdbs/README.md
@@ -0,0 +1,34 @@
+# rethinkdbs
+
+This module monitors RethinkDB health metrics.
+
+The following charts are drawn:
+
+1. **Connected Servers**
+ * connected
+ * missing
+
+2. **Active Clients**
+ * active
+
+3. **Queries** per second
+ * queries
+
+4. **Documents** per second
+ * documents
+
+### configuration
+
+```yaml
+
+localhost:
+ name : 'local'
+ host : '127.0.0.1'
+ port : 28015
+ user : "user"
+ password : "pass"
+```
+
+When no configuration file is found, the module tries to connect to `127.0.0.1:28015`.
+
+---
diff --git a/collectors/python.d.plugin/rethinkdbs/rethinkdbs.chart.py b/collectors/python.d.plugin/rethinkdbs/rethinkdbs.chart.py
new file mode 100644
index 000000000..127e9ad4b
--- /dev/null
+++ b/collectors/python.d.plugin/rethinkdbs/rethinkdbs.chart.py
@@ -0,0 +1,235 @@
+# -*- coding: utf-8 -*-
+# Description: rethinkdb netdata python.d module
+# Author: Ilya Mashchenko (l2isbad)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+try:
+ import rethinkdb as rdb
+ HAS_RETHINKDB = True
+except ImportError:
+ HAS_RETHINKDB = False
+
+from bases.FrameworkServices.SimpleService import SimpleService
+
+ORDER = [
+ 'cluster_connected_servers',
+ 'cluster_clients_active',
+ 'cluster_queries',
+ 'cluster_documents',
+]
+
+
+def cluster_charts():
+ return {
+ 'cluster_connected_servers': {
+ 'options': [None, 'Connected Servers', 'servers', 'cluster', 'rethinkdb.cluster_connected_servers',
+ 'stacked'],
+ 'lines': [
+ ['cluster_servers_connected', 'connected'],
+ ['cluster_servers_missing', 'missing'],
+ ]
+ },
+ 'cluster_clients_active': {
+ 'options': [None, 'Active Clients', 'clients', 'cluster', 'rethinkdb.cluster_clients_active',
+ 'line'],
+ 'lines': [
+ ['cluster_clients_active', 'active'],
+ ]
+ },
+ 'cluster_queries': {
+ 'options': [None, 'Queries', 'queries/s', 'cluster', 'rethinkdb.cluster_queries', 'line'],
+ 'lines': [
+ ['cluster_queries_per_sec', 'queries'],
+ ]
+ },
+ 'cluster_documents': {
+ 'options': [None, 'Documents', 'documents/s', 'cluster', 'rethinkdb.cluster_documents', 'line'],
+ 'lines': [
+ ['cluster_read_docs_per_sec', 'reads'],
+ ['cluster_written_docs_per_sec', 'writes'],
+ ]
+ },
+ }
+
+
+def server_charts(n):
+ o = [
+ '{0}_client_connections'.format(n),
+ '{0}_clients_active'.format(n),
+ '{0}_queries'.format(n),
+ '{0}_documents'.format(n),
+ ]
+ f = 'server {0}'.format(n)
+
+ c = {
+ o[0]: {
+ 'options': [None, 'Client Connections', 'connections', f, 'rethinkdb.client_connections', 'line'],
+ 'lines': [
+ ['{0}_client_connections'.format(n), 'connections'],
+ ]
+ },
+ o[1]: {
+ 'options': [None, 'Active Clients', 'clients', f, 'rethinkdb.clients_active', 'line'],
+ 'lines': [
+ ['{0}_clients_active'.format(n), 'active'],
+ ]
+ },
+ o[2]: {
+ 'options': [None, 'Queries', 'queries/s', f, 'rethinkdb.queries', 'line'],
+ 'lines': [
+ ['{0}_queries_total'.format(n), 'queries', 'incremental'],
+ ]
+ },
+ o[3]: {
+ 'options': [None, 'Documents', 'documents/s', f, 'rethinkdb.documents', 'line'],
+ 'lines': [
+ ['{0}_read_docs_total'.format(n), 'reads', 'incremental'],
+ ['{0}_written_docs_total'.format(n), 'writes', 'incremental'],
+ ]
+ },
+ }
+
+ return o, c
+
+
+class Cluster:
+ def __init__(self, raw):
+ self.raw = raw
+
+ def data(self):
+ qe = self.raw['query_engine']
+
+ return {
+ 'cluster_clients_active': qe['clients_active'],
+ 'cluster_queries_per_sec': qe['queries_per_sec'],
+ 'cluster_read_docs_per_sec': qe['read_docs_per_sec'],
+ 'cluster_written_docs_per_sec': qe['written_docs_per_sec'],
+ 'cluster_servers_connected': 0,
+ 'cluster_servers_missing': 0,
+ }
+
+
+class Server:
+ def __init__(self, raw):
+ self.name = raw['server']
+ self.raw = raw
+
+ def error(self):
+ return self.raw.get('error')
+
+ def data(self):
+ qe = self.raw['query_engine']
+
+ d = {
+ 'client_connections': qe['client_connections'],
+ 'clients_active': qe['clients_active'],
+ 'queries_total': qe['queries_total'],
+ 'read_docs_total': qe['read_docs_total'],
+ 'written_docs_total': qe['written_docs_total'],
+ }
+
+ return dict(('{0}_{1}'.format(self.name, k), d[k]) for k in d)
+
+
+class Service(SimpleService):
+ def __init__(self, configuration=None, name=None):
+ SimpleService.__init__(self, configuration=configuration, name=name)
+ self.order = list(ORDER)
+ self.definitions = cluster_charts()
+
+ self.host = self.configuration.get('host', '127.0.0.1')
+ self.port = self.configuration.get('port', 28015)
+ self.user = self.configuration.get('user', 'admin')
+ self.password = self.configuration.get('password')
+ self.timeout = self.configuration.get('timeout', 2)
+
+ self.conn = None
+ self.alive = True
+
+ def check(self):
+ if not HAS_RETHINKDB:
+ self.error('"rethinkdb" module is needed to use rethinkdbs.py')
+ return False
+
+ if not self.connect():
+ return None
+
+ stats = self.get_stats()
+
+ if not stats:
+ return None
+
+ for v in stats[1:]:
+ if get_id(v) == 'server':
+ o, c = server_charts(v['server'])
+ self.order.extend(o)
+ self.definitions.update(c)
+
+ return True
+
+ def get_data(self):
+ if not self.is_alive():
+ return None
+
+ stats = self.get_stats()
+
+ if not stats:
+ return None
+
+ data = dict()
+
+ # cluster
+ data.update(Cluster(stats[0]).data())
+
+ # servers
+ for v in stats[1:]:
+ if get_id(v) != 'server':
+ continue
+
+ s = Server(v)
+
+ if s.error():
+ data['cluster_servers_missing'] += 1
+ else:
+ data['cluster_servers_connected'] += 1
+ data.update(s.data())
+
+ return data
+
+ def get_stats(self):
+ try:
+ return list(rdb.db('rethinkdb').table('stats').run(self.conn).items)
+ except rdb.errors.ReqlError:
+ self.alive = False
+ return None
+
+ def connect(self):
+ try:
+ self.conn = rdb.connect(
+ host=self.host,
+ port=self.port,
+ user=self.user,
+ password=self.password,
+ timeout=self.timeout,
+ )
+ self.alive = True
+ return True
+ except rdb.errors.ReqlError as error:
+ self.error('Connection to {0}:{1} failed: {2}'.format(self.host, self.port, error))
+ return False
+
+ def reconnect(self):
+ # The connection is already closed after rdb.errors.ReqlError,
+ # so we do not need to call conn.close()
+ if self.connect():
+ return True
+ return False
+
+ def is_alive(self):
+ if not self.alive:
+ return self.reconnect()
+ return True
+
+
+def get_id(v):
+ return v['id'][0]
diff --git a/collectors/python.d.plugin/rethinkdbs/rethinkdbs.conf b/collectors/python.d.plugin/rethinkdbs/rethinkdbs.conf
new file mode 100644
index 000000000..73544fc2e
--- /dev/null
+++ b/collectors/python.d.plugin/rethinkdbs/rethinkdbs.conf
@@ -0,0 +1,78 @@
+# netdata python.d.plugin configuration for rethinkdb
+#
+# This file is in YaML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 1
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# retries sets the number of retries to be made in case of failures.
+# If unset, the default for python.d.plugin is used.
+# Attempts to restore the service are made once every update_every
+# and only if the module has collected values in the past.
+# retries: 60
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# retries: 60 # the JOB's number of restoration attempts
+# autodetection_retry: 0 # the JOB's re-check interval in seconds
+#
+# Additionally to the above, rethinkdb also supports the following:
+#
+# host: IP or HOSTNAME # default is 'localhost'
+# port: PORT # default is 28015
+# user: USERNAME # default is 'admin'
+# password: PASSWORD # not set by default
+# timeout: TIMEOUT # default is 2
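+#
+# An example job using authentication (hypothetical values):
+#
+# production:
+#   name     : 'production'
+#   host     : '203.0.113.10'
+#   port     : 28015
+#   user     : 'admin'
+#   password : 'secret'
+#   timeout  : 2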
+
+# ----------------------------------------------------------------------
+# AUTO-DETECTION JOBS
+# only one of them will run (they have the same name)
+
+local:
+ name: 'local'
+ host: 'localhost'
diff --git a/collectors/python.d.plugin/retroshare/Makefile.inc b/collectors/python.d.plugin/retroshare/Makefile.inc
new file mode 100644
index 000000000..891193e6d
--- /dev/null
+++ b/collectors/python.d.plugin/retroshare/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += retroshare/retroshare.chart.py
+dist_pythonconfig_DATA += retroshare/retroshare.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += retroshare/README.md retroshare/Makefile.inc
+
diff --git a/collectors/python.d.plugin/retroshare/README.md b/collectors/python.d.plugin/retroshare/README.md
new file mode 100644
index 000000000..e95095c65
--- /dev/null
+++ b/collectors/python.d.plugin/retroshare/README.md
@@ -0,0 +1 @@
+# retroshare
diff --git a/collectors/python.d.plugin/retroshare/retroshare.chart.py b/collectors/python.d.plugin/retroshare/retroshare.chart.py
new file mode 100644
index 000000000..1d8e35050
--- /dev/null
+++ b/collectors/python.d.plugin/retroshare/retroshare.chart.py
@@ -0,0 +1,80 @@
+# -*- coding: utf-8 -*-
+# Description: RetroShare netdata python.d module
+# Authors: sehraf
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+import json
+
+from bases.FrameworkServices.UrlService import UrlService
+
+# default module values (can be overridden per job in `config`)
+# update_every = 2
+priority = 60000
+retries = 60
+
+# charts order (can be overridden if you want less charts, or different order)
+ORDER = ['bandwidth', 'peers', 'dht']
+
+CHARTS = {
+ 'bandwidth': {
+ 'options': [None, 'RetroShare Bandwidth', 'kB/s', 'RetroShare', 'retroshare.bandwidth', 'area'],
+ 'lines': [
+ ['bandwidth_up_kb', 'Upload'],
+ ['bandwidth_down_kb', 'Download']
+ ]
+ },
+ 'peers': {
+ 'options': [None, 'RetroShare Peers', 'peers', 'RetroShare', 'retroshare.peers', 'line'],
+ 'lines': [
+ ['peers_all', 'All friends'],
+ ['peers_connected', 'Connected friends']
+ ]
+ },
+ 'dht': {
+ 'options': [None, 'Retroshare DHT', 'peers', 'RetroShare', 'retroshare.dht', 'line'],
+ 'lines': [
+ ['dht_size_all', 'DHT nodes estimated'],
+ ['dht_size_rs', 'RS nodes estimated']
+ ]
+ }
+}
+
+
+class Service(UrlService):
+ def __init__(self, configuration=None, name=None):
+ UrlService.__init__(self, configuration=configuration, name=name)
+ self.baseurl = self.configuration.get('url', 'http://localhost:9090')
+ self.order = ORDER
+ self.definitions = CHARTS
+
+ def _get_stats(self):
+ """
+ Format data received from http request
+ :return: dict
+ """
+ try:
+ raw = self._get_raw_data()
+ parsed = json.loads(raw)
+ if str(parsed['returncode']) != 'ok':
+ return None
+ except (TypeError, ValueError):
+ return None
+
+ return parsed['data'][0]
+
+ def _get_data(self):
+ """
+ Get data from API
+ :return: dict
+ """
+ self.url = self.baseurl + '/api/v2/stats'
+ data = self._get_stats()
+ if data is None:
+ return None
+
+ data['bandwidth_up_kb'] = data['bandwidth_up_kb'] * -1
+ if data['dht_active'] is False:
+ data['dht_size_all'] = None
+ data['dht_size_rs'] = None
+
+ return data
diff --git a/collectors/python.d.plugin/retroshare/retroshare.conf b/collectors/python.d.plugin/retroshare/retroshare.conf
new file mode 100644
index 000000000..9c92583f7
--- /dev/null
+++ b/collectors/python.d.plugin/retroshare/retroshare.conf
@@ -0,0 +1,74 @@
+# netdata python.d.plugin configuration for RetroShare
+#
+# This file is in YaML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 1
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# retries sets the number of retries to be made in case of failures.
+# If unset, the default for python.d.plugin is used.
+# Attempts to restore the service are made once every update_every
+# and only if the module has collected values in the past.
+# retries: 60
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# retries: 60 # the JOB's number of restoration attempts
+# autodetection_retry: 0 # the JOB's re-check interval in seconds
+#
+# Additionally to the above, RetroShare also supports the following:
+#
+# - url: 'url' # the URL to the WebUI
+#
+# ----------------------------------------------------------------------
+# AUTO-DETECTION JOBS
+# only one of them will run (they have the same name)
+
+localhost:
+ name: 'local'
+ url: 'http://localhost:9090'
diff --git a/collectors/python.d.plugin/samba/Makefile.inc b/collectors/python.d.plugin/samba/Makefile.inc
new file mode 100644
index 000000000..230a8ba43
--- /dev/null
+++ b/collectors/python.d.plugin/samba/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += samba/samba.chart.py
+dist_pythonconfig_DATA += samba/samba.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += samba/README.md samba/Makefile.inc
+
diff --git a/collectors/python.d.plugin/samba/README.md b/collectors/python.d.plugin/samba/README.md
new file mode 100644
index 000000000..44610d373
--- /dev/null
+++ b/collectors/python.d.plugin/samba/README.md
@@ -0,0 +1,67 @@
+# samba
+
+Performance metrics of Samba file sharing.
+
+**Requirements:**
+* `smbstatus` program
+* `sudo` program
+* `smbd` must be compiled with profiling enabled
+* `smbd` must be started either with the `-P 1` option or with profiling enabled in `smb.conf` via `smbd profiling level` (see the sketch below)
+* the `netdata` user needs to be able to run `smbstatus` through `sudo` without a password
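+
+A minimal `smb.conf` sketch that enables profiling (the accepted values for `smbd profiling level` depend on your Samba version, so check `man smb.conf`):
+
+```
+[global]
+    smbd profiling level = on
+```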
+
+It produces the following charts:
+
+1. **Syscall R/Ws** in kilobytes/s
+ * sendfile
+ * recvfile
+
+2. **Smb2 R/Ws** in kilobytes/s
+ * readout
+ * writein
+ * readin
+ * writeout
+
+3. **Smb2 Create/Close** in operations/s
+ * create
+ * close
+
+4. **Smb2 Info** in operations/s
+ * getinfo
+ * setinfo
+
+5. **Smb2 Find** in operations/s
+ * find
+
+6. **Smb2 Notify** in operations/s
+ * notify
+
+7. **Smb2 Lesser Ops** as counters
+ * tcon
+ * negprot
+ * tdis
+ * cancel
+ * logoff
+ * flush
+ * lock
+ * keepalive
+ * break
+ * sessetup
+
+### prerequisite
+This module uses `smbstatus`, which can only be executed by root. It uses
+`sudo` and assumes that it is configured so that the `netdata` user can
+execute `smbstatus` as root without a password.
+
+Add to `sudoers`:
+
+ netdata ALL=(root) NOPASSWD: /path/to/smbstatus
+
+### configuration
+
+**samba** is disabled by default. It should be explicitly enabled in `python.d.conf`:
+
+```yaml
+samba: yes
+```
+
+---
diff --git a/collectors/python.d.plugin/samba/samba.chart.py b/collectors/python.d.plugin/samba/samba.chart.py
new file mode 100644
index 000000000..b2278de9e
--- /dev/null
+++ b/collectors/python.d.plugin/samba/samba.chart.py
@@ -0,0 +1,138 @@
+# -*- coding: utf-8 -*-
+# Description: samba netdata python.d module
+# Author: Christopher Cox <chris_cox@endlessnow.com>
+# SPDX-License-Identifier: GPL-3.0-or-later
+#
+# The netdata user needs to be able to sudo the smbstatus program
+# without a password:
+# netdata ALL=(ALL) NOPASSWD: /usr/bin/smbstatus -P
+#
+# This makes calls to smbstatus -P
+#
+# This just looks at a couple of values out of syscall, and some from smb2.
+#
+# The Lesser Ops chart is merely a display of current counter values. They
+# didn't seem to change much to me. However, if you notice something changing
+# a lot there, bring one or more out into its own chart and make it incremental
+# (like find and notify... good examples).
+
+import re
+
+from bases.collection import find_binary
+from bases.FrameworkServices.ExecutableService import ExecutableService
+
+
+disabled_by_default = True
+
+# default module values (can be overridden per job in `config`)
+update_every = 5
+priority = 60000
+retries = 60
+
+ORDER = [
+ 'syscall_rw',
+ 'smb2_rw',
+ 'smb2_create_close',
+ 'smb2_info',
+ 'smb2_find',
+ 'smb2_notify',
+ 'smb2_sm_count'
+]
+
+CHARTS = {
+ 'syscall_rw': {
+ 'options': [None, 'R/Ws', 'kilobytes/s', 'syscall', 'syscall.rw', 'area'],
+ 'lines': [
+ ['syscall_sendfile_bytes', 'sendfile', 'incremental', 1, 1024],
+ ['syscall_recvfile_bytes', 'recvfile', 'incremental', -1, 1024]
+ ]
+ },
+ 'smb2_rw': {
+ 'options': [None, 'R/Ws', 'kilobytes/s', 'smb2', 'smb2.rw', 'area'],
+ 'lines': [
+ ['smb2_read_outbytes', 'readout', 'incremental', 1, 1024],
+ ['smb2_write_inbytes', 'writein', 'incremental', -1, 1024],
+ ['smb2_read_inbytes', 'readin', 'incremental', 1, 1024],
+ ['smb2_write_outbytes', 'writeout', 'incremental', -1, 1024]
+ ]
+ },
+ 'smb2_create_close': {
+ 'options': [None, 'Create/Close', 'operations/s', 'smb2', 'smb2.create_close', 'line'],
+ 'lines': [
+ ['smb2_create_count', 'create', 'incremental', 1, 1],
+ ['smb2_close_count', 'close', 'incremental', -1, 1]
+ ]
+ },
+ 'smb2_info': {
+ 'options': [None, 'Info', 'operations/s', 'smb2', 'smb2.get_set_info', 'line'],
+ 'lines': [
+ ['smb2_getinfo_count', 'getinfo', 'incremental', 1, 1],
+ ['smb2_setinfo_count', 'setinfo', 'incremental', -1, 1]
+ ]
+ },
+ 'smb2_find': {
+ 'options': [None, 'Find', 'operations/s', 'smb2', 'smb2.find', 'line'],
+ 'lines': [
+ ['smb2_find_count', 'find', 'incremental', 1, 1]
+ ]
+ },
+ 'smb2_notify': {
+ 'options': [None, 'Notify', 'operations/s', 'smb2', 'smb2.notify', 'line'],
+ 'lines': [
+ ['smb2_notify_count', 'notify', 'incremental', 1, 1]
+ ]
+ },
+ 'smb2_sm_count': {
+ 'options': [None, 'Lesser Ops', 'count', 'smb2', 'smb2.sm_counters', 'stacked'],
+ 'lines': [
+ ['smb2_tcon_count', 'tcon', 'absolute', 1, 1],
+ ['smb2_negprot_count', 'negprot', 'absolute', 1, 1],
+ ['smb2_tdis_count', 'tdis', 'absolute', 1, 1],
+ ['smb2_cancel_count', 'cancel', 'absolute', 1, 1],
+ ['smb2_logoff_count', 'logoff', 'absolute', 1, 1],
+ ['smb2_flush_count', 'flush', 'absolute', 1, 1],
+ ['smb2_lock_count', 'lock', 'absolute', 1, 1],
+ ['smb2_keepalive_count', 'keepalive', 'absolute', 1, 1],
+ ['smb2_break_count', 'break', 'absolute', 1, 1],
+ ['smb2_sessetup_count', 'sessetup', 'absolute', 1, 1]
+ ]
+ }
+}
+
+
+class Service(ExecutableService):
+ def __init__(self, configuration=None, name=None):
+ ExecutableService.__init__(self, configuration=configuration, name=name)
+ self.order = ORDER
+ self.definitions = CHARTS
+ self.rgx_smb2 = re.compile(r'(smb2_[^:]+|syscall_.*file_bytes):\s+(\d+)')
+
+ def check(self):
+ sudo_binary, smbstatus_binary = find_binary('sudo'), find_binary('smbstatus')
+
+ if not (sudo_binary and smbstatus_binary):
+ self.error("Can\'t locate 'sudo' or 'smbstatus' binary")
+ return False
+
+ self.command = [sudo_binary, '-v']
+ err = self._get_raw_data(stderr=True)
+ if err:
+ self.error(''.join(err))
+ return False
+
+ self.command = ' '.join([sudo_binary, '-n', smbstatus_binary, '-P'])
+
+ return ExecutableService.check(self)
+
+ def _get_data(self):
+ """
+ Format data received from shell command
+ :return: dict
+ """
+ raw_data = self._get_raw_data()
+ if not raw_data:
+ return None
+
+ parsed = self.rgx_smb2.findall(' '.join(raw_data))
+
+ return dict(parsed) or None
diff --git a/collectors/python.d.plugin/samba/samba.conf b/collectors/python.d.plugin/samba/samba.conf
new file mode 100644
index 000000000..ee513c60f
--- /dev/null
+++ b/collectors/python.d.plugin/samba/samba.conf
@@ -0,0 +1,62 @@
+# netdata python.d.plugin configuration for samba
+#
+# This file is in YaML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+update_every: 5
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# retries sets the number of retries to be made in case of failures.
+# If unset, the default for python.d.plugin is used.
+# Attempts to restore the service are made once every update_every
+# and only if the module has collected values in the past.
+# retries: 60
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# retries: 60 # the JOB's number of restoration attempts
+# autodetection_retry: 0 # the JOB's re-check interval in seconds \ No newline at end of file
diff --git a/collectors/python.d.plugin/sensors/Makefile.inc b/collectors/python.d.plugin/sensors/Makefile.inc
new file mode 100644
index 000000000..5fb26e1c8
--- /dev/null
+++ b/collectors/python.d.plugin/sensors/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += sensors/sensors.chart.py
+dist_pythonconfig_DATA += sensors/sensors.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += sensors/README.md sensors/Makefile.inc
+
diff --git a/collectors/python.d.plugin/sensors/README.md b/collectors/python.d.plugin/sensors/README.md
new file mode 100644
index 000000000..eb1642d90
--- /dev/null
+++ b/collectors/python.d.plugin/sensors/README.md
@@ -0,0 +1,17 @@
+# sensors
+
+System sensors information.
+
+Charts are created dynamically.
+
+### configuration
+
+For detailed configuration information please read [`sensors.conf`](sensors.conf) file.
+
+### possible issues
+
+There have been reports from users that on certain servers, ACPI ring buffer errors are printed by the kernel (`dmesg`) when ACPI sensors are being accessed.
+We are tracking such cases in issue [#827](https://github.com/netdata/netdata/issues/827).
+Please join this discussion for help.
+
+---
diff --git a/collectors/python.d.plugin/sensors/sensors.chart.py b/collectors/python.d.plugin/sensors/sensors.chart.py
new file mode 100644
index 000000000..69d2bfe99
--- /dev/null
+++ b/collectors/python.d.plugin/sensors/sensors.chart.py
@@ -0,0 +1,146 @@
+# -*- coding: utf-8 -*-
+# Description: sensors netdata python.d plugin
+# Author: Pawel Krupa (paulfantom)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from bases.FrameworkServices.SimpleService import SimpleService
+from third_party import lm_sensors as sensors
+
+# default module values (can be overridden per job in `config`)
+# update_every = 2
+
+ORDER = ['temperature', 'fan', 'voltage', 'current', 'power', 'energy', 'humidity']
+
+# This is a prototype of chart definition which is used to dynamically create self.definitions
+CHARTS = {
+ 'temperature': {
+ 'options': [None, ' temperature', 'Celsius', 'temperature', 'sensors.temperature', 'line'],
+ 'lines': [
+ [None, None, 'absolute', 1, 1000]
+ ]
+ },
+ 'voltage': {
+ 'options': [None, ' voltage', 'Volts', 'voltage', 'sensors.voltage', 'line'],
+ 'lines': [
+ [None, None, 'absolute', 1, 1000]
+ ]
+ },
+ 'current': {
+ 'options': [None, ' current', 'Ampere', 'current', 'sensors.current', 'line'],
+ 'lines': [
+ [None, None, 'absolute', 1, 1000]
+ ]
+ },
+ 'power': {
+ 'options': [None, ' power', 'Watt', 'power', 'sensors.power', 'line'],
+ 'lines': [
+ [None, None, 'absolute', 1, 1000000]
+ ]
+ },
+ 'fan': {
+ 'options': [None, ' fans speed', 'Rotations/min', 'fans', 'sensors.fan', 'line'],
+ 'lines': [
+ [None, None, 'absolute', 1, 1000]
+ ]
+ },
+ 'energy': {
+ 'options': [None, ' energy', 'Joule', 'energy', 'sensors.energy', 'areastack'],
+ 'lines': [
+ [None, None, 'incremental', 1, 1000000]
+ ]
+ },
+ 'humidity': {
+ 'options': [None, ' humidity', 'Percent', 'humidity', 'sensors.humidity', 'line'],
+ 'lines': [
+ [None, None, 'absolute', 1, 1000]
+ ]
+ }
+}
+
+LIMITS = {
+ 'temperature': [-127, 1000],
+ 'voltage': [-127, 127],
+ 'current': [-127, 127],
+ 'fan': [0, 65535]
+}
+
+TYPE_MAP = {
+ 0: 'voltage',
+ 1: 'fan',
+ 2: 'temperature',
+ 3: 'power',
+ 4: 'energy',
+ 5: 'current',
+ 6: 'humidity',
+ 7: 'max_main',
+ 16: 'vid',
+ 17: 'intrusion',
+ 18: 'max_other',
+ 24: 'beep_enable'
+}
+
+
+class Service(SimpleService):
+ def __init__(self, configuration=None, name=None):
+ SimpleService.__init__(self, configuration=configuration, name=name)
+ self.order = list()
+ self.definitions = dict()
+ self.chips = list()
+
+ def get_data(self):
+ data = dict()
+ try:
+ for chip in sensors.ChipIterator():
+ prefix = sensors.chip_snprintf_name(chip)
+ for feature in sensors.FeatureIterator(chip):
+ sfi = sensors.SubFeatureIterator(chip, feature)
+ for sf in sfi:
+ val = sensors.get_value(chip, sf.number)
+ break
+ type_name = TYPE_MAP[feature.type]
+ if type_name in LIMITS:
+ limit = LIMITS[type_name]
+ if val < limit[0] or val > limit[1]:
+ continue
+ data[prefix + '_' + str(feature.name.decode())] = int(val * 1000)
+ except Exception as error:
+ self.error(error)
+ return None
+
+ return data or None
+
+ def create_definitions(self):
+ for sensor in ORDER:
+ for chip in sensors.ChipIterator():
+ chip_name = sensors.chip_snprintf_name(chip)
+ if self.chips and not any([chip_name.startswith(ex) for ex in self.chips]):
+ continue
+ for feature in sensors.FeatureIterator(chip):
+ sfi = sensors.SubFeatureIterator(chip, feature)
+ vals = [sensors.get_value(chip, sf.number) for sf in sfi]
+ if vals[0] == 0:
+ continue
+ if TYPE_MAP[feature.type] == sensor:
+ # create chart
+ name = chip_name + '_' + TYPE_MAP[feature.type]
+ if name not in self.order:
+ self.order.append(name)
+ chart_def = list(CHARTS[sensor]['options'])
+ chart_def[1] = chip_name + chart_def[1]
+ self.definitions[name] = {'options': chart_def}
+ self.definitions[name]['lines'] = []
+ line = list(CHARTS[sensor]['lines'][0])
+ line[0] = chip_name + '_' + str(feature.name.decode())
+ line[1] = sensors.get_label(chip, feature)
+ self.definitions[name]['lines'].append(line)
+
+ def check(self):
+ try:
+ sensors.init()
+ except Exception as error:
+ self.error(error)
+ return False
+
+ self.create_definitions()
+
+ return True
diff --git a/collectors/python.d.plugin/sensors/sensors.conf b/collectors/python.d.plugin/sensors/sensors.conf
new file mode 100644
index 000000000..83bbffd7d
--- /dev/null
+++ b/collectors/python.d.plugin/sensors/sensors.conf
@@ -0,0 +1,63 @@
+# netdata python.d.plugin configuration for sensors
+#
+# This file is in YaML format. Generally the format is:
+#
+# name: value
+#
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 1
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# retries sets the number of retries to be made in case of failures.
+# If unset, the default for python.d.plugin is used.
+# Attempts to restore the service are made once every update_every
+# and only if the module has collected values in the past.
+# retries: 60
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
+
+# ----------------------------------------------------------------------
+# Limit the number of sensors types.
+# Comment the ones you want to disable.
+# Also, re-arranging this list controls the order of the charts at the
+# netdata dashboard.
+
+types:
+ - temperature
+ - fan
+ - voltage
+ - current
+ - power
+ - energy
+ - humidity
+
+# ----------------------------------------------------------------------
+# Limit the number of sensors chips.
+# Uncomment the first line (chips:) and add chip names below it.
+# Chip names that start with one of these prefixes will be matched.
+# You can find the chip names using the sensors command.
+
+#chips:
+# - i8k
+# - coretemp
+#
+# chip names can be found using the sensors shell command
+# the prefix is matched (anything that starts like that)
+#
+#----------------------------------------------------------------------
+
diff --git a/collectors/python.d.plugin/smartd_log/Makefile.inc b/collectors/python.d.plugin/smartd_log/Makefile.inc
new file mode 100644
index 000000000..dc1d0f3fb
--- /dev/null
+++ b/collectors/python.d.plugin/smartd_log/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += smartd_log/smartd_log.chart.py
+dist_pythonconfig_DATA += smartd_log/smartd_log.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += smartd_log/README.md smartd_log/Makefile.inc
+
diff --git a/collectors/python.d.plugin/smartd_log/README.md b/collectors/python.d.plugin/smartd_log/README.md
new file mode 100644
index 000000000..121a63573
--- /dev/null
+++ b/collectors/python.d.plugin/smartd_log/README.md
@@ -0,0 +1,38 @@
+# smartd_log
+
+This module monitors `smartd` log files to collect HDD/SSD S.M.A.R.T. attributes.
+
+It produces the following charts (additional attributes can be added in the module configuration file):
+
+1. **Read Error Rate** attribute 1
+
+2. **Start/Stop Count** attribute 4
+
+3. **Reallocated Sectors Count** attribute 5
+
+4. **Seek Error Rate** attribute 7
+
+5. **Power-On Hours Count** attribute 9
+
+6. **Power Cycle Count** attribute 12
+
+7. **Load/Unload Cycles** attribute 193
+
+8. **Temperature** attribute 194
+
+9. **Current Pending Sectors** attribute 197
+
+10. **Off-Line Uncorrectable** attribute 198
+
+11. **Write Error Rate** attribute 200
+
+### configuration
+
+```yaml
+local:
+ log_path : '/var/log/smartd/'
+```
+
+If no configuration is given, the module will attempt to read log files in the `/var/log/smartd/` directory.
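+
+Additional attributes and options can also be set, for example (a sketch based on the options documented in `smartd_log.conf`; the attribute IDs below are only examples):
+
+```yaml
+local:
+  log_path         : '/var/log/smartd/'
+  raw_values       : yes
+  smart_attributes : '1 5 9 194 197'
+  exclude_disks    : ''
+```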
+
+---
diff --git a/collectors/python.d.plugin/smartd_log/smartd_log.chart.py b/collectors/python.d.plugin/smartd_log/smartd_log.chart.py
new file mode 100644
index 000000000..21dbccecc
--- /dev/null
+++ b/collectors/python.d.plugin/smartd_log/smartd_log.chart.py
@@ -0,0 +1,353 @@
+# -*- coding: utf-8 -*-
+# Description: smart netdata python.d module
+# Author: l2isbad, vorph1
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+import os
+import re
+
+from collections import namedtuple
+from time import time
+
+from bases.collection import read_last_line
+from bases.FrameworkServices.SimpleService import SimpleService
+
+# charts order (can be overridden if you want less charts, or different order)
+ORDER = ['1', '4', '5', '7', '9', '12', '193', '194', '197', '198', '200']
+
+SMART_ATTR = {
+ '1': 'Read Error Rate',
+ '2': 'Throughput Performance',
+ '3': 'Spin-Up Time',
+ '4': 'Start/Stop Count',
+ '5': 'Reallocated Sectors Count',
+ '6': 'Read Channel Margin',
+ '7': 'Seek Error Rate',
+ '8': 'Seek Time Performance',
+ '9': 'Power-On Hours Count',
+ '10': 'Spin-up Retries',
+ '11': 'Calibration Retries',
+ '12': 'Power Cycle Count',
+ '13': 'Soft Read Error Rate',
+ '100': 'Erase/Program Cycles',
+ '103': 'Translation Table Rebuild',
+ '108': 'Unknown (108)',
+ '170': 'Reserved Block Count',
+ '171': 'Program Fail Count',
+ '172': 'Erase Fail Count',
+ '173': 'Wear Leveller Worst Case Erase Count',
+ '174': 'Unexpected Power Loss',
+ '175': 'Program Fail Count',
+ '176': 'Erase Fail Count',
+ '177': 'Wear Leveling Count',
+ '178': 'Used Reserved Block Count',
+ '179': 'Used Reserved Block Count',
+ '180': 'Unused Reserved Block Count',
+ '181': 'Program Fail Count',
+ '182': 'Erase Fail Count',
+ '183': 'SATA Downshifts',
+ '184': 'End-to-End error',
+ '185': 'Head Stability',
+ '186': 'Induced Op-Vibration Detection',
+ '187': 'Reported Uncorrectable Errors',
+ '188': 'Command Timeout',
+ '189': 'High Fly Writes',
+ '190': 'Temperature',
+ '191': 'G-Sense Errors',
+ '192': 'Power-Off Retract Cycles',
+ '193': 'Load/Unload Cycles',
+ '194': 'Temperature',
+ '195': 'Hardware ECC Recovered',
+ '196': 'Reallocation Events',
+ '197': 'Current Pending Sectors',
+ '198': 'Off-line Uncorrectable',
+ '199': 'UDMA CRC Error Rate',
+ '200': 'Write Error Rate',
+ '201': 'Soft Read Errors',
+ '202': 'Data Address Mark Errors',
+ '203': 'Run Out Cancel',
+ '204': 'Soft ECC Corrections',
+ '205': 'Thermal Asperity Rate',
+ '206': 'Flying Height',
+ '207': 'Spin High Current',
+ '209': 'Offline Seek Performance',
+ '220': 'Disk Shift',
+ '221': 'G-Sense Error Rate',
+ '222': 'Loaded Hours',
+ '223': 'Load/Unload Retries',
+ '224': 'Load Friction',
+ '225': 'Load/Unload Cycles',
+ '226': 'Load-in Time',
+ '227': 'Torque Amplification Count',
+ '228': 'Power-Off Retracts',
+ '230': 'GMR Head Amplitude',
+ '231': 'Temperature',
+ '232': 'Available Reserved Space',
+ '233': 'Media Wearout Indicator',
+ '240': 'Head Flying Hours',
+ '241': 'Total LBAs Written',
+ '242': 'Total LBAs Read',
+ '250': 'Read Error Retry Rate'
+}
+
+LIMIT = namedtuple('LIMIT', ['min', 'max'])
+
+LIMITS = {
+ '194': LIMIT(0, 200)
+}
+
+RESCAN_INTERVAL = 60
+
+REGEX = re.compile(
+ '(\d+);' # attribute
+ '(\d+);' # normalized value
+ '(\d+)', # raw value
+ re.X
+)
+
+
+def chart_template(chart_name):
+ units, attr_id = chart_name.split('_')[-2:]
+ title = '{value_type} {description}'.format(value_type=units.capitalize(),
+ description=SMART_ATTR[attr_id])
+ family = SMART_ATTR[attr_id].lower()
+
+ return {
+ chart_name: {
+ 'options': [None, title, units, family, 'smartd_log.' + chart_name, 'line'],
+ 'lines': []
+ }
+ }
+
+
+def handle_os_error(method):
+ def on_call(*args):
+ try:
+ return method(*args)
+ except OSError:
+ return None
+ return on_call
+
+
+class SmartAttribute(object):
+ def __init__(self, idx, normalized, raw):
+ self.id = idx
+ self.normalized = normalized
+ self._raw = raw
+
+ @property
+ def raw(self):
+ if self.id in LIMITS:
+ limit = LIMITS[self.id]
+ if limit.min <= int(self._raw) <= limit.max:
+ return self._raw
+ return None
+ return self._raw
+
+ @raw.setter
+ def raw(self, value):
+ self._raw = value
+
+
+class DiskLogFile:
+ def __init__(self, path):
+ self.path = path
+ self.size = os.path.getsize(path)
+
+ @handle_os_error
+ def is_changed(self):
+ new_size = os.path.getsize(self.path)
+ old_size, self.size = self.size, new_size
+
+ return new_size != old_size and new_size
+
+ @staticmethod
+ @handle_os_error
+ def is_valid(log_file, exclude):
+ return all([log_file.endswith('.csv'),
+ not [p for p in exclude if p in log_file],
+ os.access(log_file, os.R_OK),
+ os.path.getsize(log_file)])
+
+
+class Disk:
+ def __init__(self, full_path, age):
+ self.log_file = DiskLogFile(full_path)
+ self.name = os.path.basename(full_path).split('.')[-3]
+ self.age = int(age)
+ self.status = True
+ self.attributes = dict()
+
+ self.get_attributes()
+
+ def __eq__(self, other):
+ if isinstance(other, Disk):
+ return self.name == other.name
+ return self.name == other
+
+ def __ne__(self, other):
+ return not self == other
+
+ def __hash__(self):
+ return hash(repr(self))
+
+ @handle_os_error
+ def is_active(self):
+ return (time() - os.path.getmtime(self.log_file.path)) / 60 < self.age
+
+ @handle_os_error
+ def get_attributes(self):
+ last_line = read_last_line(self.log_file.path)
+ self.attributes = dict((attr, SmartAttribute(attr, normalized, raw)) for attr, normalized, raw
+ in REGEX.findall(last_line))
+ return True
+
+ def data(self):
+ data = dict()
+ for attr in self.attributes.values():
+ data['_'.join([self.name, 'normalized', attr.id])] = attr.normalized
+ if attr.raw is not None:
+ data['_'.join([self.name, 'raw', attr.id])] = attr.raw
+ return data
+
+
+class Service(SimpleService):
+ def __init__(self, configuration=None, name=None):
+ SimpleService.__init__(self, configuration=configuration, name=name)
+ self.log_path = self.configuration.get('log_path', '/var/log/smartd')
+ self.raw = self.configuration.get('raw_values', True)
+ self.exclude = self.configuration.get('exclude_disks', str()).split()
+ self.age = self.configuration.get('age', 30)
+
+ self.runs = 0
+ self.disks = list()
+ self.order = list()
+ self.definitions = dict()
+
+ def check(self):
+ self.disks = self.scan()
+
+ if not self.disks:
+ return None
+
+ user_defined_sa = self.configuration.get('smart_attributes')
+
+ if user_defined_sa:
+ order = user_defined_sa.split() or ORDER
+ else:
+ order = ORDER
+
+ self.create_charts(order)
+
+ return True
+
+ def get_data(self):
+ self.runs += 1
+
+ if self.runs % RESCAN_INTERVAL == 0:
+ self.cleanup_and_rescan()
+
+ data = dict()
+
+ for disk in self.disks:
+
+ if not disk.status:
+ continue
+
+ changed = disk.log_file.is_changed()
+
+ # True = changed, False = unchanged, None = Exception
+ if changed is None:
+ disk.status = False
+ continue
+
+ if changed:
+ success = disk.get_attributes()
+ if not success:
+ disk.status = False
+ continue
+
+ data.update(disk.data())
+
+ return data or None
+
+ def create_charts(self, order):
+ for attr in order:
+ raw_name, normalized_name = 'attr_id_raw_' + attr, 'attr_id_normalized_' + attr
+ raw, normalized = chart_template(raw_name), chart_template(normalized_name)
+ self.order.extend([normalized_name, raw_name])
+ self.definitions.update(raw)
+ self.definitions.update(normalized)
+
+ for disk in self.disks:
+ if attr not in disk.attributes:
+ self.debug("'{disk}' has no attribute '{attr_id}'".format(disk=disk.name,
+ attr_id=attr))
+ continue
+ normalized[normalized_name]['lines'].append(['_'.join([disk.name, 'normalized', attr]), disk.name])
+
+ if not self.raw:
+ continue
+
+ if disk.attributes[attr].raw is not None:
+ raw[raw_name]['lines'].append(['_'.join([disk.name, 'raw', attr]), disk.name])
+ continue
+ self.debug("'{disk}' attribute '{attr_id}' value not in {limits}".format(disk=disk.name,
+ attr_id=attr,
+ limits=LIMITS[attr]))
+
+ def cleanup_and_rescan(self):
+ self.cleanup()
+ new_disks = self.scan(only_new=True)
+
+ for disk in new_disks:
+ valid = False
+
+ for chart in self.charts:
+ value_type, idx = chart.id.split('_')[2:]
+
+ if idx in disk.attributes:
+ valid = True
+ dimension_id = '_'.join([disk.name, value_type, idx])
+
+ if dimension_id in chart:
+ chart.hide_dimension(dimension_id=dimension_id, reverse=True)
+ else:
+ chart.add_dimension([dimension_id, disk.name])
+ if valid:
+ self.disks.append(disk)
+
+ def cleanup(self):
+ for disk in self.disks:
+
+ if not disk.is_active():
+ disk.status = False
+ if not disk.status:
+ for chart in self.charts:
+ dimension_id = '_'.join([disk.name, chart.id[8:]])
+ chart.hide_dimension(dimension_id=dimension_id)
+
+ self.disks = [disk for disk in self.disks if disk.status]
+
+ def scan(self, only_new=None):
+ new_disks = list()
+ for f in os.listdir(self.log_path):
+ full_path = os.path.join(self.log_path, f)
+
+ if DiskLogFile.is_valid(full_path, self.exclude):
+ disk = Disk(full_path, self.age)
+
+ active = disk.is_active()
+ if active is None:
+ continue
+ if active:
+ if not only_new:
+ new_disks.append(disk)
+ else:
+ if disk not in self.disks:
+ new_disks.append(disk)
+ else:
+ if not only_new:
+ self.debug("'{disk}' not updated in the last {age} minutes, "
+ "skipping it.".format(disk=disk.name, age=self.age))
+ return new_disks
diff --git a/collectors/python.d.plugin/smartd_log/smartd_log.conf b/collectors/python.d.plugin/smartd_log/smartd_log.conf
new file mode 100644
index 000000000..3fab3f1c0
--- /dev/null
+++ b/collectors/python.d.plugin/smartd_log/smartd_log.conf
@@ -0,0 +1,90 @@
+# netdata python.d.plugin configuration for smartd log
+#
+# This file is in YaML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 1
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# retries sets the number of retries to be made in case of failures.
+# If unset, the default for python.d.plugin is used.
+# Attempts to restore the service are made once every update_every
+# and only if the module has collected values in the past.
+# retries: 60
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# retries: 60 # the JOB's number of restoration attempts
+# autodetection_retry: 0 # the JOB's re-check interval in seconds
+#
+# Additionally to the above, smartd_log also supports the following:
+#
+# log_path: '/path/to/smartdlogs' # path to smartd log files. Default is /var/log/smartd
+# raw_values: yes # enable/disable raw values charts. Enabled by default.
+# smart_attributes: '1 2 3 4 44' # smart attributes charts. Default are ['1', '4', '5', '7', '9', '12', '193', '194', '197', '198', '200'].
+# exclude_disks: 'PATTERN1 PATTERN2' # space separated patterns. If the pattern is in the drive name, the module will not collect data for it.
+#
+# ----------------------------------------------------------------------
+# Additional information
+# The plugin reads smartd log files (written with the smartd -A option).
+# You need to pass additional options to smartd on startup (see man smartd), e.g. '-i 600 -A /var/log/smartd/'.
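+# For example, on Debian/Ubuntu this is usually set in /etc/default/smartmontools
+# (the variable name and file location may differ on other distributions):
+#   smartd_opts="-i 600 -A /var/log/smartd/"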
+# Then restart the smartd service and check the log directory:
+# ls /var/log/smartd/
+# CDC_WD10EZEX_00BN5A0-WD_WCC3F7FLVZS9.ata.csv WDC_WD10EZEX_00BN5A0-WD_WCC3F7FLVZS9.ata.csv ZDC_WD10EZEX_00BN5A0-WD_WCC3F7FLVZS9.ata.csv
+#
+# Smartd APPENDS to its logs at every run. It is NOT RECOMMENDED to set the '-i' option below 60 sec.
+# It is STRONGLY RECOMMENDED to create a logrotate config file for the smartd logs.
+#
+# RAW vs NORMALIZED values
+# "Normalized value", commonly referred to as just "value". This is the most universal measurement, on a scale from 0 (bad) to some maximum (good) value.
+# Maximum values are typically 100, 200 or 253. The rule of thumb is: high values are good, low values are bad.
+#
+# "Raw value" - the value of the attribute as it is tracked by the device, before any normalization takes place.
+# Some raw numbers provide valuable insight when properly interpreted.
+# Raw values are typically listed in hexadecimal numbers. The raw value has a different structure for different vendors and is often not meaningful as a decimal number.
+#
+# ----------------------------------------------------------------------
diff --git a/collectors/python.d.plugin/spigotmc/Makefile.inc b/collectors/python.d.plugin/spigotmc/Makefile.inc
new file mode 100644
index 000000000..f9fa8b6b0
--- /dev/null
+++ b/collectors/python.d.plugin/spigotmc/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += spigotmc/spigotmc.chart.py
+dist_pythonconfig_DATA += spigotmc/spigotmc.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += spigotmc/README.md spigotmc/Makefile.inc
+
diff --git a/collectors/python.d.plugin/spigotmc/README.md b/collectors/python.d.plugin/spigotmc/README.md
new file mode 100644
index 000000000..ae5602587
--- /dev/null
+++ b/collectors/python.d.plugin/spigotmc/README.md
@@ -0,0 +1,22 @@
+# spigotmc
+
+This module does some really basic monitoring for Spigot Minecraft servers.
+
+It provides two charts, one tracking server-side ticks-per-second in
+1, 5 and 15 minute averages, and one tracking the number of currently
+active users.
+
+This is not compatible with Spigot plugins which change the format of
+the data returned by the `tps` or `list` console commands.
+
+### configuration
+
+```yaml
+host: localhost
+port: 25575
+password: pass
+```
+
+By default, a connection to port 25575 on the local system is attempted with an empty password.
+
+---
diff --git a/collectors/python.d.plugin/spigotmc/spigotmc.chart.py b/collectors/python.d.plugin/spigotmc/spigotmc.chart.py
new file mode 100644
index 000000000..a5e5ee0ee
--- /dev/null
+++ b/collectors/python.d.plugin/spigotmc/spigotmc.chart.py
@@ -0,0 +1,120 @@
+# -*- coding: utf-8 -*-
+# Description: spigotmc netdata python.d module
+# Author: Austin S. Hemmelgarn (Ferroin)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+import socket
+import platform
+
+from bases.FrameworkServices.SimpleService import SimpleService
+
+from third_party import mcrcon
+
+# Update only every 5 seconds because collection takes in excess of
+# 100ms sometimes, and most people won't care about second-by-second data.
+update_every = 5
+
+PRECISION = 100
+
+ORDER = ['tps', 'users']
+
+CHARTS = {
+ 'tps': {
+ 'options': [None, 'Spigot Ticks Per Second', 'ticks', 'spigotmc', 'spigotmc.tps', 'line'],
+ 'lines': [
+ ['tps1', '1 Minute Average', 'absolute', 1, PRECISION],
+ ['tps5', '5 Minute Average', 'absolute', 1, PRECISION],
+ ['tps15', '15 Minute Average', 'absolute', 1, PRECISION]
+ ]
+ },
+ 'users': {
+ 'options': [None, 'Minecraft Users', 'users', 'spigotmc', 'spigotmc.users', 'area'],
+ 'lines': [
+ ['users', 'Users', 'absolute', 1, 1]
+ ]
+ }
+}
+
+
+class Service(SimpleService):
+ def __init__(self, configuration=None, name=None):
+ SimpleService.__init__(self, configuration=configuration, name=name)
+ self.order = ORDER
+ self.definitions = CHARTS
+ self.host = self.configuration.get('host', 'localhost')
+ self.port = self.configuration.get('port', 25575)
+ self.password = self.configuration.get('password', '')
+ self.console = mcrcon.MCRcon()
+ self.alive = True
+
+ def check(self):
+ if platform.system() != 'Linux':
+ self.error('Only supported on Linux.')
+ return False
+ try:
+ self.connect()
+ except (mcrcon.MCRconException, socket.error) as err:
+ self.error('Error connecting.')
+ self.error(repr(err))
+ return False
+ return True
+
+ def connect(self):
+ self.console.connect(self.host, self.port, self.password)
+
+ def reconnect(self):
+ try:
+ try:
+ self.console.disconnect()
+ except mcrcon.MCRconException:
+ pass
+ self.console.connect(self.host, self.port, self.password)
+ self.alive = True
+ except (mcrcon.MCRconException, socket.error) as err:
+ self.error('Error connecting.')
+ self.error(repr(err))
+ return False
+ return True
+
+ def is_alive(self):
+ if (not self.alive) or \
+ self.console.socket.getsockopt(socket.IPPROTO_TCP, socket.TCP_INFO, 0) != 1:
+ return self.reconnect()
+ return True
+
+ def _get_data(self):
+ if not self.is_alive():
+ return None
+ data = {}
+ try:
+ raw = self.console.command('tps')
+ # The above command returns a string that looks like this:
+ # '§6TPS from last 1m, 5m, 15m: §a19.99, §a19.99, §a19.99\n'
+ # The values we care about are the three numbers after the :
+ tmp = raw.split(':')[1].split(',')
+ data['tps1'] = float(tmp[0].lstrip(u' §a*')) * PRECISION
+ data['tps5'] = float(tmp[1].lstrip(u' §a*')) * PRECISION
+ data['tps15'] = float(tmp[2].lstrip(u' §a*').rstrip()) * PRECISION
+ except mcrcon.MCRconException:
+ self.error('Unable to fetch TPS values.')
+ except socket.error:
+ self.error('Connection is dead.')
+ self.alive = False
+ return None
+ except (TypeError, LookupError):
+ self.error('Unable to process TPS values.')
+ try:
+ raw = self.console.command('list')
+ # The above command returns a string that looks like this:
+ # 'There are 0/20 players online:'
+ # We care about the first number here.
+ data['users'] = int(raw.split()[2].split('/')[0])
+ except mcrcon.MCRconException:
+ self.error('Unable to fetch user counts.')
+ except socket.error:
+ self.error('Connection is dead.')
+ self.alive = False
+ return None
+ except (TypeError, LookupError):
+ self.error('Unable to process user counts.')
+ return data
diff --git a/collectors/python.d.plugin/spigotmc/spigotmc.conf b/collectors/python.d.plugin/spigotmc/spigotmc.conf
new file mode 100644
index 000000000..3ba492def
--- /dev/null
+++ b/collectors/python.d.plugin/spigotmc/spigotmc.conf
@@ -0,0 +1,68 @@
+# netdata python.d.plugin configuration for spigotmc
+#
+# This file is in YaML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 1
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# retries sets the number of retries to be made in case of failures.
+# If unset, the default for python.d.plugin is used.
+# Attempts to restore the service are made once every update_every
+# and only if the module has collected values in the past.
+# retries: 60
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# retries: 60 # the JOB's number of restoration attempts
+# autodetection_retry: 0 # the JOB's re-check interval in seconds
+#
+# In addition to the above, spigotmc supports the following:
+#
+# host: localhost # The host to connect to. Defaults to the local system.
+# port: 25575 # The port the remote console is listening on.
+# password: '' # The remote console password. Must be set correctly.
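+#
+# ----------------------------------------------------------------------
+# Example
+#
+# A minimal sketch of a local job (the job name and password value are
+# illustrative; use the rcon password configured in your server):
+#
+# local:
+#  name: 'local'
+#  host: 'localhost'
+#  port: 25575
+#  password: 'your-rcon-password'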
diff --git a/collectors/python.d.plugin/springboot/Makefile.inc b/collectors/python.d.plugin/springboot/Makefile.inc
new file mode 100644
index 000000000..06775f937
--- /dev/null
+++ b/collectors/python.d.plugin/springboot/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += springboot/springboot.chart.py
+dist_pythonconfig_DATA += springboot/springboot.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += springboot/README.md springboot/Makefile.inc
+
diff --git a/collectors/python.d.plugin/springboot/README.md b/collectors/python.d.plugin/springboot/README.md
new file mode 100644
index 000000000..008436a4f
--- /dev/null
+++ b/collectors/python.d.plugin/springboot/README.md
@@ -0,0 +1,129 @@
+# springboot
+
+This module will monitor one or more Java Spring Boot applications, depending on configuration.
+
+It produces the following charts:
+
+1. **Response Codes** in requests/s
+ * 1xx
+ * 2xx
+ * 3xx
+ * 4xx
+ * 5xx
+ * others
+
+2. **Threads**
+ * daemon
+ * total
+
+3. **GC Time** in milliseconds and **GC Operations** in operations/s
+ * Copy
+ * MarkSweep
+ * ...
+
+4. **Heap Memory Usage** in KB
+ * used
+ * committed
+
+### configuration
+
+Please see the [Monitoring Java Spring Boot Applications](https://github.com/netdata/netdata/wiki/Monitoring-Java-Spring-Boot-Applications) page for detailed info about module configuration.
+
+---
+
+# Monitoring Java Spring Boot Applications
+
+Netdata can be used to monitor running Java [Spring Boot](https://spring.io/) applications that expose their metrics using the **Spring Boot Actuator** included in the Spring Boot library.
+
+The Spring Boot Actuator exposes these metrics over HTTP and is very easy to use:
+* add `org.springframework.boot:spring-boot-starter-actuator` to your application dependencies
+* set `endpoints.metrics.sensitive=false` in your `application.properties`
+
+You can create custom metrics by adding and injecting a PublicMetrics bean in your application.
+Here is an example of adding custom metrics:
+```java
+package com.example;
+
+import org.springframework.boot.actuate.endpoint.PublicMetrics;
+import org.springframework.boot.actuate.metrics.Metric;
+import org.springframework.stereotype.Service;
+
+import java.lang.management.ManagementFactory;
+import java.lang.management.MemoryPoolMXBean;
+import java.util.ArrayList;
+import java.util.Collection;
+
+@Service
+public class HeapPoolMetrics implements PublicMetrics {
+
+ private static final String PREFIX = "mempool.";
+ private static final String KEY_EDEN = PREFIX + "eden";
+ private static final String KEY_SURVIVOR = PREFIX + "survivor";
+ private static final String KEY_TENURED = PREFIX + "tenured";
+
+ @Override
+ public Collection<Metric<?>> metrics() {
+ Collection<Metric<?>> result = new ArrayList<>(4);
+ for (MemoryPoolMXBean mem : ManagementFactory.getMemoryPoolMXBeans()) {
+ String poolName = mem.getName();
+ String name = null;
+ if (poolName.indexOf("Eden Space") != -1) {
+ name = KEY_EDEN;
+ } else if (poolName.indexOf("Survivor Space") != -1) {
+ name = KEY_SURVIVOR;
+ } else if (poolName.indexOf("Tenured Gen") != -1 || poolName.indexOf("Old Gen") != -1) {
+ name = KEY_TENURED;
+ }
+
+ if (name != null) {
+ result.add(newMemoryMetric(name, mem.getUsage().getMax()));
+ result.add(newMemoryMetric(name + ".init", mem.getUsage().getInit()));
+ result.add(newMemoryMetric(name + ".committed", mem.getUsage().getCommitted()));
+ result.add(newMemoryMetric(name + ".used", mem.getUsage().getUsed()));
+ }
+ }
+ return result;
+ }
+
+ private Metric<Long> newMemoryMetric(String name, long bytes) {
+ return new Metric<>(name, bytes / 1024);
+ }
+}
+```
+
+Please refer to [Spring Boot Actuator: Production-ready features](https://docs.spring.io/spring-boot/docs/current/reference/html/production-ready.html) and [81. Actuator - Part IX. ‘How-to’ guides](https://docs.spring.io/spring-boot/docs/current/reference/html/howto-actuator.html) for more information.
+
+## Using netdata springboot module
+
+The springboot module is enabled by default. It checks `http://localhost:8080/metrics` and `http://127.0.0.1:8080/metrics` to detect Spring Boot applications. You can change this by editing `/etc/netdata/python.d/springboot.conf` (to edit it on your system run `/etc/netdata/edit-config python.d/springboot.conf`).
+
+This module defines some common charts, and you can add custom charts by changing the configuration.
+
+The configuration format is as follows:
+```yaml
+<id>:
+ name: '<name>'
+ url: '<metrics endpoint>' # ex. http://localhost:8080/metrics
+ user: '<username>' # optional
+ pass: '<password>' # optional
+ defaults:
+ [<chart-id>]: true|false
+ extras:
+ - id: '<chart-id>'
+ options:
+ title: '***'
+ units: '***'
+ family: '***'
+ context: 'springboot.***'
+ charttype: 'stacked' | 'area' | 'line'
+ lines:
+ - { dimension: 'myapp_ok', name: 'ok', algorithm: 'absolute', multiplier: 1, divisor: 1} # it shows "myapp.ok" metrics
+ - { dimension: 'myapp_ng', name: 'ng', algorithm: 'absolute', multiplier: 1, divisor: 1} # it shows "myapp.ng" metrics
+```
+
+By default, it creates the `response_code`, `threads`, `gc_time`, `gc_ope` and `heap` charts.
+You can disable the default charts by setting `defaults.<chart-id>: false`, as in the sketch below.
+
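+For example, a job that disables the GC and heap charts and keeps the rest might look like this (a minimal sketch; the job name and URL are illustrative):
+
+```yaml
+myapp:
+  url: 'http://localhost:8080/metrics'
+  defaults:
+    gc_time: false
+    gc_ope: false
+    heap: false
+```
+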
+In the dimension names of extra charts, replace `.` with `_`.
+
+Please check [springboot.conf](springboot.conf) for more examples.
\ No newline at end of file
diff --git a/collectors/python.d.plugin/springboot/springboot.chart.py b/collectors/python.d.plugin/springboot/springboot.chart.py
new file mode 100644
index 000000000..7df37e1d0
--- /dev/null
+++ b/collectors/python.d.plugin/springboot/springboot.chart.py
@@ -0,0 +1,159 @@
+# -*- coding: utf-8 -*-
+# Description: springboot netdata python.d module
+# Author: Wing924
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+import json
+from bases.FrameworkServices.UrlService import UrlService
+
+# default module values (can be overridden per job in `config`)
+# update_every = 2
+priority = 60000
+retries = 60
+
+
+DEFAULT_ORDER = ['response_code', 'threads', 'gc_time', 'gc_ope', 'heap']
+
+DEFAULT_CHARTS = {
+ 'response_code': {
+ 'options': [None, "Response Codes", "requests/s", "response", "springboot.response_code", "stacked"],
+ 'lines': [
+ ["resp_other", 'Other', 'incremental'],
+ ["resp_1xx", '1xx', 'incremental'],
+ ["resp_2xx", '2xx', 'incremental'],
+ ["resp_3xx", '3xx', 'incremental'],
+ ["resp_4xx", '4xx', 'incremental'],
+ ["resp_5xx", '5xx', 'incremental'],
+ ]
+ },
+ 'threads': {
+ 'options': [None, "Threads", "current threads", "threads", "springboot.threads", "area"],
+ 'lines': [
+ ["threads_daemon", 'daemon', 'absolute'],
+ ["threads", 'total', 'absolute'],
+ ]
+ },
+ 'gc_time': {
+ 'options': [None, "GC Time", "milliseconds", "garbage collection", "springboot.gc_time", "stacked"],
+ 'lines': [
+ ["gc_copy_time", 'Copy', 'incremental'],
+ ["gc_marksweepcompact_time", 'MarkSweepCompact', 'incremental'],
+ ["gc_parnew_time", 'ParNew', 'incremental'],
+ ["gc_concurrentmarksweep_time", 'ConcurrentMarkSweep', 'incremental'],
+ ["gc_ps_scavenge_time", 'PS Scavenge', 'incremental'],
+ ["gc_ps_marksweep_time", 'PS MarkSweep', 'incremental'],
+ ["gc_g1_young_generation_time", 'G1 Young Generation', 'incremental'],
+ ["gc_g1_old_generation_time", 'G1 Old Generation', 'incremental'],
+ ]
+ },
+ 'gc_ope': {
+ 'options': [None, "GC Operations", "operations/s", "garbage collection", "springboot.gc_ope", "stacked"],
+ 'lines': [
+ ["gc_copy_count", 'Copy', 'incremental'],
+ ["gc_marksweepcompact_count", 'MarkSweepCompact', 'incremental'],
+ ["gc_parnew_count", 'ParNew', 'incremental'],
+ ["gc_concurrentmarksweep_count", 'ConcurrentMarkSweep', 'incremental'],
+ ["gc_ps_scavenge_count", 'PS Scavenge', 'incremental'],
+ ["gc_ps_marksweep_count", 'PS MarkSweep', 'incremental'],
+ ["gc_g1_young_generation_count", 'G1 Young Generation', 'incremental'],
+ ["gc_g1_old_generation_count", 'G1 Old Generation', 'incremental'],
+ ]
+ },
+ 'heap': {
+ 'options': [None, "Heap Memory Usage", "KB", "heap memory", "springboot.heap", "area"],
+ 'lines': [
+ ["heap_committed", 'committed', "absolute"],
+ ["heap_used", 'used', "absolute"],
+ ]
+ }
+}
+
+
+class ExtraChartError(ValueError):
+ pass
+
+
+class Service(UrlService):
+ def __init__(self, configuration=None, name=None):
+ UrlService.__init__(self, configuration=configuration, name=name)
+ self.url = self.configuration.get('url', "http://localhost:8080/metrics")
+ self._setup_charts()
+
+ def _get_data(self):
+ """
+ Format data received from http request
+ :return: dict
+ """
+ raw_data = self._get_raw_data()
+ if not raw_data:
+ return None
+
+ try:
+ data = json.loads(raw_data)
+ except ValueError:
+ self.debug('%s is not a valid JSON page' % self.url)
+ return None
+
+ result = {
+ 'resp_1xx': 0,
+ 'resp_2xx': 0,
+ 'resp_3xx': 0,
+ 'resp_4xx': 0,
+ 'resp_5xx': 0,
+ 'resp_other': 0,
+ }
+
+ for key, value in data.items():  # items() works on both Python 2 and Python 3
+ if 'counter.status.' in key:
+ status_type = key[15:16] + 'xx'
+ if status_type[0] not in '12345':
+ status_type = 'other'
+ result['resp_' + status_type] += value
+ else:
+ result[key.replace('.', '_')] = value
+
+ return result or None
+
+ def _setup_charts(self):
+ self.order = []
+ self.definitions = {}
+ defaults = self.configuration.get('defaults', {})
+
+ for chart in DEFAULT_ORDER:
+ if defaults.get(chart, True):
+ self.order.append(chart)
+ self.definitions[chart] = DEFAULT_CHARTS[chart]
+
+ for extra in self.configuration.get('extras', []):
+ self._add_extra_chart(extra)
+ self.order.append(extra['id'])
+
+ def _add_extra_chart(self, chart):
+ chart_id = chart.get('id', None) or self.die('id is not defined in extra chart')
+ options = chart.get('options', None) or self.die('options is not defined in extra chart: %s' % chart_id)
+ lines = chart.get('lines', None) or self.die('lines is not defined in extra chart: %s' % chart_id)
+
+ title = options.get('title', None) or self.die('title is missing: %s' % chart_id)
+ units = options.get('units', None) or self.die('units is missing: %s' % chart_id)
+ family = options.get('family', title)
+ context = options.get('context', 'springboot.' + title)
+ charttype = options.get('charttype', 'line')
+
+ result = {
+ 'options': [None, title, units, family, context, charttype],
+ 'lines': [],
+ }
+
+ for line in lines:
+ dimension = line.get('dimension', None) or self.die('dimension is missing: %s' % chart_id)
+ name = line.get('name', dimension)
+ algorithm = line.get('algorithm', 'absolute')
+ multiplier = line.get('multiplier', 1)
+ divisor = line.get('divisor', 1)
+ result['lines'].append([dimension, name, algorithm, multiplier, divisor])
+
+ self.definitions[chart_id] = result
+
+ @staticmethod
+ def die(error_message):
+ raise ExtraChartError(error_message)
diff --git a/collectors/python.d.plugin/springboot/springboot.conf b/collectors/python.d.plugin/springboot/springboot.conf
new file mode 100644
index 000000000..40b5fb437
--- /dev/null
+++ b/collectors/python.d.plugin/springboot/springboot.conf
@@ -0,0 +1,120 @@
+# netdata python.d.plugin configuration for springboot
+#
+# This file is in YaML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 1
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# retries sets the number of retries to be made in case of failures.
+# If unset, the default for python.d.plugin is used.
+# Attempts to restore the service are made once every update_every
+# and only if the module has collected values in the past.
+# retries: 60
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# retries: 60 # the JOB's number of restoration attempts
+# autodetection_retry: 0 # the JOB's re-check interval in seconds
+#
+# In addition to the above, this plugin also supports the following:
+#
+# url: 'http://127.0.0.1/metrics' # the URL of the spring boot actuator metrics
+#
+# if the URL is password protected, the following are supported:
+#
+# user: 'username'
+# pass: 'password'
+#
+# defaults:
+#  [chart_id]: true | false # enables/disables a default chart, defaults to true.
+# extras: # defines extra charts to monitor, please see the example below
+#  - id: [chart_id]
+#    options: {}
+#    lines: []
+#
+# If all defaults are disabled and no extra charts are defined, this module will disable itself, as it has no data to
+# collect.
+#
+# Configuration example
+# ---------------------
+# example:
+# name: 'example'
+# url: 'http://localhost:8080/metrics'
+# defaults:
+# response_code: true
+# threads: true
+# gc_time: true
+# gc_ope: true
+# heap: false
+# extras:
+# - id: 'heap'
+# options: { title: 'Heap Memory Usage', units: 'KB', family: 'heap memory', context: 'springboot.heap', charttype: 'stacked' }
+# lines:
+# - { dimension: 'mem_free', name: 'free'}
+# - { dimension: 'mempool_eden_used', name: 'eden', algorithm: 'absolute', multiplier: 1, divisor: 1}
+# - { dimension: 'mempool_survivor_used', name: 'survivor', algorithm: 'absolute', multiplier: 1, divisor: 1}
+# - { dimension: 'mempool_tenured_used', name: 'tenured', algorithm: 'absolute', multiplier: 1, divisor: 1}
+# - id: 'heap_eden'
+# options: { title: 'Eden Memory Usage', units: 'KB', family: 'heap memory', context: 'springboot.heap_eden', charttype: 'area' }
+# lines:
+# - { dimension: 'mempool_eden_used', name: 'used'}
+# - { dimension: 'mempool_eden_committed', name: 'committed'}
+# - id: 'heap_survivor'
+# options: { title: 'Survivor Memory Usage', units: 'KB', family: 'heap memory', context: 'springboot.heap_survivor', charttype: 'area' }
+# lines:
+# - { dimension: 'mempool_survivor_used', name: 'used'}
+# - { dimension: 'mempool_survivor_committed', name: 'committed'}
+# - id: 'heap_tenured'
+# options: { title: 'Tenured Memory Usage', units: 'KB', family: 'heap memory', context: 'springboot.heap_tenured', charttype: 'area' }
+# lines:
+# - { dimension: 'mempool_tenured_used', name: 'used'}
+# - { dimension: 'mempool_tenured_committed', name: 'committed'}
+
+
+local:
+ name: 'local'
+ url: 'http://localhost:8080/metrics'
+
+local_ip:
+ name: 'local'
+ url: 'http://127.0.0.1:8080/metrics'
diff --git a/collectors/python.d.plugin/squid/Makefile.inc b/collectors/python.d.plugin/squid/Makefile.inc
new file mode 100644
index 000000000..76ecff81e
--- /dev/null
+++ b/collectors/python.d.plugin/squid/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += squid/squid.chart.py
+dist_pythonconfig_DATA += squid/squid.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += squid/README.md squid/Makefile.inc
+
diff --git a/collectors/python.d.plugin/squid/README.md b/collectors/python.d.plugin/squid/README.md
new file mode 100644
index 000000000..9c9b62f27
--- /dev/null
+++ b/collectors/python.d.plugin/squid/README.md
@@ -0,0 +1,38 @@
+# squid
+
+This module will monitor one or more squid instances depending on configuration.
+
+It produces the following charts:
+
+1. **Client Bandwidth** in kilobits/s
+ * in
+ * out
+ * hits
+
+2. **Client Requests** in requests/s
+ * requests
+ * hits
+ * errors
+
+3. **Server Bandwidth** in kilobits/s
+ * in
+ * out
+
+4. **Server Requests** in requests/s
+ * requests
+ * errors
+
+### configuration
+
+```yaml
+priority : 50000
+
+local:
+ request : 'cache_object://localhost:3128/counters'
+ host : 'localhost'
+ port : 3128
+```
+
+Without any configuration, the module will try to autodetect where squid presents its `counters` data.
+
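+If the squid you want to monitor runs on another host, you can point a job at it explicitly. A minimal sketch (the job name and host address are illustrative, and the squid cache manager must be reachable from the Netdata host):
+
+```yaml
+remote:
+  host    : '192.0.2.10'
+  port    : 3128
+  request : '/squid-internal-mgr/counters'
+```
+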
+---
diff --git a/collectors/python.d.plugin/squid/squid.chart.py b/collectors/python.d.plugin/squid/squid.chart.py
new file mode 100644
index 000000000..fd54168f0
--- /dev/null
+++ b/collectors/python.d.plugin/squid/squid.chart.py
@@ -0,0 +1,125 @@
+# -*- coding: utf-8 -*-
+# Description: squid netdata python.d module
+# Author: Pawel Krupa (paulfantom)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from bases.FrameworkServices.SocketService import SocketService
+
+
+# default module values (can be overridden per job in `config`)
+# update_every = 2
+priority = 60000
+retries = 60
+
+# charts order (can be overridden if you want less charts, or different order)
+ORDER = ['clients_net', 'clients_requests', 'servers_net', 'servers_requests']
+
+CHARTS = {
+ 'clients_net': {
+ 'options': [None, 'Squid Client Bandwidth', 'kilobits/s', 'clients', 'squid.clients_net', 'area'],
+ 'lines': [
+ ['client_http_kbytes_in', 'in', 'incremental', 8, 1],
+ ['client_http_kbytes_out', 'out', 'incremental', -8, 1],
+ ['client_http_hit_kbytes_out', 'hits', 'incremental', -8, 1]
+ ]
+ },
+ 'clients_requests': {
+ 'options': [None, 'Squid Client Requests', 'requests/s', 'clients', 'squid.clients_requests', 'line'],
+ 'lines': [
+ ['client_http_requests', 'requests', 'incremental'],
+ ['client_http_hits', 'hits', 'incremental'],
+ ['client_http_errors', 'errors', 'incremental', -1, 1]
+ ]
+ },
+ 'servers_net': {
+ 'options': [None, 'Squid Server Bandwidth', 'kilobits/s', 'servers', 'squid.servers_net', 'area'],
+ 'lines': [
+ ['server_all_kbytes_in', 'in', 'incremental', 8, 1],
+ ['server_all_kbytes_out', 'out', 'incremental', -8, 1]
+ ]
+ },
+ 'servers_requests': {
+ 'options': [None, 'Squid Server Requests', 'requests/s', 'servers', 'squid.servers_requests', 'line'],
+ 'lines': [
+ ['server_all_requests', 'requests', 'incremental'],
+ ['server_all_errors', 'errors', 'incremental', -1, 1]
+ ]
+ }
+}
+
+
+class Service(SocketService):
+ def __init__(self, configuration=None, name=None):
+ SocketService.__init__(self, configuration=configuration, name=name)
+ self._keep_alive = True
+ self.request = ''
+ self.host = 'localhost'
+ self.port = 3128
+ self.order = ORDER
+ self.definitions = CHARTS
+
+ def _get_data(self):
+ """
+ Get data via http request
+ :return: dict
+ """
+ response = self._get_raw_data()
+
+ data = dict()
+ try:
+ raw = ''
+ for tmp in response.split('\r\n'):
+ if tmp.startswith('sample_time'):
+ raw = tmp
+ break
+
+ if raw.startswith('<'):
+ self.error('invalid data received')
+ return None
+
+ for row in raw.split('\n'):
+ if row.startswith(('client', 'server.all')):
+ tmp = row.split('=')
+ data[tmp[0].replace('.', '_').strip(' ')] = int(tmp[1])
+
+ except (ValueError, AttributeError, TypeError):
+ self.error('invalid data received')
+ return None
+
+ if not data:
+ self.error('no data received')
+ return None
+ return data
+
+ def _check_raw_data(self, data):
+ header = data[:1024].lower()
+
+ if 'connection: keep-alive' in header:
+ self._keep_alive = True
+ else:
+ self._keep_alive = False
+
+ if data[-7:] == '\r\n0\r\n\r\n' and 'transfer-encoding: chunked' in header: # HTTP/1.1 response
+ self.debug('received full response from squid')
+ return True
+
+ self.debug('waiting more data from squid')
+ return False
+
+ def check(self):
+ """
+ Parse essential configuration, autodetect squid configuration (if needed), and check if data is available
+ :return: boolean
+ """
+ self._parse_config()
+ # format request
+ req = self.request.decode()
+ if not req.startswith('GET'):
+ req = 'GET ' + req
+ if not req.endswith(' HTTP/1.1\r\n\r\n'):
+ req += ' HTTP/1.1\r\n\r\n'
+ self.request = req.encode()
+ if self._get_data() is not None:
+ return True
+ else:
+ return False
diff --git a/collectors/python.d.plugin/squid/squid.conf b/collectors/python.d.plugin/squid/squid.conf
new file mode 100644
index 000000000..564187f00
--- /dev/null
+++ b/collectors/python.d.plugin/squid/squid.conf
@@ -0,0 +1,169 @@
+# netdata python.d.plugin configuration for squid
+#
+# This file is in YaML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 1
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# retries sets the number of retries to be made in case of failures.
+# If unset, the default for python.d.plugin is used.
+# Attempts to restore the service are made once every update_every
+# and only if the module has collected values in the past.
+# retries: 60
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# retries: 60 # the JOB's number of restoration attempts
+# autodetection_retry: 0 # the JOB's re-check interval in seconds
+#
+# In addition to the above, squid also supports the following:
+#
+# host : 'IP or HOSTNAME' # the host to connect to
+# port : PORT # the port to connect to
+# request: 'URL' # the URL to request from squid
+#
+
+# ----------------------------------------------------------------------
+# SQUID CONFIGURATION
+#
+# See:
+# http://wiki.squid-cache.org/Features/CacheManager
+#
+# In short, add to your squid configuration these:
+#
+# http_access allow localhost manager
+# http_access deny manager
+#
+# To remotely monitor a squid:
+#
+# acl managerAdmin src 192.0.2.1
+# http_access allow localhost manager
+# http_access allow managerAdmin manager
+# http_access deny manager
+#
+
+# ----------------------------------------------------------------------
+# AUTO-DETECTION JOBS
+# only one of them will run (they have the same name)
+
+tcp3128old:
+ name : 'local'
+ host : 'localhost'
+ port : 3128
+ request : 'cache_object://localhost:3128/counters'
+
+tcp8080old:
+ name : 'local'
+ host : 'localhost'
+ port : 8080
+ request : 'cache_object://localhost:3128/counters'
+
+tcp3128new:
+ name : 'local'
+ host : 'localhost'
+ port : 3128
+ request : '/squid-internal-mgr/counters'
+
+tcp8080new:
+ name : 'local'
+ host : 'localhost'
+ port : 8080
+ request : '/squid-internal-mgr/counters'
+
+# IPv4
+
+tcp3128oldipv4:
+ name : 'local'
+ host : '127.0.0.1'
+ port : 3128
+ request : 'cache_object://127.0.0.1:3128/counters'
+
+tcp8080oldipv4:
+ name : 'local'
+ host : '127.0.0.1'
+ port : 8080
+ request : 'cache_object://127.0.0.1:3128/counters'
+
+tcp3128newipv4:
+ name : 'local'
+ host : '127.0.0.1'
+ port : 3128
+ request : '/squid-internal-mgr/counters'
+
+tcp8080newipv4:
+ name : 'local'
+ host : '127.0.0.1'
+ port : 8080
+ request : '/squid-internal-mgr/counters'
+
+# IPv6
+
+tcp3128oldipv6:
+ name : 'local'
+ host : '::1'
+ port : 3128
+ request : 'cache_object://[::1]:3128/counters'
+
+tcp8080oldipv6:
+ name : 'local'
+ host : '::1'
+ port : 8080
+ request : 'cache_object://[::1]:3128/counters'
+
+tcp3128newipv6:
+ name : 'local'
+ host : '::1'
+ port : 3128
+ request : '/squid-internal-mgr/counters'
+
+tcp8080newipv6:
+ name : 'local'
+ host : '::1'
+ port : 8080
+ request : '/squid-internal-mgr/counters'
+
diff --git a/collectors/python.d.plugin/tomcat/Makefile.inc b/collectors/python.d.plugin/tomcat/Makefile.inc
new file mode 100644
index 000000000..940a7835e
--- /dev/null
+++ b/collectors/python.d.plugin/tomcat/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += tomcat/tomcat.chart.py
+dist_pythonconfig_DATA += tomcat/tomcat.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += tomcat/README.md tomcat/Makefile.inc
+
diff --git a/collectors/python.d.plugin/tomcat/README.md b/collectors/python.d.plugin/tomcat/README.md
new file mode 100644
index 000000000..e548bd338
--- /dev/null
+++ b/collectors/python.d.plugin/tomcat/README.md
@@ -0,0 +1,33 @@
+# tomcat
+
+Presents Tomcat container memory utilization.
+
+Charts:
+
+1. **Requests** per second
+ * accesses
+
+2. **Volume** in KB/s
+ * volume
+
+3. **Threads**
+ * current
+ * busy
+
+4. **JVM Free Memory** in MB
+ * jvm
+
+### configuration
+
+```yaml
+localhost:
+ name : 'local'
+ url : 'http://127.0.0.1:8080/manager/status?XML=true'
+ user : 'tomcat_username'
+ pass : 'secret_tomcat_password'
+```
+
+Without configuration, the module attempts to connect to `http://localhost:8080/manager/status?XML=true` without any credentials,
+so it will probably fail.
+
+---
diff --git a/collectors/python.d.plugin/tomcat/tomcat.chart.py b/collectors/python.d.plugin/tomcat/tomcat.chart.py
new file mode 100644
index 000000000..3c2d0ed40
--- /dev/null
+++ b/collectors/python.d.plugin/tomcat/tomcat.chart.py
@@ -0,0 +1,163 @@
+# -*- coding: utf-8 -*-
+# Description: tomcat netdata python.d module
+# Author: Pawel Krupa (paulfantom)
+# Author: Wei He (Wing924)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+import xml.etree.ElementTree as ET
+
+from bases.FrameworkServices.UrlService import UrlService
+
+# default module values (can be overridden per job in `config`)
+# update_every = 2
+priority = 60000
+retries = 60
+
+# charts order (can be overridden if you want less charts, or different order)
+ORDER = ['accesses', 'bandwidth', 'processing_time', 'threads', 'jvm', 'jvm_eden', 'jvm_survivor', 'jvm_tenured']
+
+CHARTS = {
+ 'accesses': {
+ 'options': [None, 'Requests', 'requests/s', 'statistics', 'tomcat.accesses', 'area'],
+ 'lines': [
+ ['requestCount', 'accesses', 'incremental'],
+ ['errorCount', 'errors', 'incremental'],
+ ]
+ },
+ 'bandwidth': {
+ 'options': [None, 'Bandwidth', 'KB/s', 'statistics', 'tomcat.bandwidth', 'area'],
+ 'lines': [
+ ['bytesSent', 'sent', 'incremental', 1, 1024],
+ ['bytesReceived', 'received', 'incremental', 1, 1024],
+ ]
+ },
+ 'processing_time': {
+ 'options': [None, 'processing time', 'seconds', 'statistics', 'tomcat.processing_time', 'area'],
+ 'lines': [
+ ['processingTime', 'processing time', 'incremental', 1, 1000]
+ ]
+ },
+ 'threads': {
+ 'options': [None, 'Threads', 'current threads', 'statistics', 'tomcat.threads', 'area'],
+ 'lines': [
+ ['currentThreadCount', 'current', 'absolute'],
+ ['currentThreadsBusy', 'busy', 'absolute']
+ ]
+ },
+ 'jvm': {
+ 'options': [None, 'JVM Memory Pool Usage', 'MB', 'memory', 'tomcat.jvm', 'stacked'],
+ 'lines': [
+ ['free', 'free', 'absolute', 1, 1048576],
+ ['eden_used', 'eden', 'absolute', 1, 1048576],
+ ['survivor_used', 'survivor', 'absolute', 1, 1048576],
+ ['tenured_used', 'tenured', 'absolute', 1, 1048576],
+ ['code_cache_used', 'code cache', 'absolute', 1, 1048576],
+ ['compressed_used', 'compressed', 'absolute', 1, 1048576],
+ ['metaspace_used', 'metaspace', 'absolute', 1, 1048576],
+ ]
+ },
+ 'jvm_eden': {
+ 'options': [None, 'Eden Memory Usage', 'MB', 'memory', 'tomcat.jvm_eden', 'area'],
+ 'lines': [
+ ['eden_used', 'used', 'absolute', 1, 1048576],
+ ['eden_committed', 'committed', 'absolute', 1, 1048576],
+ ['eden_max', 'max', 'absolute', 1, 1048576]
+ ]
+ },
+ 'jvm_survivor': {
+ 'options': [None, 'Survivor Memory Usage', 'MB', 'memory', 'tomcat.jvm_survivor', 'area'],
+ 'lines': [
+ ['survivor_used', 'used', 'absolute', 1, 1048576],
+ ['survivor_committed', 'committed', 'absolute', 1, 1048576],
+ ['survivor_max', 'max', 'absolute', 1, 1048576]
+ ]
+ },
+ 'jvm_tenured': {
+ 'options': [None, 'Tenured Memory Usage', 'MB', 'memory', 'tomcat.jvm_tenured', 'area'],
+ 'lines': [
+ ['tenured_used', 'used', 'absolute', 1, 1048576],
+ ['tenured_committed', 'committed', 'absolute', 1, 1048576],
+ ['tenured_max', 'max', 'absolute', 1, 1048576]
+ ]
+ }
+}
+
+
+class Service(UrlService):
+ def __init__(self, configuration=None, name=None):
+ UrlService.__init__(self, configuration=configuration, name=name)
+ self.url = self.configuration.get('url', 'http://127.0.0.1:8080/manager/status?XML=true')
+ self.connector_name = self.configuration.get('connector_name', None)
+ self.order = ORDER
+ self.definitions = CHARTS
+
+ def _get_data(self):
+ """
+ Format data received from http request
+ :return: dict
+ """
+ data = None
+ raw_data = self._get_raw_data()
+ if raw_data:
+ try:
+ xml = ET.fromstring(raw_data)
+ except ET.ParseError:
+ self.debug('%s is not a valid XML page. Please add "?XML=true" to the tomcat status page.' % self.url)
+ return None
+ data = {}
+
+ jvm = xml.find('jvm')
+
+ connector = None
+ if self.connector_name:
+ for conn in xml.findall('connector'):
+ if self.connector_name in conn.get('name'):
+ connector = conn
+ break
+ else:
+ connector = xml.find('connector')
+
+ memory = jvm.find('memory')
+ data['free'] = memory.get('free')
+ data['total'] = memory.get('total')
+
+ for pool in jvm.findall('memorypool'):
+ name = pool.get('name')
+ if 'Eden Space' in name:
+ data['eden_used'] = pool.get('usageUsed')
+ data['eden_committed'] = pool.get('usageCommitted')
+ data['eden_max'] = pool.get('usageMax')
+ elif 'Survivor Space' in name:
+ data['survivor_used'] = pool.get('usageUsed')
+ data['survivor_committed'] = pool.get('usageCommitted')
+ data['survivor_max'] = pool.get('usageMax')
+ elif 'Tenured Gen' in name or 'Old Gen' in name:
+ data['tenured_used'] = pool.get('usageUsed')
+ data['tenured_committed'] = pool.get('usageCommitted')
+ data['tenured_max'] = pool.get('usageMax')
+ elif name == 'Code Cache':
+ data['code_cache_used'] = pool.get('usageUsed')
+ data['code_cache_committed'] = pool.get('usageCommitted')
+ data['code_cache_max'] = pool.get('usageMax')
+ elif name == 'Compressed':
+ data['compressed_used'] = pool.get('usageUsed')
+ data['compressed_committed'] = pool.get('usageCommitted')
+ data['compressed_max'] = pool.get('usageMax')
+ elif name == 'Metaspace':
+ data['metaspace_used'] = pool.get('usageUsed')
+ data['metaspace_committed'] = pool.get('usageCommitted')
+ data['metaspace_max'] = pool.get('usageMax')
+
+ if connector:
+ thread_info = connector.find('threadInfo')
+ data['currentThreadsBusy'] = thread_info.get('currentThreadsBusy')
+ data['currentThreadCount'] = thread_info.get('currentThreadCount')
+
+ request_info = connector.find('requestInfo')
+ data['processingTime'] = request_info.get('processingTime')
+ data['requestCount'] = request_info.get('requestCount')
+ data['errorCount'] = request_info.get('errorCount')
+ data['bytesReceived'] = request_info.get('bytesReceived')
+ data['bytesSent'] = request_info.get('bytesSent')
+
+ return data or None
diff --git a/collectors/python.d.plugin/tomcat/tomcat.conf b/collectors/python.d.plugin/tomcat/tomcat.conf
new file mode 100644
index 000000000..c63f06cfa
--- /dev/null
+++ b/collectors/python.d.plugin/tomcat/tomcat.conf
@@ -0,0 +1,91 @@
+# netdata python.d.plugin configuration for tomcat
+#
+# This file is in YaML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 1
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# retries sets the number of retries to be made in case of failures.
+# If unset, the default for python.d.plugin is used.
+# Attempts to restore the service are made once every update_every
+# and only if the module has collected values in the past.
+# retries: 60
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# retries: 60 # the JOB's number of restoration attempts
+# autodetection_retry: 0 # the JOB's re-check interval in seconds
+#
+# In addition to the above, tomcat also supports the following:
+#
+# url: 'URL' # the URL to fetch tomcat's status stats
+#
+# if the URL is password protected, the following are supported:
+#
+# user: 'username'
+# pass: 'password'
+#
+# if you have multiple connectors, the following are supported:
+#
+# connector_name: 'ajp-bio-8009' # default is null, which uses the first connector in the status XML. See the example below.
+#
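+# For example, to monitor a specific AJP connector (a sketch; the job name and
+# connector name are illustrative, use the connector name shown in your status XML):
+#
+# local_ajp:
+#  name : 'local_ajp'
+#  url  : 'http://localhost:8080/manager/status?XML=true'
+#  connector_name : 'ajp-bio-8009'
+#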
+# ----------------------------------------------------------------------
+# AUTO-DETECTION JOBS
+# only one of them will run (they have the same name)
+
+localhost:
+ name : 'local'
+ url : 'http://localhost:8080/manager/status?XML=true'
+
+localipv4:
+ name : 'local'
+ url : 'http://127.0.0.1:8080/manager/status?XML=true'
+
+localipv6:
+ name : 'local'
+ url : 'http://[::1]:8080/manager/status?XML=true'
diff --git a/collectors/python.d.plugin/traefik/Makefile.inc b/collectors/python.d.plugin/traefik/Makefile.inc
new file mode 100644
index 000000000..926d56dda
--- /dev/null
+++ b/collectors/python.d.plugin/traefik/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += traefik/traefik.chart.py
+dist_pythonconfig_DATA += traefik/traefik.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += traefik/README.md traefik/Makefile.inc
+
diff --git a/collectors/python.d.plugin/traefik/README.md b/collectors/python.d.plugin/traefik/README.md
new file mode 100644
index 000000000..9b4a18208
--- /dev/null
+++ b/collectors/python.d.plugin/traefik/README.md
@@ -0,0 +1,54 @@
+# traefik
+
+This module uses the `health` API to provide statistics.
+
+It produces:
+
+1. **Responses** by statuses
+ * success (1xx, 2xx, 304)
+ * error (5xx)
+ * redirect (3xx except 304)
+ * bad (4xx)
+ * other (all other responses)
+
+2. **Responses** by codes
+ * 2xx (successful)
+ * 5xx (internal server errors)
+ * 3xx (redirect)
+ * 4xx (bad)
+ * 1xx (informational)
+ * other (non-standard responses)
+
+3. **Detailed Response Codes** requests/s (number of responses for each response code family individually)
+
+4. **Requests**/s
+ * request statistics
+
+5. **Total response time**
+ * sum of all response times
+
+6. **Average response time**
+
+7. **Average response time per iteration**
+
+8. **Uptime**
+ * Traefik server uptime
+
+### configuration
+
+Needs only the `url` of the server's `health` endpoint.
+
+Here is an example for local server:
+
+```yaml
+update_every : 1
+priority : 60000
+
+local:
+ url : 'http://localhost:8080/health'
+ retries : 10
+```
+
+Without configuration, the module attempts to connect to `http://localhost:8080/health`.
+
+---
diff --git a/collectors/python.d.plugin/traefik/traefik.chart.py b/collectors/python.d.plugin/traefik/traefik.chart.py
new file mode 100644
index 000000000..dc8933220
--- /dev/null
+++ b/collectors/python.d.plugin/traefik/traefik.chart.py
@@ -0,0 +1,195 @@
+# -*- coding: utf-8 -*-
+# Description: traefik netdata python.d module
+# Author: Alexandre Menezes (@ale_menezes)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from json import loads
+from collections import defaultdict
+from bases.FrameworkServices.UrlService import UrlService
+
+# default module values (can be overridden per job in `config`)
+update_every = 1
+priority = 60000
+retries = 10
+
+# charts order (can be overridden if you want less charts, or different order)
+ORDER = [
+ 'response_statuses',
+ 'response_codes',
+ 'detailed_response_codes',
+ 'requests',
+ 'total_response_time',
+ 'average_response_time',
+ 'average_response_time_per_iteration',
+ 'uptime'
+]
+
+CHARTS = {
+ 'response_statuses': {
+ 'options': [None, 'Response statuses', 'requests/s', 'responses', 'traefik.response_statuses', 'stacked'],
+ 'lines': [
+ ['successful_requests', 'success', 'incremental'],
+ ['server_errors', 'error', 'incremental'],
+ ['redirects', 'redirect', 'incremental'],
+ ['bad_requests', 'bad', 'incremental'],
+ ['other_requests', 'other', 'incremental']
+ ]
+ },
+ 'response_codes': {
+ 'options': [None, 'Responses by codes', 'requests/s', 'responses', 'traefik.response_codes', 'stacked'],
+ 'lines': [
+ ['2xx', None, 'incremental'],
+ ['5xx', None, 'incremental'],
+ ['3xx', None, 'incremental'],
+ ['4xx', None, 'incremental'],
+ ['1xx', None, 'incremental'],
+ ['other', None, 'incremental']
+ ]
+ },
+ 'detailed_response_codes': {
+ 'options': [None, 'Detailed response codes', 'requests/s', 'responses', 'traefik.detailed_response_codes',
+ 'stacked'],
+ 'lines': []
+ },
+ 'requests': {
+ 'options': [None, 'Requests', 'requests/s', 'requests', 'traefik.requests', 'line'],
+ 'lines': [
+ ['total_count', 'requests', 'incremental']
+ ]
+ },
+ 'total_response_time': {
+ 'options': [None, 'Total response time', 'seconds', 'timings', 'traefik.total_response_time', 'line'],
+ 'lines': [
+ ['total_response_time_sec', 'response', 'absolute', 1, 10000]
+ ]
+ },
+ 'average_response_time': {
+ 'options': [None, 'Average response time', 'milliseconds', 'timings', 'traefik.average_response_time', 'line'],
+ 'lines': [
+ ['average_response_time_sec', 'response', 'absolute', 1, 1000]
+ ]
+ },
+ 'average_response_time_per_iteration': {
+ 'options': [None, 'Average response time per iteration', 'milliseconds', 'timings',
+ 'traefik.average_response_time_per_iteration', 'line'],
+ 'lines': [
+ ['average_response_time_per_iteration_sec', 'response', 'incremental', 1, 10000]
+ ]
+ },
+ 'uptime': {
+ 'options': [None, 'Uptime', 'seconds', 'uptime', 'traefik.uptime', 'line'],
+ 'lines': [
+ ['uptime_sec', 'uptime', 'absolute']
+ ]
+ }
+}
+
+HEALTH_STATS = [
+ 'uptime_sec',
+ 'average_response_time_sec',
+ 'total_response_time_sec',
+ 'total_count',
+ 'total_status_code_count'
+]
+
+
+class Service(UrlService):
+ def __init__(self, configuration=None, name=None):
+ UrlService.__init__(self, configuration=configuration, name=name)
+ self.url = self.configuration.get('url', 'http://localhost:8080/health')
+ self.order = ORDER
+ self.definitions = CHARTS
+ self.data = {
+ 'successful_requests': 0, 'redirects': 0, 'bad_requests': 0,
+ 'server_errors': 0, 'other_requests': 0, '1xx': 0, '2xx': 0,
+ '3xx': 0, '4xx': 0, '5xx': 0, 'other': 0,
+ 'average_response_time_per_iteration_sec': 0
+ }
+ self.last_total_response_time = 0
+ self.last_total_count = 0
+
+ def _get_data(self):
+ data = self._get_raw_data()
+
+ if not data:
+ return None
+
+ data = loads(data)
+
+ self.get_data_per_code_status(raw_data=data)
+
+ self.get_data_per_code_family(raw_data=data)
+
+ self.get_data_per_code(raw_data=data)
+
+ self.data.update(fetch_data_(raw_data=data, metrics=HEALTH_STATS))
+
+ self.data['average_response_time_sec'] *= 1000000
+ self.data['total_response_time_sec'] *= 10000
+ if data['total_count'] != self.last_total_count:
+ self.data['average_response_time_per_iteration_sec'] = \
+ (data['total_response_time_sec'] - self.last_total_response_time) * \
+ 1000000 / (data['total_count'] - self.last_total_count)
+ else:
+ self.data['average_response_time_per_iteration_sec'] = 0
+ self.last_total_response_time = data['total_response_time_sec']
+ self.last_total_count = data['total_count']
+
+ return self.data or None
+
+ def get_data_per_code_status(self, raw_data):
+ data = defaultdict(int)
+ for code, value in raw_data['total_status_code_count'].items():
+ code_prefix = code[0]
+ if code_prefix == '1' or code_prefix == '2' or code == '304':
+ data['successful_requests'] += value
+ elif code_prefix == '3':
+ data['redirects'] += value
+ elif code_prefix == '4':
+ data['bad_requests'] += value
+ elif code_prefix == '5':
+ data['server_errors'] += value
+ else:
+ data['other_requests'] += value
+ self.data.update(data)
+
+ def get_data_per_code_family(self, raw_data):
+ data = defaultdict(int)
+ for code, value in raw_data['total_status_code_count'].items():
+ code_prefix = code[0]
+ if code_prefix == '1':
+ data['1xx'] += value
+ elif code_prefix == '2':
+ data['2xx'] += value
+ elif code_prefix == '3':
+ data['3xx'] += value
+ elif code_prefix == '4':
+ data['4xx'] += value
+ elif code_prefix == '5':
+ data['5xx'] += value
+ else:
+ data['other'] += value
+ self.data.update(data)
+
+ def get_data_per_code(self, raw_data):
+ for code, value in raw_data['total_status_code_count'].items():
+ if self.charts:
+ if code not in self.data:
+ self.charts['detailed_response_codes'].add_dimension([code, code, 'incremental'])
+ self.data[code] = value
+
+
+def fetch_data_(raw_data, metrics):
+ data = dict()
+
+ for metric in metrics:
+ value = raw_data
+ metrics_list = metric.split('.')
+ try:
+ for m in metrics_list:
+ value = value[m]
+ except KeyError:
+ continue
+ data['_'.join(metrics_list)] = value
+
+ return data
diff --git a/collectors/python.d.plugin/traefik/traefik.conf b/collectors/python.d.plugin/traefik/traefik.conf
new file mode 100644
index 000000000..909b9e549
--- /dev/null
+++ b/collectors/python.d.plugin/traefik/traefik.conf
@@ -0,0 +1,79 @@
+# netdata python.d.plugin configuration for traefik health data API
+#
+# This file is in YaML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 1
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# retries sets the number of retries to be made in case of failures.
+# If unset, the default for python.d.plugin is used.
+# Attempts to restore the service are made once every update_every
+# and only if the module has collected values in the past.
+# retries: 60
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# retries: 10 # the JOB's number of restoration attempts
+# autodetection_retry: 0 # the JOB's re-check interval in seconds
+#
+# In addition to the above, the traefik plugin also supports the following:
+#
+# url: '<scheme>://<host>:<port>/<health_page_api>'
+# # http://localhost:8080/health
+#
+# if the URL is password protected, the following are supported:
+#
+# user: 'username'
+# pass: 'password'
+#
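+# For example, a job for a password-protected health endpoint might look like
+# this (a sketch; the job name, URL and credentials are illustrative):
+#
+# secured:
+#  name: 'secured'
+#  url: 'https://traefik.example.com:8080/health'
+#  user: 'username'
+#  pass: 'password'
+#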
+# ----------------------------------------------------------------------
+# AUTO-DETECTION JOBS
+# only one of them will run (they have the same name)
+#
+local:
+ url: 'http://localhost:8080/health'
diff --git a/collectors/python.d.plugin/unbound/Makefile.inc b/collectors/python.d.plugin/unbound/Makefile.inc
new file mode 100644
index 000000000..59c306aed
--- /dev/null
+++ b/collectors/python.d.plugin/unbound/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += unbound/unbound.chart.py
+dist_pythonconfig_DATA += unbound/unbound.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += unbound/README.md unbound/Makefile.inc
+
diff --git a/collectors/python.d.plugin/unbound/README.md b/collectors/python.d.plugin/unbound/README.md
new file mode 100644
index 000000000..3b4fa16fd
--- /dev/null
+++ b/collectors/python.d.plugin/unbound/README.md
@@ -0,0 +1,76 @@
+# unbound
+
+Monitoring uses the remote control interface to fetch statistics.
+
+Provides the following charts:
+
+1. **Queries Processed**
+ * Ratelimited
+ * Cache Misses
+ * Cache Hits
+ * Expired
+ * Prefetched
+ * Recursive
+
+2. **Request List**
+ * Average Size
+ * Max Size
+ * Overwritten Requests
+ * Overruns
+ * Current Size
+ * User Requests
+
+3. **Recursion Timings**
+ * Average recursion processing time
+ * Median recursion processing time
+
+If extended stats are enabled, also provides:
+
+4. **Cache Sizes**
+ * Message Cache
+ * RRset Cache
+ * Infra Cache
+ * DNSSEC Key Cache
+ * DNSCrypt Shared Secret Cache
+ * DNSCrypt Nonce Cache
+
+### configuration
+
+Unbound must be manually configured to enable the remote-control protocol.
+Check the Unbound documentation for info on how to do this. Additionally,
+if you want to take advantage of the autodetection this plugin offers,
+you will need to make sure your `unbound.conf` file only uses spaces for
+indentation (the default config shipped by most distributions uses tabs
+instead of spaces).
+
+Once you have the Unbound control protocol enabled, you need to make sure
+that either the certificate and key are readable by Netdata (if you're
+using the regular control interface), or that the socket is accessible
+to Netdata (if you're using a UNIX socket for the control interface).
+
+By default, for the local system, everything can be auto-detected
+assuming Unbound is configured correctly and has been told to listen
+on the loopback interface or a UNIX socket. This is done by looking
+up info in the Unbound config file specified by the `ubconf` key.
+
+To enable extended stats for a given job, add `extended: yes` to the
+definition.
+
+You can also enable per-thread charts for a given job by adding
+`per_thread: yes` to the definition. Note that the number of threads
+is only checked on startup.
+
+A basic local configuration with extended statistics and per-thread
+charts looks like this:
+
+```yaml
+local:
+ ubconf: /etc/unbound/unbound.conf
+ extended: yes
+ per_thread: yes
+```
+
+While it's a bit more complicated to set up correctly, it is recommended
+that you use a UNIX socket as it provides far better performance.
+
+---
diff --git a/collectors/python.d.plugin/unbound/unbound.chart.py b/collectors/python.d.plugin/unbound/unbound.chart.py
new file mode 100644
index 000000000..52fcbf7e2
--- /dev/null
+++ b/collectors/python.d.plugin/unbound/unbound.chart.py
@@ -0,0 +1,275 @@
+# -*- coding: utf-8 -*-
+# Description: unbound netdata python.d module
+# Author: Austin S. Hemmelgarn (Ferroin)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+import os
+import sys
+
+from copy import deepcopy
+
+from bases.FrameworkServices.SocketService import SocketService
+from bases.loaders import YamlOrderedLoader
+
+PRECISION = 1000
+
+ORDER = ['queries', 'recursion', 'reqlist']
+
+CHARTS = {
+ 'queries': {
+ 'options': [None, 'Queries Processed', 'queries', 'Unbound', 'unbound.queries', 'line'],
+ 'lines': [
+ ['ratelimit', 'ratelimited', 'absolute', 1, 1],
+ ['cachemiss', 'cache_miss', 'absolute', 1, 1],
+ ['cachehit', 'cache_hit', 'absolute', 1, 1],
+ ['expired', 'expired', 'absolute', 1, 1],
+ ['prefetch', 'prefetched', 'absolute', 1, 1],
+ ['recursive', 'recursive', 'absolute', 1, 1]
+ ]
+ },
+ 'recursion': {
+ 'options': [None, 'Recursion Timings', 'seconds', 'Unbound', 'unbound.recursion', 'line'],
+ 'lines': [
+ ['recursive_avg', 'average', 'absolute', 1, PRECISION],
+ ['recursive_med', 'median', 'absolute', 1, PRECISION]
+ ]
+ },
+ 'reqlist': {
+ 'options': [None, 'Request List', 'items', 'Unbound', 'unbound.reqlist', 'line'],
+ 'lines': [
+ ['reqlist_avg', 'average_size', 'absolute', 1, 1],
+ ['reqlist_max', 'maximum_size', 'absolute', 1, 1],
+ ['reqlist_overwritten', 'overwritten_requests', 'absolute', 1, 1],
+ ['reqlist_exceeded', 'overruns', 'absolute', 1, 1],
+ ['reqlist_current', 'current_size', 'absolute', 1, 1],
+ ['reqlist_user', 'user_requests', 'absolute', 1, 1]
+ ]
+ }
+}
+
+# These also get added if we are told to use extended stats.
+EXTENDED_ORDER = ['cache']
+
+EXTENDED_CHARTS = {
+ 'cache': {
+ 'options': [None, 'Cache Sizes', 'items', 'Unbound', 'unbound.cache', 'stacked'],
+ 'lines': [
+ ['cache_message', 'message_cache', 'absolute', 1, 1],
+ ['cache_rrset', 'rrset_cache', 'absolute', 1, 1],
+ ['cache_infra', 'infra_cache', 'absolute', 1, 1],
+ ['cache_key', 'dnssec_key_cache', 'absolute', 1, 1],
+ ['cache_dnscss', 'dnscrypt_Shared_Secret_cache', 'absolute', 1, 1],
+ ['cache_dnscn', 'dnscrypt_Nonce_cache', 'absolute', 1, 1]
+ ]
+ }
+}
+
+# This is used as a template for the per-thread charts.
+PER_THREAD_CHARTS = {
+ '_queries': {
+ 'options': [None, '{longname} Queries Processed', 'queries', 'Queries Processed',
+ 'unbound.threads.queries', 'line'],
+ 'lines': [
+ ['{shortname}_ratelimit', 'ratelimited', 'absolute', 1, 1],
+ ['{shortname}_cachemiss', 'cache_miss', 'absolute', 1, 1],
+ ['{shortname}_cachehit', 'cache_hit', 'absolute', 1, 1],
+ ['{shortname}_expired', 'expired', 'absolute', 1, 1],
+ ['{shortname}_prefetch', 'prefetched', 'absolute', 1, 1],
+ ['{shortname}_recursive', 'recursive', 'absolute', 1, 1]
+ ]
+ },
+ '_recursion': {
+ 'options': [None, '{longname} Recursion Timings', 'seconds', 'Recursive Timings',
+ 'unbound.threads.recursion', 'line'],
+ 'lines': [
+ ['{shortname}_recursive_avg', 'average', 'absolute', 1, PRECISION],
+ ['{shortname}_recursive_med', 'median', 'absolute', 1, PRECISION]
+ ]
+ },
+ '_reqlist': {
+ 'options': [None, '{longname} Request List', 'items', 'Request List', 'unbound.threads.reqlist', 'line'],
+ 'lines': [
+ ['{shortname}_reqlist_avg', 'average_size', 'absolute', 1, 1],
+ ['{shortname}_reqlist_max', 'maximum_size', 'absolute', 1, 1],
+ ['{shortname}_reqlist_overwritten', 'overwritten_requests', 'absolute', 1, 1],
+ ['{shortname}_reqlist_exceeded', 'overruns', 'absolute', 1, 1],
+ ['{shortname}_reqlist_current', 'current_size', 'absolute', 1, 1],
+ ['{shortname}_reqlist_user', 'user_requests', 'absolute', 1, 1]
+ ]
+ }
+}
+
+
+# This maps the Unbound stat names to our names and precision requirements.
+STAT_MAP = {
+ 'total.num.queries_ip_ratelimited': ('ratelimit', 1),
+ 'total.num.cachehits': ('cachehit', 1),
+ 'total.num.cachemiss': ('cachemiss', 1),
+ 'total.num.zero_ttl': ('expired', 1),
+ 'total.num.prefetch': ('prefetch', 1),
+ 'total.num.recursivereplies': ('recursive', 1),
+ 'total.requestlist.avg': ('reqlist_avg', 1),
+ 'total.requestlist.max': ('reqlist_max', 1),
+ 'total.requestlist.overwritten': ('reqlist_overwritten', 1),
+ 'total.requestlist.exceeded': ('reqlist_exceeded', 1),
+ 'total.requestlist.current.all': ('reqlist_current', 1),
+ 'total.requestlist.current.user': ('reqlist_user', 1),
+ 'total.recursion.time.avg': ('recursive_avg', PRECISION),
+ 'total.recursion.time.median': ('recursive_med', PRECISION),
+ 'msg.cache.count': ('cache_message', 1),
+ 'rrset.cache.count': ('cache_rrset', 1),
+ 'infra.cache.count': ('cache_infra', 1),
+ 'key.cache.count': ('cache_key', 1),
+ 'dnscrypt_shared_secret.cache.count': ('cache_dnscss', 1),
+ 'dnscrypt_nonce.cache.count': ('cache_dnscn', 1)
+}
+
+# Same as above, but for per-thread stats.
+PER_THREAD_STAT_MAP = {
+ '{shortname}.num.queries_ip_ratelimited': ('{shortname}_ratelimit', 1),
+ '{shortname}.num.cachehits': ('{shortname}_cachehit', 1),
+ '{shortname}.num.cachemiss': ('{shortname}_cachemiss', 1),
+ '{shortname}.num.zero_ttl': ('{shortname}_expired', 1),
+ '{shortname}.num.prefetch': ('{shortname}_prefetch', 1),
+ '{shortname}.num.recursivereplies': ('{shortname}_recursive', 1),
+ '{shortname}.requestlist.avg': ('{shortname}_reqlist_avg', 1),
+ '{shortname}.requestlist.max': ('{shortname}_reqlist_max', 1),
+ '{shortname}.requestlist.overwritten': ('{shortname}_reqlist_overwritten', 1),
+ '{shortname}.requestlist.exceeded': ('{shortname}_reqlist_exceeded', 1),
+ '{shortname}.requestlist.current.all': ('{shortname}_reqlist_current', 1),
+ '{shortname}.requestlist.current.user': ('{shortname}_reqlist_user', 1),
+ '{shortname}.recursion.time.avg': ('{shortname}_recursive_avg', PRECISION),
+ '{shortname}.recursion.time.median': ('{shortname}_recursive_med', PRECISION)
+}
+
+
+# Used to actually generate per-thread charts.
+def _get_perthread_info(thread):
+ sname = 'thread{0}'.format(thread)
+ lname = 'Thread {0}'.format(thread)
+ charts = dict()
+ order = []
+ statmap = dict()
+
+ for item in PER_THREAD_CHARTS:
+ cname = '{0}{1}'.format(sname, item)
+ chart = deepcopy(PER_THREAD_CHARTS[item])
+ chart['options'][1] = chart['options'][1].format(longname=lname)
+
+ for index, line in enumerate(chart['lines']):
+ chart['lines'][index][0] = line[0].format(shortname=sname)
+
+ order.append(cname)
+ charts[cname] = chart
+
+ for key, value in PER_THREAD_STAT_MAP.items():
+ statmap[key.format(shortname=sname)] = (value[0].format(shortname=sname), value[1])
+
+ return (charts, order, statmap)
+
+
+class Service(SocketService):
+ def __init__(self, configuration=None, name=None):
+ # The unbound control protocol is always TLS encapsulated
+ # unless it's used over a UNIX socket, so enable TLS _before_
+ # doing the normal SocketService initialization.
+ configuration['tls'] = True
+ self.port = 8953
+ SocketService.__init__(self, configuration, name)
+ self.ext = self.configuration.get('extended', None)
+ self.ubconf = self.configuration.get('ubconf', None)
+ self.perthread = self.configuration.get('per_thread', False)
+ self.threads = None
+ self.order = deepcopy(ORDER)
+ self.definitions = deepcopy(CHARTS)
+ self.request = 'UBCT1 stats\n'
+ self.statmap = deepcopy(STAT_MAP)
+ self._parse_config()
+ self._auto_config()
+ self.debug('Extended stats: {0}'.format(self.ext))
+ self.debug('Per-thread stats: {0}'.format(self.perthread))
+ if self.ext:
+ self.order = self.order + EXTENDED_ORDER
+ self.definitions.update(EXTENDED_CHARTS)
+ if self.unix_socket:
+ self.debug('Using unix socket: {0}'.format(self.unix_socket))
+ else:
+ self.debug('Connecting to: {0}:{1}'.format(self.host, self.port))
+ self.debug('Using key: {0}'.format(self.key))
+ self.debug('Using certificate: {0}'.format(self.cert))
+
+ def _auto_config(self):
+ if self.ubconf and os.access(self.ubconf, os.R_OK):
+ self.debug('Unbound config: {0}'.format(self.ubconf))
+ conf = YamlOrderedLoader.load_config_from_file(self.ubconf)[0]
+ if self.ext is None:
+ if 'extended-statistics' in conf['server']:
+ self.ext = conf['server']['extended-statistics']
+ if 'remote-control' in conf:
+ if conf['remote-control'].get('control-use-cert', False):
+ self.key = self.key or conf['remote-control'].get('control-key-file')
+ self.cert = self.cert or conf['remote-control'].get('control-cert-file')
+ self.port = self.port or conf['remote-control'].get('control-port')
+ else:
+ self.unix_socket = self.unix_socket or conf['remote-control'].get('control-interface')
+ else:
+ self.debug('Unbound configuration not found.')
+ if not self.key:
+ self.key = '/etc/unbound/unbound_control.key'
+ if not self.cert:
+ self.cert = '/etc/unbound/unbound_control.pem'
+ if not self.port:
+ self.port = 8953
+
+ def _generate_perthread_charts(self):
+ tmporder = list()
+ for thread in range(0, self.threads):
+ charts, order, statmap = _get_perthread_info(thread)
+ tmporder.extend(order)
+ self.definitions.update(charts)
+ self.statmap.update(statmap)
+ self.order.extend(sorted(tmporder))
+
+ def check(self):
+ # Check if authentication is working.
+ self._connect()
+ result = bool(self._sock)
+ self._disconnect()
+ # If auth works, and we need per-thread charts, query the server
+ # to see how many threads it's using. This somewhat abuses the
+ # SocketService API to get the data we need.
+ if result and self.perthread:
+ tmp = self.request
+ if sys.version_info[0] < 3:
+ self.request = 'UBCT1 status\n'
+ else:
+ self.request = b'UBCT1 status\n'
+ raw = self._get_raw_data()
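+ # The status reply includes a line such as 'threads: 2';
+ # use it to size the per-thread charts.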
+ for line in raw.splitlines():
+ if line.startswith('threads'):
+ self.threads = int(line.split()[1])
+ self._generate_perthread_charts()
+ break
+ if self.threads is None:
+ self.info('Unable to auto-detect thread counts, disabling per-thread stats.')
+ self.perthread = False
+ self.request = tmp
+ return result
+
+ @staticmethod
+ def _check_raw_data(data):
+ # The server will close the connection when it's done sending
+ # data, so just keep looping until that happens.
+ return False
+
+ def _get_data(self):
+ raw = self._get_raw_data()
+ data = dict()
+ tmp = dict()
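+ # Each reply line has the form 'stat.name=value'; build a lookup
+ # table first, then translate the stats we know about via statmap,
+ # applying the per-stat precision multiplier.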
+ for line in raw.splitlines():
+ stat = line.split('=')
+ tmp[stat[0]] = stat[1]
+ for item in self.statmap:
+ if item in tmp:
+ data[self.statmap[item][0]] = float(tmp[item]) * self.statmap[item][1]
+ return data
diff --git a/collectors/python.d.plugin/unbound/unbound.conf b/collectors/python.d.plugin/unbound/unbound.conf
new file mode 100644
index 000000000..46c4b097f
--- /dev/null
+++ b/collectors/python.d.plugin/unbound/unbound.conf
@@ -0,0 +1,87 @@
+# netdata python.d.plugin configuration for unbound
+#
+# This file is in YaML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 1
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# retries sets the number of retries to be made in case of failures.
+# If unset, the default for python.d.plugin is used.
+# Attempts to restore the service are made once every update_every
+# and only if the module has collected values in the past.
+# retries: 60
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# retries: 60 # the JOB's number of restoration attempts
+# autodetection_retry: 0 # the JOB's re-check interval in seconds
+#
+# Additionally to the above, unbound also supports the following:
+#
+# host: localhost # The host to connect to.
+# port: 8953 # What port to use (defaults to 8953)
+# socket: /path/to/socket # A path to a UNIX socket to use instead
+# # of a TCP connection
+# tls_key_file: /path/to/key # The key file to use for authentication
+# tls_cert_file: /path/to/cert # The certificate to use for authentication
+# extended: false # Whether to collect extended stats or not
+# per_thread: false # Whether to show charts for per-thread stats
+#
+# In addition to the above, you can set the following to try and
+# auto-detect most settings based on the unbound configuration:
+#
+# ubconf: /etc/unbound/unbound.conf
+#
+# Note that the SSL key and certificate need to be readable by the netdata
+# user if you're using the regular control interface.
+# If you're using a UNIX socket, that has to be readable by the netdata user.
+
+# The following should work for most users if they have unbound configured
+# correctly.
+local:
+ ubconf: /etc/unbound/unbound.conf
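+
+# A hypothetical job for a TCP connection to the control interface, in case
+# auto-detection via 'ubconf' is not possible (the paths and port shown are
+# the usual defaults; adjust them to your setup):
+#
+# remote:
+#  host: 127.0.0.1
+#  port: 8953
+#  tls_key_file: /etc/unbound/unbound_control.key
+#  tls_cert_file: /etc/unbound/unbound_control.pem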
diff --git a/collectors/python.d.plugin/uwsgi/Makefile.inc b/collectors/python.d.plugin/uwsgi/Makefile.inc
new file mode 100644
index 000000000..75d96de0e
--- /dev/null
+++ b/collectors/python.d.plugin/uwsgi/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += uwsgi/uwsgi.chart.py
+dist_pythonconfig_DATA += uwsgi/uwsgi.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += uwsgi/README.md uwsgi/Makefile.inc
+
diff --git a/collectors/python.d.plugin/uwsgi/README.md b/collectors/python.d.plugin/uwsgi/README.md
new file mode 100644
index 000000000..a062710df
--- /dev/null
+++ b/collectors/python.d.plugin/uwsgi/README.md
@@ -0,0 +1,37 @@
+# uwsgi
+
+This module monitors uwsgi performance metrics.
+
+https://uwsgi-docs.readthedocs.io/en/latest/StatsServer.html
+
+Chart lines are created dynamically based on the number of workers.
+
+The following charts are drawn:
+
+1. **Requests**
+ * requests per second
+ * transmitted data
+ * average request time
+
+2. **Memory**
+ * rss
+ * vsz
+
+3. **Exceptions**
+4. **Harakiris**
+5. **Respawns**
+
+### configuration
+
+```yaml
+socket:
+ name : 'local'
+ socket : '/tmp/stats.socket'
+
+localhost:
+ name : 'local'
+ host : 'localhost'
+ port : 1717
+```
+
+When no configuration file is found, the module tries to connect to the TCP/IP socket `localhost:1717`.
diff --git a/collectors/python.d.plugin/uwsgi/uwsgi.chart.py b/collectors/python.d.plugin/uwsgi/uwsgi.chart.py
new file mode 100644
index 000000000..5ebcfb55b
--- /dev/null
+++ b/collectors/python.d.plugin/uwsgi/uwsgi.chart.py
@@ -0,0 +1,183 @@
+# -*- coding: utf-8 -*-
+# Description: uwsgi netdata python.d module
+# Author: Robbert Segeren (robbert-ef)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+import json
+from copy import deepcopy
+from bases.FrameworkServices.SocketService import SocketService
+
+# default module values (can be overridden per job in `config`)
+# update_every = 2
+priority = 60000
+retries = 60
+
+ORDER = [
+ 'requests',
+ 'tx',
+ 'avg_rt',
+ 'memory_rss',
+ 'memory_vsz',
+ 'exceptions',
+ 'harakiri',
+ 'respawn',
+]
+
+DYNAMIC_CHARTS = [
+ 'requests',
+ 'tx',
+ 'avg_rt',
+ 'memory_rss',
+ 'memory_vsz',
+]
+
+# NOTE: lines are created dynamically in `check()` method
+CHARTS = {
+ 'requests': {
+ 'options': [None, 'Requests', 'requests/s', 'requests', 'uwsgi.requests', 'stacked'],
+ 'lines': [
+ ['requests', 'requests', 'incremental']
+ ]
+ },
+ 'tx': {
+ 'options': [None, 'Transmitted data', 'KB/s', 'requests', 'uwsgi.tx', 'stacked'],
+ 'lines': [
+ ['tx', 'tx', 'incremental']
+ ]
+ },
+ 'avg_rt': {
+ 'options': [None, 'Average request time', 'ms', 'requests', 'uwsgi.avg_rt', 'line'],
+ 'lines': [
+ ['avg_rt', 'avg_rt', 'absolute']
+ ]
+ },
+ 'memory_rss': {
+ 'options': [None, 'RSS (Resident Set Size)', 'MB', 'memory', 'uwsgi.memory_rss', 'stacked'],
+ 'lines': [
+ ['memory_rss', 'memory_rss', 'absolute', 1, 1024 * 1024]
+ ]
+ },
+ 'memory_vsz': {
+ 'options': [None, 'VSZ (Virtual Memory Size)', 'MB', 'memory', 'uwsgi.memory_vsz', 'stacked'],
+ 'lines': [
+ ['memory_vsz', 'memory_vsz', 'absolute', 1, 1024 * 1024]
+ ]
+ },
+ 'exceptions': {
+ 'options': [None, 'Exceptions', 'exceptions', 'exceptions', 'uwsgi.exceptions', 'line'],
+ 'lines': [
+ ['exceptions', 'exceptions', 'incremental']
+ ]
+ },
+ 'harakiri': {
+ 'options': [None, 'Harakiris', 'harakiris', 'harakiris', 'uwsgi.harakiris', 'line'],
+ 'lines': [
+ ['harakiri_count', 'harakiris', 'incremental']
+ ]
+ },
+ 'respawn': {
+ 'options': [None, 'Respawns', 'respawns', 'respawns', 'uwsgi.respawns', 'line'],
+ 'lines': [
+ ['respawn_count', 'respawns', 'incremental']
+ ]
+ },
+}
+
+
+class Service(SocketService):
+ def __init__(self, configuration=None, name=None):
+ super(Service, self).__init__(configuration=configuration, name=name)
+ self.url = self.configuration.get('host', 'localhost')
+ self.port = self.configuration.get('port', 1717)
+ self.order = ORDER
+ self.definitions = deepcopy(CHARTS)
+
+ # Clear dynamic dimensions, these are added during `_get_data()` to allow adding workers at run-time
+ for chart in DYNAMIC_CHARTS:
+ self.definitions[chart]['lines'] = []
+
+ self.last_result = {}
+ self.workers = []
+
+ def read_data(self):
+ """
+ Read data from socket and parse as JSON.
+ :return: (dict) stats
+ """
+ raw_data = self._get_raw_data()
+ if not raw_data:
+ return None
+ try:
+ return json.loads(raw_data)
+ except ValueError as err:
+ self.error(err)
+ return None
+
+ def check(self):
+ """
+ Parse configuration and check if we can read data.
+ :return: boolean
+ """
+ self._parse_config()
+ return bool(self.read_data())
+
+ def add_worker_dimensions(self, key):
+ """
+ Helper to add dimensions for a worker.
+ :param key: (int or str) worker identifier
+ :return:
+ """
+ for chart in DYNAMIC_CHARTS:
+ for line in CHARTS[chart]['lines']:
+ dimension_id = '{}_{}'.format(line[0], key)
+ dimension_name = str(key)
+
+ dimension = [dimension_id, dimension_name] + line[2:]
+ self.charts[chart].add_dimension(dimension)
+
+ @staticmethod
+ def _check_raw_data(data):
+ # The server will close the connection when it's done sending
+ # data, so just keep looping until that happens.
+ return False
+
+ def _get_data(self):
+ """
+ Read data from socket
+ :return: dict
+ """
+ stats = self.read_data()
+ if not stats:
+ return None
+
+ result = {
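+        # Aggregate exception/harakiri/respawn counters across all workers;
+        # the per-worker metrics below are keyed as '<metric>_<pid>' to match
+        # the dimension ids created in add_worker_dimensions().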
+ 'exceptions': 0,
+ 'harakiri_count': 0,
+ 'respawn_count': 0,
+ }
+
+ for worker in stats['workers']:
+ key = worker['pid']
+
+ # Add dimensions for new workers
+ if key not in self.workers:
+ self.add_worker_dimensions(key)
+ self.workers.append(key)
+
+ result['requests_{}'.format(key)] = worker['requests']
+ result['tx_{}'.format(key)] = worker['tx']
+ result['avg_rt_{}'.format(key)] = worker['avg_rt']
+
+ # avg_rt is not reset by uwsgi, so reset here
+ if self.last_result.get('requests_{}'.format(key)) == worker['requests']:
+ result['avg_rt_{}'.format(key)] = 0
+
+ result['memory_rss_{}'.format(key)] = worker['rss']
+ result['memory_vsz_{}'.format(key)] = worker['vsz']
+
+ result['exceptions'] += worker['exceptions']
+ result['harakiri_count'] += worker['harakiri_count']
+ result['respawn_count'] += worker['respawn_count']
+
+ self.last_result = result
+ return result
diff --git a/collectors/python.d.plugin/uwsgi/uwsgi.conf b/collectors/python.d.plugin/uwsgi/uwsgi.conf
new file mode 100644
index 000000000..be1c2ada3
--- /dev/null
+++ b/collectors/python.d.plugin/uwsgi/uwsgi.conf
@@ -0,0 +1,94 @@
+# netdata python.d.plugin configuration for uwsgi
+#
+# This file is in YaML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 1
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# retries sets the number of retries to be made in case of failures.
+# If unset, the default for python.d.plugin is used.
+# Attempts to restore the service are made once every update_every
+# and only if the module has collected values in the past.
+# retries: 60
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# retries: 60 # the JOB's number of restoration attempts
+# autodetection_retry: 0 # the JOB's re-check interval in seconds
+#
+# Additionally to the above, uwsgi also supports the following:
+#
+# socket: 'path/to/uwsgistats.sock'
+#
+# or
+# host: 'IP or HOSTNAME' # the host to connect to
+# port: PORT # the port to connect to
+#
+# ----------------------------------------------------------------------
+# AUTO-DETECTION JOBS
+# only one of them will run (they have the same name)
+#
+
+socket:
+ name : 'local'
+ socket : '/tmp/stats.socket'
+
+localhost:
+ name : 'local'
+ host : 'localhost'
+ port : 1717
+
+localipv4:
+ name : 'local'
+ host : '127.0.0.1'
+ port : 1717
+
+localipv6:
+ name : 'local'
+ host : '::1'
+ port : 1717
diff --git a/collectors/python.d.plugin/varnish/Makefile.inc b/collectors/python.d.plugin/varnish/Makefile.inc
new file mode 100644
index 000000000..2469b0592
--- /dev/null
+++ b/collectors/python.d.plugin/varnish/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += varnish/varnish.chart.py
+dist_pythonconfig_DATA += varnish/varnish.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += varnish/README.md varnish/Makefile.inc
+
diff --git a/collectors/python.d.plugin/varnish/README.md b/collectors/python.d.plugin/varnish/README.md
new file mode 100644
index 000000000..96c7cafaa
--- /dev/null
+++ b/collectors/python.d.plugin/varnish/README.md
@@ -0,0 +1,69 @@
+# varnish
+
+This module uses the `varnishstat` command to provide varnish cache statistics.
+
+It produces:
+
+1. **Connections Statistics** in connections/s
+ * accepted
+ * dropped
+
+2. **Client Requests** in requests/s
+ * received
+
+3. **All History Hit Rate Ratio** in percent
+ * hit
+ * miss
+ * hitpass
+
+4. **Current Poll Hit Rate Ratio** in percent
+ * hit
+ * miss
+ * hitpass
+
+5. **Expired Objects** in expired/s
+ * objects
+
+6. **Least Recently Used Nuked Objects** in nuked/s
+ * objects
+
+
+7. **Number Of Threads In All Pools** in threads
+ * threads
+
+8. **Threads Statistics** in threads/s
+ * created
+ * failed
+ * limited
+
+9. **Current Queue Length** in requests
+ * in queue
+
+10. **Backend Connections Statistics** in connections/s
+ * successful
+ * unhealthy
+ * reused
+ * closed
+ * recycled
+ * failed
+
+11. **Requests To The Backend** in requests/s
+ * sent
+
+12. **ESI Statistics** in problems/s
+ * errors
+ * warnings
+
+13. **Memory Usage** in MB
+ * free
+ * allocated
+
+14. **Uptime** in seconds
+ * uptime
+
+
+### configuration
+
+No configuration is needed.
+
+---
diff --git a/collectors/python.d.plugin/varnish/varnish.chart.py b/collectors/python.d.plugin/varnish/varnish.chart.py
new file mode 100644
index 000000000..d889c2b33
--- /dev/null
+++ b/collectors/python.d.plugin/varnish/varnish.chart.py
@@ -0,0 +1,252 @@
+# -*- coding: utf-8 -*-
+# Description: varnish netdata python.d module
+# Author: l2isbad
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+import re
+
+from bases.collection import find_binary
+from bases.FrameworkServices.ExecutableService import ExecutableService
+
+# default module values (can be overridden per job in `config`)
+# update_every = 2
+priority = 60000
+retries = 60
+
+ORDER = [
+ 'session_connections',
+ 'client_requests',
+ 'all_time_hit_rate',
+ 'current_poll_hit_rate',
+ 'cached_objects_expired',
+ 'cached_objects_nuked',
+ 'threads_total',
+ 'threads_statistics',
+ 'threads_queue_len',
+ 'backend_connections',
+ 'backend_requests',
+ 'esi_statistics',
+ 'memory_usage',
+ 'uptime'
+]
+
+CHARTS = {
+ 'session_connections': {
+ 'options': [None, 'Connections Statistics', 'connections/s',
+ 'client metrics', 'varnish.session_connection', 'line'],
+ 'lines': [
+ ['sess_conn', 'accepted', 'incremental'],
+ ['sess_dropped', 'dropped', 'incremental']
+ ]
+ },
+ 'client_requests': {
+ 'options': [None, 'Client Requests', 'requests/s',
+ 'client metrics', 'varnish.client_requests', 'line'],
+ 'lines': [
+ ['client_req', 'received', 'incremental']
+ ]
+ },
+ 'all_time_hit_rate': {
+ 'options': [None, 'All History Hit Rate Ratio', 'percent', 'cache performance',
+ 'varnish.all_time_hit_rate', 'stacked'],
+ 'lines': [
+ ['cache_hit', 'hit', 'percentage-of-absolute-row'],
+ ['cache_miss', 'miss', 'percentage-of-absolute-row'],
+ ['cache_hitpass', 'hitpass', 'percentage-of-absolute-row']]
+ },
+ 'current_poll_hit_rate': {
+ 'options': [None, 'Current Poll Hit Rate Ratio', 'percent', 'cache performance',
+ 'varnish.current_poll_hit_rate', 'stacked'],
+ 'lines': [
+ ['cache_hit', 'hit', 'percentage-of-incremental-row'],
+ ['cache_miss', 'miss', 'percentage-of-incremental-row'],
+ ['cache_hitpass', 'hitpass', 'percentage-of-incremental-row']
+ ]
+ },
+ 'cached_objects_expired': {
+ 'options': [None, 'Expired Objects', 'expired/s', 'cache performance',
+ 'varnish.cached_objects_expired', 'line'],
+ 'lines': [
+ ['n_expired', 'objects', 'incremental']
+ ]
+ },
+ 'cached_objects_nuked': {
+ 'options': [None, 'Least Recently Used Nuked Objects', 'nuked/s', 'cache performance',
+ 'varnish.cached_objects_nuked', 'line'],
+ 'lines': [
+ ['n_lru_nuked', 'objects', 'incremental']
+ ]
+ },
+ 'threads_total': {
+ 'options': [None, 'Number Of Threads In All Pools', 'number', 'thread related metrics',
+ 'varnish.threads_total', 'line'],
+ 'lines': [
+ ['threads', None, 'absolute']
+ ]
+ },
+ 'threads_statistics': {
+ 'options': [None, 'Threads Statistics', 'threads/s', 'thread related metrics',
+ 'varnish.threads_statistics', 'line'],
+ 'lines': [
+ ['threads_created', 'created', 'incremental'],
+ ['threads_failed', 'failed', 'incremental'],
+ ['threads_limited', 'limited', 'incremental']
+ ]
+ },
+ 'threads_queue_len': {
+ 'options': [None, 'Current Queue Length', 'requests', 'thread related metrics',
+ 'varnish.threads_queue_len', 'line'],
+ 'lines': [
+ ['thread_queue_len', 'in queue']
+ ]
+ },
+ 'backend_connections': {
+ 'options': [None, 'Backend Connections Statistics', 'connections/s', 'backend metrics',
+ 'varnish.backend_connections', 'line'],
+ 'lines': [
+ ['backend_conn', 'successful', 'incremental'],
+ ['backend_unhealthy', 'unhealthy', 'incremental'],
+ ['backend_reuse', 'reused', 'incremental'],
+ ['backend_toolate', 'closed', 'incremental'],
+ ['backend_recycle', 'recycled', 'incremental'],
+ ['backend_fail', 'failed', 'incremental']
+ ]
+ },
+ 'backend_requests': {
+ 'options': [None, 'Requests To The Backend', 'requests/s', 'backend metrics',
+ 'varnish.backend_requests', 'line'],
+ 'lines': [
+ ['backend_req', 'sent', 'incremental']
+ ]
+ },
+ 'esi_statistics': {
+ 'options': [None, 'ESI Statistics', 'problems/s', 'esi related metrics', 'varnish.esi_statistics', 'line'],
+ 'lines': [
+ ['esi_errors', 'errors', 'incremental'],
+ ['esi_warnings', 'warnings', 'incremental']
+ ]
+ },
+ 'memory_usage': {
+ 'options': [None, 'Memory Usage', 'MB', 'memory usage', 'varnish.memory_usage', 'stacked'],
+ 'lines': [
+ ['memory_free', 'free', 'absolute', 1, 1 << 20],
+ ['memory_allocated', 'allocated', 'absolute', 1, 1 << 20]]
+ },
+ 'uptime': {
+ 'lines': [
+ ['uptime', None, 'absolute']
+ ],
+ 'options': [None, 'Uptime', 'seconds', 'uptime', 'varnish.uptime', 'line']
+ }
+}
+
+
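+# 'varnishstat -1' prints one counter per line, e.g. 'MAIN.cache_hit 1234 ...'
+# (older varnish versions omit the 'MAIN.' prefix). Backend counters start
+# with 'VBE.' and come in two flavours, matched by the two backend regexes
+# below.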
+class Parser:
+ _backend_new = re.compile(r'VBE.([\d\w_.]+)\(.*?\).(beresp[\w_]+)\s+(\d+)')
+ _backend_old = re.compile(r'VBE\.[\d\w-]+\.([\w\d_]+).(beresp[\w_]+)\s+(\d+)')
+ _default = re.compile(r'([A-Z]+\.)?([\d\w_.]+)\s+(\d+)')
+
+ def __init__(self):
+ self.re_default = None
+ self.re_backend = None
+
+ def init(self, data):
+ data = ''.join(data)
+ parsed_main = Parser._default.findall(data)
+ if parsed_main:
+ self.re_default = Parser._default
+
+ parsed_backend = Parser._backend_new.findall(data)
+ if parsed_backend:
+ self.re_backend = Parser._backend_new
+ else:
+ parsed_backend = Parser._backend_old.findall(data)
+ if parsed_backend:
+ self.re_backend = Parser._backend_old
+
+ def server_stats(self, data):
+ return self.re_default.findall(''.join(data))
+
+ def backend_stats(self, data):
+ return self.re_backend.findall(''.join(data))
+
+
+class Service(ExecutableService):
+ def __init__(self, configuration=None, name=None):
+ ExecutableService.__init__(self, configuration=configuration, name=name)
+ self.order = ORDER
+ self.definitions = CHARTS
+ varnishstat = find_binary('varnishstat')
+ self.command = [varnishstat, '-1'] if varnishstat else None
+ self.parser = Parser()
+
+ def check(self):
+ if not self.command:
+ self.error("Can't locate 'varnishstat' binary or binary is not executable by user netdata")
+ return False
+
+ # STDOUT is not empty
+ reply = self._get_raw_data()
+ if not reply:
+ self.error("No output from 'varnishstat'. Not enough privileges?")
+ return False
+
+ self.parser.init(reply)
+
+ # Output is parsable
+ if not self.parser.re_default:
+ self.error("Can't parse the output...")
+ return False
+
+ if self.parser.re_backend:
+ backends = [b[0] for b in self.parser.backend_stats(reply)[::2]]
+ self.create_backends_charts(backends)
+ return True
+
+ def get_data(self):
+ """
+ Format data received from shell command
+ :return: dict
+ """
+ raw = self._get_raw_data()
+ if not raw:
+ return None
+
+ data = dict()
+ server_stats = self.parser.server_stats(raw)
+ if not server_stats:
+ return None
+
+ if self.parser.re_backend:
+ backend_stats = self.parser.backend_stats(raw)
+ data.update(dict(('_'.join([name, param]), value) for name, param, value in backend_stats))
+
+ data.update(dict((param, value) for _, param, value in server_stats))
+
+ # varnish 5 uses default.g_bytes and default.g_space
+ data['memory_allocated'] = data.get('s0.g_bytes') or data.get('default.g_bytes')
+ data['memory_free'] = data.get('s0.g_space') or data.get('default.g_space')
+
+ return data
+
+ def create_backends_charts(self, backends):
+ for backend in backends:
+ chart_name = ''.join([backend, '_response_statistics'])
+ title = 'Backend "{0}"'.format(backend.capitalize())
+ hdr_bytes = ''.join([backend, '_beresp_hdrbytes'])
+ body_bytes = ''.join([backend, '_beresp_bodybytes'])
+
+ chart = {
+ chart_name:
+ {
+ 'options': [None, title, 'kilobits/s', 'backend response statistics',
+ 'varnish.backend', 'area'],
+ 'lines': [
+ [hdr_bytes, 'header', 'incremental', 8, 1000],
+ [body_bytes, 'body', 'incremental', -8, 1000]
+ ]
+ }
+ }
+
+ self.order.insert(0, chart_name)
+ self.definitions.update(chart)
diff --git a/collectors/python.d.plugin/varnish/varnish.conf b/collectors/python.d.plugin/varnish/varnish.conf
new file mode 100644
index 000000000..4b069d514
--- /dev/null
+++ b/collectors/python.d.plugin/varnish/varnish.conf
@@ -0,0 +1,64 @@
+# netdata python.d.plugin configuration for varnish
+#
+# This file is in YaML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 1
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# retries sets the number of retries to be made in case of failures.
+# If unset, the default for python.d.plugin is used.
+# Attempts to restore the service are made once every update_every
+# and only if the module has collected values in the past.
+# retries: 60
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# retries: 60 # the JOB's number of restoration attempts
+# autodetection_retry: 0 # the JOB's re-check interval in seconds
+#
+# ----------------------------------------------------------------------
diff --git a/collectors/python.d.plugin/w1sensor/Makefile.inc b/collectors/python.d.plugin/w1sensor/Makefile.inc
new file mode 100644
index 000000000..bddf146f5
--- /dev/null
+++ b/collectors/python.d.plugin/w1sensor/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += w1sensor/w1sensor.chart.py
+dist_pythonconfig_DATA += w1sensor/w1sensor.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += w1sensor/README.md w1sensor/Makefile.inc
+
diff --git a/collectors/python.d.plugin/w1sensor/README.md b/collectors/python.d.plugin/w1sensor/README.md
new file mode 100644
index 000000000..b18f08351
--- /dev/null
+++ b/collectors/python.d.plugin/w1sensor/README.md
@@ -0,0 +1,13 @@
+# w1sensor
+
+Monitors data from 1-Wire sensors.
+On Linux these are supported by the wire, w1_gpio, and w1_therm kernel modules.
+Currently only temperature sensors are supported and they are detected automatically.
+
+Charts are created dynamically based on the number of detected sensors.
+
+### configuration
+
+For detailed configuration information please read the [`w1sensor.conf`](w1sensor.conf) file.
+
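+A hypothetical example, mirroring the commented sample in `w1sensor.conf`,
+mapping sensor identifiers to human readable names (the identifiers shown are illustrative):
+
+```yaml
+name_00000022276e: 'Machine room'
+name_00000022298f: 'Rack 12'
+```
+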
+---
diff --git a/collectors/python.d.plugin/w1sensor/w1sensor.chart.py b/collectors/python.d.plugin/w1sensor/w1sensor.chart.py
new file mode 100644
index 000000000..493c4a135
--- /dev/null
+++ b/collectors/python.d.plugin/w1sensor/w1sensor.chart.py
@@ -0,0 +1,93 @@
+# -*- coding: utf-8 -*-
+# Description: 1-wire temperature monitor netdata python.d module
+# Author: Diomidis Spinellis <http://www.spinellis.gr>
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+import os
+import re
+from bases.FrameworkServices.SimpleService import SimpleService
+
+# default module values (can be overridden per job in `config`)
+update_every = 5
+
+# Location where 1-Wire devices can be found
+W1_DIR = '/sys/bus/w1/devices/'
+
+# Lines matching the following regular expression contain a temperature value
+RE_TEMP = re.compile(r' t=(\d+)')
+
+ORDER = ['temp']
+
+CHARTS = {
+ 'temp': {
+ 'options': [None, '1-Wire Temperature Sensor', 'Celsius', 'Temperature', 'w1sensor.temp', 'line'],
+ 'lines': []
+ }
+}
+
+# Known and supported family members
+# Based on linux/drivers/w1/w1_family.h and w1/slaves/w1_therm.c
+THERM_FAMILY = {
+ '10': 'W1_THERM_DS18S20',
+ '22': 'W1_THERM_DS1822',
+ '28': 'W1_THERM_DS18B20',
+ '3b': 'W1_THERM_DS1825',
+ '42': 'W1_THERM_DS28EA00',
+}
+
+
+class Service(SimpleService):
+ """Provide netdata service for 1-Wire sensors"""
+ def __init__(self, configuration=None, name=None):
+ SimpleService.__init__(self, configuration=configuration, name=name)
+ self.order = ORDER
+ self.definitions = CHARTS
+ self.probes = []
+
+ def check(self):
+ """Auto-detect available 1-Wire sensors, setting line definitions
+ and probes to be monitored."""
+ try:
+ file_names = os.listdir(W1_DIR)
+ except OSError as err:
+ self.error(err)
+ return False
+
+ lines = []
+ for file_name in file_names:
+ if file_name[2] != '-':
+ continue
+ if not file_name[0:2] in THERM_FAMILY:
+ continue
+
+ self.probes.append(file_name)
+ identifier = file_name[3:]
+ name = identifier
+ config_name = self.configuration.get('name_' + identifier)
+ if config_name:
+ name = config_name
+ lines.append(['w1sensor_temp_' + identifier, name, 'absolute',
+ 1, 10])
+ self.definitions['temp']['lines'] = lines
+ return len(self.probes) > 0
+
+ def get_data(self):
+ """Return data read from sensors."""
+ data = dict()
+
+ for file_name in self.probes:
+ file_path = W1_DIR + file_name + '/w1_slave'
+ identifier = file_name[3:]
+ try:
+ with open(file_path, 'r') as device_file:
+ for line in device_file:
+ matched = RE_TEMP.search(line)
+ if matched:
+ # Round to one decimal digit to filter-out noise
+ value = round(int(matched.group(1)) / 1000., 1)
+ value = int(value * 10)
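+ # Report tenths of a degree; the chart line divides by 10
+ # to show degrees Celsius.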
+ data['w1sensor_temp_' + identifier] = value
+ except (OSError, IOError) as err:
+ self.error(err)
+ continue
+ return data or None
diff --git a/collectors/python.d.plugin/w1sensor/w1sensor.conf b/collectors/python.d.plugin/w1sensor/w1sensor.conf
new file mode 100644
index 000000000..a4aed8dd7
--- /dev/null
+++ b/collectors/python.d.plugin/w1sensor/w1sensor.conf
@@ -0,0 +1,74 @@
+# netdata python.d.plugin configuration for w1sensor
+#
+# This file is in YaML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 5
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# retries sets the number of retries to be made in case of failures.
+# If unset, the default for python.d.plugin is used.
+# Attempts to restore the service are made once every update_every
+# and only if the module has collected values in the past.
+# retries: 60
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 5 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# retries: 60 # the JOB's number of restoration attempts
+# autodetection_retry: 0 # the JOB's re-check interval in seconds
+#
+# Additionally to the above, w1sensor also supports the following:
+#
+# name_<1-Wire id>: '<human readable name>'
+# This allows associating a human readable name with a sensor's 1-Wire
+# identifier. Example:
+# name_00000022276e: 'Machine room'
+# name_00000022298f: 'Rack 12'
+#
+# ----------------------------------------------------------------------
+# AUTO-DETECTION JOBS
+# only one of them will run (they have the same name)
diff --git a/collectors/python.d.plugin/web_log/Makefile.inc b/collectors/python.d.plugin/web_log/Makefile.inc
new file mode 100644
index 000000000..893115992
--- /dev/null
+++ b/collectors/python.d.plugin/web_log/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += web_log/web_log.chart.py
+dist_pythonconfig_DATA += web_log/web_log.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += web_log/README.md web_log/Makefile.inc
+
diff --git a/collectors/python.d.plugin/web_log/README.md b/collectors/python.d.plugin/web_log/README.md
new file mode 100644
index 000000000..6e8ea1dd5
--- /dev/null
+++ b/collectors/python.d.plugin/web_log/README.md
@@ -0,0 +1,64 @@
+# web_log
+
+Tails the apache/nginx/lighttpd/gunicorn log files to collect real-time web-server statistics.
+
+It produces following charts:
+
+1. **Response by type** requests/s
+ * success (1xx, 2xx, 304)
+ * error (5xx)
+ * redirect (3xx except 304)
+ * bad (4xx)
+ * other (all other responses)
+
+2. **Response by code family** requests/s
+ * 1xx (informational)
+ * 2xx (successful)
+ * 3xx (redirect)
+ * 4xx (bad)
+ * 5xx (internal server errors)
+ * other (non-standard responses)
+ * unmatched (the lines in the log file that are not matched)
+
+3. **Detailed Response Codes** requests/s (number of responses for each response code family individually)
+
+4. **Bandwidth** kilobits/s
+ * received (bandwidth of requests)
+ * sent (bandwidth of responses)
+
+5. **Timings** ms (request processing time)
+ * min (minimum request processing time)
+ * max (maximum request processing time)
+ * average (average request processing time)
+
+6. **Request per url** requests/s (configured by user)
+
+7. **Http Methods** requests/s (requests per http method)
+
+8. **Http Versions** requests/s (requests per http version)
+
+9. **IP protocols** requests/s (requests per ip protocol version)
+
+10. **Current Poll Unique Client IPs** unique ips/s (unique client IPs per data collection iteration)
+
+11. **All Time Unique Client IPs** unique ips/s (unique client IPs since the last restart of netdata)
+
+
+### configuration
+
+```yaml
+nginx_log:
+ name : 'nginx_log'
+ path : '/var/log/nginx/access.log'
+
+apache_log:
+ name : 'apache_log'
+ path : '/var/log/apache/other_vhosts_access.log'
+ categories:
+ cacti : 'cacti.*'
+ observium : 'observium'
+```
+
+The module has preconfigured jobs for nginx, apache and gunicorn on various distros.
+
+---
diff --git a/collectors/python.d.plugin/web_log/web_log.chart.py b/collectors/python.d.plugin/web_log/web_log.chart.py
new file mode 100644
index 000000000..20e15f4cb
--- /dev/null
+++ b/collectors/python.d.plugin/web_log/web_log.chart.py
@@ -0,0 +1,1194 @@
+# -*- coding: utf-8 -*-
+# Description: web log netdata python.d module
+# Author: l2isbad
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+import bisect
+import re
+import os
+
+from collections import namedtuple, defaultdict
+from copy import deepcopy
+
+try:
+ from itertools import filterfalse
+except ImportError:
+ from itertools import ifilter as filter
+ from itertools import ifilterfalse as filterfalse
+
+try:
+ from sys import maxint
+except ImportError:
+ from sys import maxsize as maxint
+
+from bases.collection import read_last_line
+from bases.FrameworkServices.LogService import LogService
+
+
+ORDER_APACHE_CACHE = ['apache_cache']
+
+ORDER_WEB = [
+ 'response_statuses',
+ 'response_codes',
+ 'bandwidth',
+ 'response_time',
+ 'response_time_hist',
+ 'response_time_upstream',
+ 'response_time_upstream_hist',
+ 'requests_per_url',
+ 'requests_per_user_defined',
+ 'http_method',
+ 'vhost',
+ 'port',
+ 'http_version',
+ 'requests_per_ipproto',
+ 'clients',
+ 'clients_all'
+]
+
+ORDER_SQUID = [
+ 'squid_response_statuses',
+ 'squid_response_codes',
+ 'squid_detailed_response_codes',
+ 'squid_method',
+ 'squid_mime_type',
+ 'squid_hier_code',
+ 'squid_transport_methods',
+ 'squid_transport_errors',
+ 'squid_code',
+ 'squid_handling_opts',
+ 'squid_object_types',
+ 'squid_cache_events',
+ 'squid_bytes',
+ 'squid_duration',
+ 'squid_clients',
+ 'squid_clients_all'
+]
+
+CHARTS_WEB = {
+ 'response_codes': {
+ 'options': [None, 'Response Codes', 'requests/s', 'responses', 'web_log.response_codes', 'stacked'],
+ 'lines': [
+ ['2xx', None, 'incremental'],
+ ['5xx', None, 'incremental'],
+ ['3xx', None, 'incremental'],
+ ['4xx', None, 'incremental'],
+ ['1xx', None, 'incremental'],
+ ['0xx', 'other', 'incremental'],
+ ['unmatched', None, 'incremental']
+ ]
+ },
+ 'bandwidth': {
+ 'options': [None, 'Bandwidth', 'kilobits/s', 'bandwidth', 'web_log.bandwidth', 'area'],
+ 'lines': [
+ ['resp_length', 'received', 'incremental', 8, 1000],
+ ['bytes_sent', 'sent', 'incremental', -8, 1000]
+ ]
+ },
+ 'response_time': {
+ 'options': [None, 'Processing Time', 'milliseconds', 'timings', 'web_log.response_time', 'area'],
+ 'lines': [
+ ['resp_time_min', 'min', 'incremental', 1, 1000],
+ ['resp_time_max', 'max', 'incremental', 1, 1000],
+ ['resp_time_avg', 'avg', 'incremental', 1, 1000]
+ ]
+ },
+ 'response_time_hist': {
+ 'options': [None, 'Processing Time Histogram', 'requests/s', 'timings', 'web_log.response_time_hist', 'line'],
+ 'lines': []
+ },
+ 'response_time_upstream': {
+ 'options': [None, 'Processing Time Upstream', 'milliseconds', 'timings',
+ 'web_log.response_time_upstream', 'area'],
+ 'lines': [
+ ['resp_time_upstream_min', 'min', 'incremental', 1, 1000],
+ ['resp_time_upstream_max', 'max', 'incremental', 1, 1000],
+ ['resp_time_upstream_avg', 'avg', 'incremental', 1, 1000]
+ ]
+ },
+ 'response_time_upstream_hist': {
+ 'options': [None, 'Processing Time Histogram', 'requests/s', 'timings',
+ 'web_log.response_time_upstream_hist', 'line'],
+ 'lines': []
+ },
+ 'clients': {
+ 'options': [None, 'Current Poll Unique Client IPs', 'unique ips', 'clients', 'web_log.clients', 'stacked'],
+ 'lines': [
+ ['unique_cur_ipv4', 'ipv4', 'incremental', 1, 1],
+ ['unique_cur_ipv6', 'ipv6', 'incremental', 1, 1]
+ ]
+ },
+ 'clients_all': {
+ 'options': [None, 'All Time Unique Client IPs', 'unique ips', 'clients', 'web_log.clients_all', 'stacked'],
+ 'lines': [
+ ['unique_tot_ipv4', 'ipv4', 'absolute', 1, 1],
+ ['unique_tot_ipv6', 'ipv6', 'absolute', 1, 1]
+ ]
+ },
+ 'http_method': {
+ 'options': [None, 'Requests Per HTTP Method', 'requests/s', 'http methods', 'web_log.http_method', 'stacked'],
+ 'lines': [
+ ['GET', 'GET', 'incremental', 1, 1]
+ ]
+ },
+ 'http_version': {
+ 'options': [None, 'Requests Per HTTP Version', 'requests/s', 'http versions',
+ 'web_log.http_version', 'stacked'],
+ 'lines': []
+ },
+ 'requests_per_ipproto': {
+ 'options': [None, 'Requests Per IP Protocol', 'requests/s', 'ip protocols', 'web_log.requests_per_ipproto',
+ 'stacked'],
+ 'lines': [
+ ['req_ipv4', 'ipv4', 'incremental', 1, 1],
+ ['req_ipv6', 'ipv6', 'incremental', 1, 1]
+ ]
+ },
+ 'response_statuses': {
+ 'options': [None, 'Response Statuses', 'requests/s', 'responses', 'web_log.response_statuses', 'stacked'],
+ 'lines': [
+ ['successful_requests', 'success', 'incremental', 1, 1],
+ ['server_errors', 'error', 'incremental', 1, 1],
+ ['redirects', 'redirect', 'incremental', 1, 1],
+ ['bad_requests', 'bad', 'incremental', 1, 1],
+ ['other_requests', 'other', 'incremental', 1, 1]
+ ]
+ },
+ 'requests_per_url': {
+ 'options': [None, 'Requests Per Url', 'requests/s', 'urls', 'web_log.requests_per_url', 'stacked'],
+ 'lines': [
+ ['url_pattern_other', 'other', 'incremental', 1, 1]
+ ]
+ },
+ 'requests_per_user_defined': {
+ 'options': [None, 'Requests Per User Defined Pattern', 'requests/s', 'user defined',
+ 'web_log.requests_per_user_defined', 'stacked'],
+ 'lines': [
+ ['user_pattern_other', 'other', 'incremental', 1, 1]
+ ]
+ },
+ 'port': {
+ 'options': [None, 'Requests Per Port', 'requests/s', 'port', 'web_log.port', 'stacked'],
+ 'lines': [
+ ['port_80', 'http', 'incremental', 1, 1],
+ ['port_443', 'https', 'incremental', 1, 1]
+ ]
+ },
+ 'vhost': {
+ 'options': [None, 'Requests Per Vhost', 'requests/s', 'vhost', 'web_log.vhost', 'stacked'],
+ 'lines': []
+ }
+}
+
+CHARTS_APACHE_CACHE = {
+ 'apache_cache': {
+ 'options': [None, 'Apache Cached Responses', 'percent cached', 'cached', 'web_log.apache_cache_cache',
+ 'stacked'],
+ 'lines': [
+ ['hit', 'cache', 'percentage-of-absolute-row'],
+ ['miss', None, 'percentage-of-absolute-row'],
+ ['other', None, 'percentage-of-absolute-row']
+ ]
+ }
+}
+
+CHARTS_SQUID = {
+ 'squid_duration': {
+ 'options': [None, 'Elapsed Time The Transaction Busied The Cache',
+ 'milliseconds', 'squid_timings', 'web_log.squid_duration', 'area'],
+ 'lines': [
+ ['duration_min', 'min', 'incremental', 1, 1000],
+ ['duration_max', 'max', 'incremental', 1, 1000],
+ ['duration_avg', 'avg', 'incremental', 1, 1000]
+ ]
+ },
+ 'squid_bytes': {
+ 'options': [None, 'Amount Of Data Delivered To The Clients',
+ 'kilobits/s', 'squid_bandwidth', 'web_log.squid_bytes', 'area'],
+ 'lines': [
+ ['bytes', 'sent', 'incremental', 8, 1000]
+ ]
+ },
+ 'squid_response_statuses': {
+ 'options': [None, 'Response Statuses', 'responses/s', 'squid_responses', 'web_log.squid_response_statuses',
+ 'stacked'],
+ 'lines': [
+ ['successful_requests', 'success', 'incremental', 1, 1],
+ ['server_errors', 'error', 'incremental', 1, 1],
+ ['redirects', 'redirect', 'incremental', 1, 1],
+ ['bad_requests', 'bad', 'incremental', 1, 1],
+ ['other_requests', 'other', 'incremental', 1, 1]
+ ]
+ },
+ 'squid_response_codes': {
+ 'options': [None, 'Response Codes', 'responses/s', 'squid_responses',
+ 'web_log.squid_response_codes', 'stacked'],
+ 'lines': [
+ ['2xx', None, 'incremental'],
+ ['5xx', None, 'incremental'],
+ ['3xx', None, 'incremental'],
+ ['4xx', None, 'incremental'],
+ ['1xx', None, 'incremental'],
+ ['0xx', None, 'incremental'],
+ ['other', None, 'incremental'],
+ ['unmatched', None, 'incremental']
+ ]
+ },
+ 'squid_code': {
+ 'options': [None, 'Responses Per Cache Result Of The Request',
+ 'requests/s', 'squid_squid_cache', 'web_log.squid_code', 'stacked'],
+ 'lines': []
+ },
+ 'squid_detailed_response_codes': {
+ 'options': [None, 'Detailed Response Codes',
+ 'responses/s', 'squid_responses', 'web_log.squid_detailed_response_codes', 'stacked'],
+ 'lines': []
+ },
+ 'squid_hier_code': {
+ 'options': [None, 'Responses Per Hierarchy Code',
+ 'requests/s', 'squid_hierarchy', 'web_log.squid_hier_code', 'stacked'],
+ 'lines': []
+ },
+ 'squid_method': {
+ 'options': [None, 'Requests Per Method',
+ 'requests/s', 'squid_requests', 'web_log.squid_method', 'stacked'],
+ 'lines': []
+ },
+ 'squid_mime_type': {
+ 'options': [None, 'Requests Per MIME Type',
+ 'requests/s', 'squid_requests', 'web_log.squid_mime_type', 'stacked'],
+ 'lines': []
+ },
+ 'squid_clients': {
+ 'options': [None, 'Current Poll Unique Client IPs', 'unique ips', 'squid_clients',
+ 'web_log.squid_clients', 'stacked'],
+ 'lines': [
+ ['unique_ipv4', 'ipv4', 'incremental'],
+ ['unique_ipv6', 'ipv6', 'incremental']
+ ]
+ },
+ 'squid_clients_all': {
+ 'options': [None, 'All Time Unique Client IPs', 'unique ips', 'squid_clients',
+ 'web_log.squid_clients_all', 'stacked'],
+ 'lines': [
+ ['unique_tot_ipv4', 'ipv4', 'absolute'],
+ ['unique_tot_ipv6', 'ipv6', 'absolute']
+ ]
+ },
+ 'squid_transport_methods': {
+ 'options': [None, 'Transport Methods', 'requests/s', 'squid_squid_transport',
+ 'web_log.squid_transport_methods', 'stacked'],
+ 'lines': []
+ },
+ 'squid_transport_errors': {
+ 'options': [None, 'Transport Errors', 'requests/s', 'squid_squid_transport',
+ 'web_log.squid_transport_errors', 'stacked'],
+ 'lines': []
+ },
+ 'squid_handling_opts': {
+ 'options': [None, 'Handling Opts', 'requests/s', 'squid_squid_cache',
+ 'web_log.squid_handling_opts', 'stacked'],
+ 'lines': []
+ },
+ 'squid_object_types': {
+ 'options': [None, 'Object Types', 'objects/s', 'squid_squid_cache',
+ 'web_log.squid_object_types', 'stacked'],
+ 'lines': []
+ },
+ 'squid_cache_events': {
+ 'options': [None, 'Cache Events', 'events/s', 'squid_squid_cache',
+ 'web_log.squid_cache_events', 'stacked'],
+ 'lines': []
+ }
+}
+
+NAMED_PATTERN = namedtuple('PATTERN', ['description', 'func'])
+
+DET_RESP_AGGR = ['', '_1xx', '_2xx', '_3xx', '_4xx', '_5xx', '_Other']
+
+SQUID_CODES = {
+ 'TCP': 'squid_transport_methods',
+ 'UDP': 'squid_transport_methods',
+ 'NONE': 'squid_transport_methods',
+ 'CLIENT': 'squid_handling_opts',
+ 'IMS': 'squid_handling_opts',
+ 'ASYNC': 'squid_handling_opts',
+ 'SWAPFAIL': 'squid_handling_opts',
+ 'REFRESH': 'squid_handling_opts',
+ 'SHARED': 'squid_handling_opts',
+ 'REPLY': 'squid_handling_opts',
+ 'NEGATIVE': 'squid_object_types',
+ 'STALE': 'squid_object_types',
+ 'OFFLINE': 'squid_object_types',
+ 'INVALID': 'squid_object_types',
+ 'FAIL': 'squid_object_types',
+ 'MODIFIED': 'squid_object_types',
+ 'UNMODIFIED': 'squid_object_types',
+ 'REDIRECT': 'squid_object_types',
+ 'HIT': 'squid_cache_events',
+ 'MEM': 'squid_cache_events',
+ 'MISS': 'squid_cache_events',
+ 'DENIED': 'squid_cache_events',
+ 'NOFETCH': 'squid_cache_events',
+ 'TUNNEL': 'squid_cache_events',
+ 'ABORTED': 'squid_transport_errors',
+ 'TIMEOUT': 'squid_transport_errors'
+}
+
+REQUEST_REGEX = re.compile(r'(?P<method>[A-Z]+) (?P<url>[^ ]+) [A-Z]+/(?P<http_version>\d(?:.\d)?)')
+
+MIME_TYPES = ['application', 'audio', 'example', 'font', 'image', 'message', 'model', 'multipart', 'text', 'video']
+
+
+class Service(LogService):
+ def __init__(self, configuration=None, name=None):
+ """
+ :param configuration:
+ :param name:
+ """
+ LogService.__init__(self, configuration=configuration, name=name)
+ self.configuration = configuration
+ self.log_path = self.configuration.get('path')
+ self.job = None
+
+ def check(self):
+ """
+ :return: bool
+
+ 1. "log_path" is specified in the module configuration file
+ 2. "log_path" must be readable by netdata user and must exist
+ 3. "log_path' must not be empty. We need at least 1 line to find appropriate pattern to parse
+ 4. other checks depends on log "type"
+ """
+
+ log_type = self.configuration.get('type', 'web')
+ log_types = dict(web=Web, apache_cache=ApacheCache, squid=Squid)
+
+ if log_type not in log_types:
+ self.error('bad log type {log_type}. Supported types: {types}'.format(log_type=log_type,
+ types=log_types.keys()))
+ return False
+
+ if not self.log_path:
+ self.error('log path is not specified')
+ return False
+
+ if not (self._find_recent_log_file() and os.access(self.log_path, os.R_OK)):
+ self.error('{log_file} is not readable or does not exist'.format(log_file=self.log_path))
+ return False
+
+ if not os.path.getsize(self.log_path):
+ self.error('{log_file} is empty'.format(log_file=self.log_path))
+ return False
+
+ self.job = log_types[log_type](self)
+ if self.job.check():
+ self.order = self.job.order
+ self.definitions = self.job.definitions
+ return True
+ return False
+
+ def _get_data(self):
+ return self.job.get_data(self._get_raw_data())
+
+
+class Web:
+ def __init__(self, service):
+ self.service = service
+ self.order = ORDER_WEB[:]
+ self.definitions = deepcopy(CHARTS_WEB)
+ self.pre_filter = check_patterns('filter', self.configuration.get('filter'))
+ self.storage = dict()
+ self.data = {
+ 'bytes_sent': 0,
+ 'resp_length': 0,
+ 'resp_time_min': 0,
+ 'resp_time_max': 0,
+ 'resp_time_avg': 0,
+ 'resp_time_upstream_min': 0,
+ 'resp_time_upstream_max': 0,
+ 'resp_time_upstream_avg': 0,
+ 'unique_cur_ipv4': 0,
+ 'unique_cur_ipv6': 0,
+ '2xx': 0,
+ '5xx': 0,
+ '3xx': 0,
+ '4xx': 0,
+ '1xx': 0,
+ '0xx': 0,
+ 'unmatched': 0,
+ 'req_ipv4': 0,
+ 'req_ipv6': 0,
+ 'unique_tot_ipv4': 0,
+ 'unique_tot_ipv6': 0,
+ 'successful_requests': 0,
+ 'redirects': 0,
+ 'bad_requests': 0,
+ 'server_errors': 0,
+ 'other_requests': 0,
+ 'GET': 0
+ }
+
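+ # Attribute lookups that miss on this helper (configuration, log_path,
+ # the error()/info() loggers, ...) are delegated to the owning Service
+ # instance via __getattr__ below.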
+ def __getattr__(self, item):
+ return getattr(self.service, item)
+
+ def check(self):
+ last_line = read_last_line(self.log_path)
+ if not last_line:
+ return False
+ # Custom_log_format or predefined log format.
+ if self.configuration.get('custom_log_format'):
+ match_dict, error = self.find_regex_custom(last_line)
+ else:
+ match_dict, error = self.find_regex(last_line)
+
+ # "match_dict" is None if there are any problems
+ if match_dict is None:
+ self.error(error)
+ return False
+
+ self.storage['unique_all_time'] = list()
+ self.storage['url_pattern'] = check_patterns('url_pattern', self.configuration.get('categories'))
+ self.storage['user_pattern'] = check_patterns('user_pattern', self.configuration.get('user_defined'))
+
+ self.create_web_charts(match_dict) # Create charts
+ self.info('Collected data: %s' % list(match_dict.keys()))
+ return True
+
+ def create_web_charts(self, match_dict):
+ """
+ :param match_dict: dict: regex.search.groupdict(). Ex. {'address': '127.0.0.1', 'code': '200', 'method': 'GET'}
+ :return:
+ Create/remove additional charts depending on the 'match_dict' keys and configuration file options
+ """
+ if 'resp_time' not in match_dict:
+ self.order.remove('response_time')
+ self.order.remove('response_time_hist')
+ if 'resp_time_upstream' not in match_dict:
+ self.order.remove('response_time_upstream')
+ self.order.remove('response_time_upstream_hist')
+
+ # Add 'response_time_hist' and 'response_time_upstream_hist' charts if a histogram is specified in the configuration
+ histogram = self.configuration.get('histogram', None)
+ if isinstance(histogram, list):
+ self.storage['bucket_index'] = histogram[:]
+ self.storage['bucket_index'].append(maxint)
+ self.storage['buckets'] = [0] * (len(histogram) + 1)
+ self.storage['upstream_buckets'] = [0] * (len(histogram) + 1)
+ hist_lines = self.definitions['response_time_hist']['lines']
+ upstream_hist_lines = self.definitions['response_time_upstream_hist']['lines']
+ for i, le in enumerate(histogram):
+ hist_key = 'response_time_hist_%d' % i
+ upstream_hist_key = 'response_time_upstream_hist_%d' % i
+ hist_lines.append([hist_key, str(le), 'incremental', 1, 1])
+ upstream_hist_lines.append([upstream_hist_key, str(le), 'incremental', 1, 1])
+
+ hist_lines.append(['response_time_hist_%d' % len(histogram), '+Inf', 'incremental', 1, 1])
+ upstream_hist_lines.append(['response_time_upstream_hist_%d' % len(histogram), '+Inf', 'incremental', 1, 1])
+ elif histogram is not None:
+ self.error('expected histogram to be a list, but got {0}'.format(type(histogram)))
+
+ if not self.configuration.get('all_time', True):
+ self.order.remove('clients_all')
+
+ # Add 'detailed_response_codes' chart if specified in the configuration
+ if self.configuration.get('detailed_response_codes', True):
+ if self.configuration.get('detailed_response_aggregate', True):
+ codes = DET_RESP_AGGR[:1]
+ else:
+ codes = DET_RESP_AGGR[1:]
+
+ for code in codes:
+ self.order.append('detailed_response_codes%s' % code)
+ self.definitions['detailed_response_codes%s' % code] = {
+ 'options': [None, 'Detailed Response Codes %s' % code[1:], 'requests/s', 'responses',
+ 'web_log.detailed_response_codes%s' % code, 'stacked'],
+ 'lines': []
+ }
+
+ # Add 'requests_per_url' chart if specified in the configuration
+ if self.storage['url_pattern']:
+ for elem in self.storage['url_pattern']:
+ dim = [elem.description, elem.description[12:], 'incremental']
+ self.definitions['requests_per_url']['lines'].append(dim)
+ self.data[elem.description] = 0
+ self.data['url_pattern_other'] = 0
+ else:
+ self.order.remove('requests_per_url')
+
+ # Add 'requests_per_user_defined' chart if specified in the configuration
+ if self.storage['user_pattern'] and 'user_defined' in match_dict:
+ for elem in self.storage['user_pattern']:
+ dim = [elem.description, elem.description[13:], 'incremental']
+ self.definitions['requests_per_user_defined']['lines'].append(dim)
+ self.data[elem.description] = 0
+ self.data['user_pattern_other'] = 0
+ else:
+ self.order.remove('requests_per_user_defined')
+
+ def get_data(self, raw_data=None):
+ """
+ Parses new log lines
+ :return: dict OR None
+ None if _get_raw_data method fails.
+ In all other cases - dict.
+ """
+ if not raw_data:
+ return None if raw_data is None else self.data
+
+ filtered_data = filter_data(raw_data=raw_data, pre_filter=self.pre_filter)
+
+ unique_current = set()
+ timings = defaultdict(lambda: dict(minimum=None, maximum=0, summary=0, count=0))
+
+ for line in filtered_data:
+ match = self.storage['regex'].search(line)
+ if match:
+ match_dict = match.groupdict()
+ try:
+ code = match_dict['code'][0] + 'xx'
+ self.data[code] += 1
+ except KeyError:
+ self.data['0xx'] += 1
+ # detailed response code
+ if self.configuration.get('detailed_response_codes', True):
+ self.get_data_per_response_codes_detailed(code=match_dict['code'])
+ # response statuses
+ self.get_data_per_statuses(code=match_dict['code'])
+ # requests per user defined pattern
+ if self.storage['user_pattern'] and 'user_defined' in match_dict:
+ self.get_data_per_pattern(row=match_dict['user_defined'],
+ other='user_pattern_other',
+ pattern=self.storage['user_pattern'])
+ # method, url, http version
+ self.get_data_from_request_field(match_dict=match_dict)
+ # bandwidth sent
+ bytes_sent = match_dict['bytes_sent'] if '-' not in match_dict['bytes_sent'] else 0
+ self.data['bytes_sent'] += int(bytes_sent)
+ # request processing time and bandwidth received
+ if 'resp_length' in match_dict:
+ resp_length = match_dict['resp_length'] if '-' not in match_dict['resp_length'] else 0
+ self.data['resp_length'] += int(resp_length)
+ if 'resp_time' in match_dict:
+ resp_time = self.storage['func_resp_time'](float(match_dict['resp_time']))
+ get_timings(timings=timings['resp_time'], time=resp_time)
+ if 'bucket_index' in self.storage:
+ get_hist(self.storage['bucket_index'], self.storage['buckets'], resp_time / 1000)
+ if 'resp_time_upstream' in match_dict and match_dict['resp_time_upstream'] != '-':
+ resp_time_upstream = self.storage['func_resp_time'](float(match_dict['resp_time_upstream']))
+ get_timings(timings=timings['resp_time_upstream'], time=resp_time_upstream)
+ if 'bucket_index' in self.storage:
+ get_hist(self.storage['bucket_index'], self.storage['upstream_buckets'], resp_time_upstream / 1000)
+ # requests per ip proto
+ proto = 'ipv6' if ':' in match_dict['address'] else 'ipv4'
+ self.data['req_' + proto] += 1
+ # unique clients ips
+ if self.configuration.get('all_time', True):
+ if address_not_in_pool(pool=self.storage['unique_all_time'],
+ address=match_dict['address'],
+ pool_size=self.data['unique_tot_ipv4'] + self.data['unique_tot_ipv6']):
+ self.data['unique_tot_' + proto] += 1
+ if match_dict['address'] not in unique_current:
+ self.data['unique_cur_' + proto] += 1
+ unique_current.add(match_dict['address'])
+ else:
+ self.data['unmatched'] += 1
+
+ # timings
+ for elem in timings:
+ self.data[elem + '_min'] += timings[elem]['minimum']
+ self.data[elem + '_avg'] += timings[elem]['summary'] / timings[elem]['count']
+ self.data[elem + '_max'] += timings[elem]['maximum']
+
+ # histogram
+ if 'bucket_index' in self.storage:
+ buckets = self.storage['buckets']
+ upstream_buckets = self.storage['upstream_buckets']
+ for i in range(0, len(self.storage['bucket_index'])):
+ hist_key = 'response_time_hist_%d' % i
+ upstream_hist_key = 'response_time_upstream_hist_%d' % i
+ self.data[hist_key] = buckets[i]
+ self.data[upstream_hist_key] = upstream_buckets[i]
+
+ return self.data
+
+ def find_regex(self, last_line):
+ """
+ :param last_line: str: literally last line from log file
+ :return: tuple where:
+ [0]: dict or None: match_dict or None
+ [1]: str: error description
+ We need to find an appropriate pattern for the current log file.
+ All the logic does is a regex search through the string, trying all predefined patterns
+ until we find a match or fail.
+ """
+ # REGEX: 1.IPv4 address 2.HTTP method 3. URL 4. Response code
+ # 5. Bytes sent 6. Response length 7. Response process time
+ default = re.compile(r'(?P<address>[\da-f.:]+|localhost)'
+ r' -.*?"(?P<request>[^"]*)"'
+ r' (?P<code>[1-9]\d{2})'
+ r' (?P<bytes_sent>\d+|-)')
+
+ apache_ext_insert = re.compile(r'(?P<address>[\da-f.:]+|localhost)'
+ r' -.*?"(?P<request>[^"]*)"'
+ r' (?P<code>[1-9]\d{2})'
+ r' (?P<bytes_sent>\d+|-)'
+ r' (?P<resp_length>\d+|-)'
+ r' (?P<resp_time>\d+) ')
+
+ apache_ext_append = re.compile(r'(?P<address>[\da-f.:]+|localhost)'
+ r' -.*?"(?P<request>[^"]*)"'
+ r' (?P<code>[1-9]\d{2})'
+ r' (?P<bytes_sent>\d+|-)'
+ r' .*?'
+ r' (?P<resp_length>\d+|-)'
+ r' (?P<resp_time>\d+)'
+ r'(?: |$)')
+
+ nginx_ext_insert = re.compile(r'(?P<address>[\da-f.:]+)'
+ r' -.*?"(?P<request>[^"]*)"'
+ r' (?P<code>[1-9]\d{2})'
+ r' (?P<bytes_sent>\d+)'
+ r' (?P<resp_length>\d+)'
+ r' (?P<resp_time>\d+\.\d+) ')
+
+ nginx_ext2_insert = re.compile(r'(?P<address>[\da-f.:]+)'
+ r' -.*?"(?P<request>[^"]*)"'
+ r' (?P<code>[1-9]\d{2})'
+ r' (?P<bytes_sent>\d+)'
+ r' (?P<resp_length>\d+)'
+ r' (?P<resp_time>\d+\.\d+)'
+ r' (?P<resp_time_upstream>[\d.-]+) ')
+
+ nginx_ext_append = re.compile(r'(?P<address>[\da-f.:]+)'
+ r' -.*?"(?P<request>[^"]*)"'
+ r' (?P<code>[1-9]\d{2})'
+ r' (?P<bytes_sent>\d+)'
+ r' .*?'
+ r' (?P<resp_length>\d+)'
+ r' (?P<resp_time>\d+\.\d+)')
+
+ def func_usec(time):
+ return time
+
+ def func_sec(time):
+ return time * 1000000
+
+ r_regex = [apache_ext_insert, apache_ext_append,
+ nginx_ext2_insert, nginx_ext_insert, nginx_ext_append,
+ default]
+ r_function = [func_usec, func_usec, func_sec, func_sec, func_sec, func_usec]
+ regex_function = zip(r_regex, r_function)
+
+ match_dict = dict()
+ for regex, func in regex_function:
+ match = regex.search(last_line)
+ if match:
+ self.storage['regex'] = regex
+ self.storage['func_resp_time'] = func
+ match_dict = match.groupdict()
+ break
+
+ return find_regex_return(match_dict=match_dict or None,
+ msg='Unknown log format. You need to use "custom_log_format" feature.')
+
+ def find_regex_custom(self, last_line):
+ """
+ :param last_line: str: literally last line from log file
+ :return: tuple where:
+ [0]: dict or None: match_dict or None
+ [1]: str: error description
+
+ We are here only if "custom_log_format" is set in the configuration. We need to make sure:
+ 1. "custom_log_format" is a dict
+ 2. "pattern" in "custom_log_format" and pattern is <str> instance
+ 3. if "time_multiplier" is in "custom_log_format" it must be <int> or <float> instance
+
+ If all parameters are ok we need to make sure:
+ 1. Pattern search is success
+ 2. Pattern search contains named subgroups (?P<subgroup_name>) (= "match_dict")
+
+ If pattern search is success we need to make sure:
+ 1. All mandatory keys ['address', 'code', 'bytes_sent', 'method', 'url'] are in "match_dict"
+
+ If this is True we need to make sure:
+ 1. All mandatory key values from "match_dict" have the correct format
+ ("code" is integer, "method" is uppercase word, etc)
+
+ If non-mandatory keys are in "match_dict" we need to make sure:
+ 1. All non mandatory key values from match_dict ['resp_length', 'resp_time'] have the correct format
+ ("resp_length" is integer or "-", "resp_time" is integer or float)
+
+ """
+ if not hasattr(self.configuration.get('custom_log_format'), 'keys'):
+ return find_regex_return(msg='Custom log: "custom_log_format" is not a <dict>')
+
+ pattern = self.configuration.get('custom_log_format', dict()).get('pattern')
+ if not (pattern and isinstance(pattern, str)):
+ return find_regex_return(msg='Custom log: "pattern" option is not specified or type is not <str>')
+
+ resp_time_func = self.configuration.get('custom_log_format', dict()).get('time_multiplier') or 0
+
+ if not isinstance(resp_time_func, (int, float)):
+ return find_regex_return(msg='Custom log: "time_multiplier" is not an integer or a float')
+
+ try:
+ regex = re.compile(pattern)
+ except re.error as error:
+ return find_regex_return(msg='Pattern compile error: %s' % str(error))
+
+ match = regex.search(last_line)
+ if not match:
+ return find_regex_return(msg='Custom log: pattern search FAILED')
+
+ match_dict = match.groupdict() or None
+ if match_dict is None:
+ return find_regex_return(msg='Custom log: search OK but contains no named subgroups'
+ ' (you need to use ?P<subgroup_name>)')
+ mandatory_dict = {'address': r'[\w.:-]+',
+ 'code': r'[1-9]\d{2}',
+ 'bytes_sent': r'\d+|-'}
+ optional_dict = {'resp_length': r'\d+|-',
+ 'resp_time': r'[\d.]+',
+ 'resp_time_upstream': r'[\d.-]+',
+ 'method': r'[A-Z]+',
+ 'http_version': r'\d(?:.\d)?'}
+
+ mandatory_values = set(mandatory_dict) - set(match_dict)
+ if mandatory_values:
+ return find_regex_return(msg='Custom log: search OK but some mandatory keys (%s) are missing'
+ % list(mandatory_values))
+ for key in mandatory_dict:
+ if not re.search(mandatory_dict[key], match_dict[key]):
+ return find_regex_return(msg='Custom log: can\'t parse "%s": %s'
+ % (key, match_dict[key]))
+
+ optional_values = set(optional_dict) & set(match_dict)
+ for key in optional_values:
+ if not re.search(optional_dict[key], match_dict[key]):
+ return find_regex_return(msg='Custom log: can\'t parse "%s": %s'
+ % (key, match_dict[key]))
+
+ dot_in_time = '.' in match_dict.get('resp_time', '')
+ if dot_in_time:
+ self.storage['func_resp_time'] = lambda time: time * (resp_time_func or 1000000)
+ else:
+ self.storage['func_resp_time'] = lambda time: time * (resp_time_func or 1)
+
+ self.storage['regex'] = regex
+ return find_regex_return(match_dict=match_dict)
+
+ def get_data_from_request_field(self, match_dict):
+ if match_dict.get('request'):
+ match_dict = REQUEST_REGEX.search(match_dict['request'])
+ if match_dict:
+ match_dict = match_dict.groupdict()
+ else:
+ return
+ # requests per url
+ if match_dict.get('url') and self.storage['url_pattern']:
+ self.get_data_per_pattern(row=match_dict['url'],
+ other='url_pattern_other',
+ pattern=self.storage['url_pattern'])
+ # requests per http method
+ if match_dict.get('method'):
+ if match_dict['method'] not in self.data:
+ self.charts['http_method'].add_dimension([match_dict['method'],
+ match_dict['method'],
+ 'incremental'])
+ self.data[match_dict['method']] = 0
+ self.data[match_dict['method']] += 1
+ # requests per http version
+ if match_dict.get('http_version'):
+ dim_id = match_dict['http_version'].replace('.', '_')
+ if dim_id not in self.data:
+ self.charts['http_version'].add_dimension([dim_id,
+ match_dict['http_version'],
+ 'incremental'])
+ self.data[dim_id] = 0
+ self.data[dim_id] += 1
+ # requests per port number
+ if match_dict.get('port'):
+ if match_dict['port'] not in self.data:
+ self.charts['port'].add_dimension([match_dict['port'],
+ match_dict['port'],
+ 'incremental'])
+ self.data[match_dict['port']] = 0
+ self.data[match_dict['port']] += 1
+ # requests per vhost
+ if match_dict.get('vhost'):
+ dim_id = match_dict['vhost'].replace('.', '_')
+ if dim_id not in self.data:
+ self.charts['vhost'].add_dimension([dim_id,
+ match_dict['vhost'],
+ 'incremental'])
+ self.data[dim_id] = 0
+ self.data[dim_id] += 1
+
+ def get_data_per_response_codes_detailed(self, code):
+ """
+ :param code: str: CODE from parsed line. Ex.: '202', '499'
+ :return:
+ Calls add_new_dimension method If the value is found for the first time
+ """
+ if code not in self.data:
+ if self.configuration.get('detailed_response_aggregate', True):
+ self.charts['detailed_response_codes'].add_dimension([code, code, 'incremental'])
+ self.data[code] = 0
+ else:
+ code_index = int(code[0]) if int(code[0]) < 6 else 6
+ chart_key = 'detailed_response_codes' + DET_RESP_AGGR[code_index]
+ self.charts[chart_key].add_dimension([code, code, 'incremental'])
+ self.data[code] = 0
+ self.data[code] += 1
+
+ def get_data_per_pattern(self, row, other, pattern):
+ """
+ :param row: str:
+ :param other: str:
+ :param pattern: named tuple: (['pattern_description', 'regular expression'])
+ :return:
+ Scans through the string looking for the first user defined pattern that produces a match,
+ falling back to the "other" counter if none of the patterns match
+ """
+ match = None
+ for elem in pattern:
+ if elem.func(row):
+ self.data[elem.description] += 1
+ match = True
+ break
+ if not match:
+ self.data[other] += 1
+
+ def get_data_per_statuses(self, code):
+ """
+ :param code: str: response status code. Ex.: '202', '499'
+ :return:
+ """
+ code_class = code[0]
+ if code_class == '2' or code == '304' or code_class == '1':
+ self.data['successful_requests'] += 1
+ elif code_class == '3':
+ self.data['redirects'] += 1
+ elif code_class == '4':
+ self.data['bad_requests'] += 1
+ elif code_class == '5':
+ self.data['server_errors'] += 1
+ else:
+ self.data['other_requests'] += 1
+
+
+class ApacheCache:
+ def __init__(self, service):
+ self.service = service
+ self.order = ORDER_APACHE_CACHE
+ self.definitions = CHARTS_APACHE_CACHE
+
+ @staticmethod
+ def check():
+ return True
+
+ @staticmethod
+ def get_data(raw_data=None):
+ data = dict(hit=0, miss=0, other=0)
+ if not raw_data:
+ return None if raw_data is None else data
+
+ for line in raw_data:
+ if 'cache hit' in line:
+ data['hit'] += 1
+ elif 'cache miss' in line:
+ data['miss'] += 1
+ else:
+ data['other'] += 1
+ return data
+
+
+class Squid:
+ def __init__(self, service):
+ self.service = service
+ self.order = ORDER_SQUID
+ self.definitions = CHARTS_SQUID
+ self.pre_filter = check_patterns('filter', self.configuration.get('filter'))
+ self.storage = dict()
+ self.data = {
+ 'duration_max': 0,
+ 'duration_avg': 0,
+ 'duration_min': 0,
+ 'bytes': 0,
+ '0xx': 0,
+ '1xx': 0,
+ '2xx': 0,
+ '3xx': 0,
+ '4xx': 0,
+ '5xx': 0,
+ 'other': 0,
+ 'unmatched': 0,
+ 'unique_ipv4': 0,
+ 'unique_ipv6': 0,
+ 'unique_tot_ipv4': 0,
+ 'unique_tot_ipv6': 0,
+ 'successful_requests': 0,
+ 'redirects': 0,
+ 'bad_requests': 0,
+ 'server_errors': 0,
+ 'other_requests': 0
+ }
+
+ def __getattr__(self, item):
+ return getattr(self.service, item)
+
+ def check(self):
+ last_line = read_last_line(self.log_path)
+ if not last_line:
+ return False
+ self.storage['unique_all_time'] = list()
+ self.storage['regex'] = re.compile(r'[0-9.]+\s+(?P<duration>[0-9]+)'
+ r' (?P<client_address>[\da-f.:]+)'
+ r' (?P<squid_code>[A-Z_]+)/'
+ r'(?P<http_code>[0-9]+)'
+ r' (?P<bytes>[0-9]+)'
+ r' (?P<method>[A-Z_]+)'
+ r' (?P<url>[^ ]+)'
+ r' (?P<user>[^ ]+)'
+ r' (?P<hier_code>[A-Z_]+)/[\da-z.:-]+'
+ r' (?P<mime_type>[A-Za-z-]*)')
+
+ match = self.storage['regex'].search(last_line)
+ if not match:
+ self.error('Regex does not match (%s)' % self.storage['regex'].pattern)
+ return False
+ self.storage['dynamic'] = {
+ 'http_code': {
+ 'chart': 'squid_detailed_response_codes',
+ 'func_dim_id': None,
+ 'func_dim': None
+ },
+ 'hier_code': {
+ 'chart': 'squid_hier_code',
+ 'func_dim_id': None,
+ 'func_dim': lambda v: v.replace('HIER_', '')
+ },
+ 'method': {
+ 'chart': 'squid_method',
+ 'func_dim_id': None,
+ 'func_dim': None
+ },
+ 'mime_type': {
+ 'chart': 'squid_mime_type',
+ 'func_dim_id': lambda v: str.lower(v) if str.lower(v) in MIME_TYPES else 'unknown',
+ 'func_dim': None
+ }
+ }
+ if not self.configuration.get('all_time', True):
+ self.order.remove('squid_clients_all')
+ return True
+
+ def get_data(self, raw_data=None):
+ if not raw_data:
+ return None if raw_data is None else self.data
+
+ filtered_data = filter_data(raw_data=raw_data, pre_filter=self.pre_filter)
+
+ unique_ip = set()
+ timings = defaultdict(lambda: dict(minimum=None, maximum=0, summary=0, count=0))
+
+ for row in filtered_data:
+ match = self.storage['regex'].search(row)
+ if match:
+ match = match.groupdict()
+ if match['duration'] != '0':
+ get_timings(timings=timings['duration'], time=float(match['duration']) * 1000)
+ try:
+ self.data[match['http_code'][0] + 'xx'] += 1
+ except KeyError:
+ self.data['other'] += 1
+
+ self.get_data_per_statuses(match['http_code'])
+
+ self.get_data_per_squid_code(match['squid_code'])
+
+ self.data['bytes'] += int(match['bytes'])
+
+ proto = 'ipv4' if '.' in match['client_address'] else 'ipv6'
+ # unique clients ips
+ if self.configuration.get('all_time', True):
+ if address_not_in_pool(pool=self.storage['unique_all_time'],
+ address=match['client_address'],
+ pool_size=self.data['unique_tot_ipv4'] + self.data['unique_tot_ipv6']):
+ self.data['unique_tot_' + proto] += 1
+
+ if match['client_address'] not in unique_ip:
+ self.data['unique_' + proto] += 1
+ unique_ip.add(match['client_address'])
+
+ for key, values in self.storage['dynamic'].items():
+ if match[key] == '-':
+ continue
+ dimension_id = values['func_dim_id'](match[key]) if values['func_dim_id'] else match[key]
+ if dimension_id not in self.data:
+ dimension = values['func_dim'](match[key]) if values['func_dim'] else dimension_id
+ self.charts[values['chart']].add_dimension([dimension_id,
+ dimension,
+ 'incremental'])
+ self.data[dimension_id] = 0
+ self.data[dimension_id] += 1
+ else:
+ self.data['unmatched'] += 1
+
+ for elem in timings:
+ self.data[elem + '_min'] += timings[elem]['minimum']
+ self.data[elem + '_avg'] += timings[elem]['summary'] / timings[elem]['count']
+ self.data[elem + '_max'] += timings[elem]['maximum']
+ return self.data
+
+ def get_data_per_statuses(self, code):
+ """
+ :param code: str: response status code. Ex.: '202', '499'
+ :return:
+ """
+ code_class = code[0]
+ if code_class == '2' or code == '304' or code_class == '1' or code == '000':
+ self.data['successful_requests'] += 1
+ elif code_class == '3':
+ self.data['redirects'] += 1
+ elif code_class == '4':
+ self.data['bad_requests'] += 1
+ elif code_class == '5' or code_class == '6':
+ self.data['server_errors'] += 1
+ else:
+ self.data['other_requests'] += 1
+
+ def get_data_per_squid_code(self, code):
+ """
+ :param code: str: squid response code. Ex.: 'TCP_MISS', 'TCP_MISS_ABORTED'
+ :return:
+ """
+ if code not in self.data:
+ self.charts['squid_code'].add_dimension([code, code, 'incremental'])
+ self.data[code] = 0
+ self.data[code] += 1
+
+ for tag in code.split('_'):
+ try:
+ chart_key = SQUID_CODES[tag]
+ except KeyError:
+ continue
+ dimension_id = '_'.join(['code_detailed', tag])
+ if dimension_id not in self.data:
+ self.charts[chart_key].add_dimension([dimension_id, tag, 'incremental'])
+ self.data[dimension_id] = 0
+ self.data[dimension_id] += 1
+
+
+def get_timings(timings, time):
+ """
+ :param timings:
+ :param time:
+ :return:
+ """
+ if timings['minimum'] is None:
+ timings['minimum'] = time
+ if time > timings['maximum']:
+ timings['maximum'] = time
+ elif time < timings['minimum']:
+ timings['minimum'] = time
+ timings['summary'] += time
+ timings['count'] += 1
+
+
+def get_hist(index, buckets, time):
+ """
+ :param index: histogram index (Ex. [10, 50, 100, 150, ...])
+ :param buckets: histogram buckets
+ :param time: time
+ :return: None
+ """
+ for i in range(len(index)-1, -1, -1):
+ if time <= index[i]:
+ buckets[i] += 1
+ else:
+ break
+
+
+def address_not_in_pool(pool, address, pool_size):
+ """
+ :param pool: list of ip addresses
+ :param address: ip address
+ :param pool_size: current pool size
+ :return: True if address not in pool. False otherwise.
+ """
+ index = bisect.bisect_left(pool, address)
+ if index < pool_size and pool[index] == address:
+ return False
+ bisect.insort_left(pool, address)
+ return True
+
+
+def find_regex_return(match_dict=None, msg='Generic error message'):
+ """
+ :param match_dict: dict: re.search.groupdict() or None
+ :param msg: str: error description
+ :return: tuple:
+ """
+ return match_dict, msg
+
+
+def check_patterns(string, dimension_regex_dict):
+ """
+ :param string: str:
+ :param dimension_regex_dict: dict: ex. {'dim1': '<pattern1>', 'dim2': '<pattern2>'}
+ :return: list of named tuples or None:
+ We need to make sure all patterns are valid regular expressions
+ """
+ if not hasattr(dimension_regex_dict, 'keys'):
+ return None
+
+ result = list()
+
+ def valid_pattern(pattern):
+ """
+ :param pattern: str
+ :return: re.compile(pattern) or None
+ """
+ if not isinstance(pattern, str):
+ return False
+ try:
+ return re.compile(pattern)
+ except re.error:
+ return False
+
+ def func_search(pattern):
+ def closure(v):
+ return pattern.search(v)
+
+ return closure
+
+ for dimension, regex in dimension_regex_dict.items():
+ valid = valid_pattern(regex)
+ if isinstance(dimension, str) and valid:
+ func = func_search(valid)
+ result.append(NAMED_PATTERN(description='_'.join([string, dimension]),
+ func=func))
+ return result or None
+
+
+def filter_data(raw_data, pre_filter):
+ """
+ :param raw_data:
+ :param pre_filter:
+ :return:
+ """
+
+ if not pre_filter:
+ return raw_data
+ filtered = raw_data
+ for elem in pre_filter:
+ if elem.description == 'filter_include':
+ filtered = filter(elem.func, filtered)
+ elif elem.description == 'filter_exclude':
+ filtered = filterfalse(elem.func, filtered)
+ return filtered
diff --git a/collectors/python.d.plugin/web_log/web_log.conf b/collectors/python.d.plugin/web_log/web_log.conf
new file mode 100644
index 000000000..a67957aef
--- /dev/null
+++ b/collectors/python.d.plugin/web_log/web_log.conf
@@ -0,0 +1,206 @@
+# netdata python.d.plugin configuration for web log
+#
+# This file is in YAML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 1
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# retries sets the number of retries to be made in case of failures.
+# If unset, the default for python.d.plugin is used.
+# Attempts to restore the service are made once every update_every
+# and only if the module has collected values in the past.
+# retries: 60
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+
+# ----------------------------------------------------------------------
+# PLUGIN CONFIGURATION
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# retries: 60 # the JOB's number of restoration attempts
+# autodetection_retry: 0 # the JOB's re-check interval in seconds
+#
+# Additionally to the above, web_log also supports the following:
+#
+# path: 'PATH' # the path to web server log file
+# path: 'PATH[0-9]*[0-9]' # log files with date suffix are also supported
+# detailed_response_codes: yes/no # default: yes. Additional chart where response codes are not grouped
+# detailed_response_aggregate: yes/no # default: yes. Not aggregated detailed response codes charts
+# all_time : yes/no # default: yes. All time unique client IPs chart (50000 addresses ~ 400KB)
+# filter: # filter with regex
+# include: 'REGEX' # only those rows that match the regex
+# exclude: 'REGEX' # all rows except those that match the regex
+# categories: # requests per url chart configuration
+# cacti: 'cacti.*' # name(dimension): REGEX to match
+# observium: 'observium.*' # name(dimension): REGEX to match
+# stub_status: 'stub_status' # name(dimension): REGEX to match
+# user_defined: # requests per pattern in <user_defined> field (custom_log_format)
+# cacti: 'cacti.*' # name(dimension): REGEX to match
+# observium: 'observium.*' # name(dimension): REGEX to match
+# stub_status: 'stub_status' # name(dimension): REGEX to match
+# custom_log_format: # define a custom log format
+# pattern: '(?P<address>[\da-f.:]+) -.*?"(?P<method>[A-Z]+) (?P<url>.*?)" (?P<code>[1-9]\d{2}) (?P<bytes_sent>\d+) (?P<resp_length>\d+) (?P<resp_time>\d+\.\d+) '
+# time_multiplier: 1000000 # type <int>/<float> - convert time to microseconds
+# histogram: [1,3,10,30,100, ...] # type list of int - Cumulative histogram of response time in milliseconds
+
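+# As an illustration only (the job name, path and category below are hypothetical,
+# adjust them to your own setup), a job combining several of the options above
+# could look like this:
+#
+# custom_example:
+#   name: 'custom_example'
+#   path: '/var/log/example/access.log'
+#   categories:
+#     static: '^/static/.*'
+#   histogram: [1, 3, 10, 30, 100]
+#   custom_log_format:
+#     pattern: '(?P<address>[\da-f.:]+) -.*?"(?P<method>[A-Z]+) (?P<url>.*?)" (?P<code>[1-9]\d{2}) (?P<bytes_sent>\d+) (?P<resp_length>\d+) (?P<resp_time>\d+\.\d+) '
+#     time_multiplier: 1000000
+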
+# ----------------------------------------------------------------------
+# WEB SERVER CONFIGURATION
+#
+# Make sure the web server log directory and the web server log files
+# can be read by user 'netdata'.
+#
+# To enable the timings chart and the requests size dimension, the
+# web server needs to log them. This is how to add them:
+#
+# nginx:
+# log_format netdata '$remote_addr - $remote_user [$time_local] '
+# '"$request" $status $body_bytes_sent '
+# '$request_length $request_time $upstream_response_time '
+# '"$http_referer" "$http_user_agent"';
+# access_log /var/log/nginx/access.log netdata;
+#
+# apache (you need mod_logio enabled):
+# LogFormat "%h %l %u %t \"%r\" %>s %O %I %D \"%{Referer}i\" \"%{User-Agent}i\"" vhost_netdata
+# LogFormat "%h %l %u %t \"%r\" %>s %O %I %D \"%{Referer}i\" \"%{User-Agent}i\"" netdata
+# CustomLog "/var/log/apache2/access.log" netdata
+
+# ----------------------------------------------------------------------
+# VHOST AND PORT
+# if you want to graph requests/sec per virtual host and per port (to compare the number of requests over http vs https)
+
+# in apache : (%v gives the hostname, %p the port number)
+# LogFormat "%v %p %h %t \"%r\" %>s %O %I %D \"%{Referer}i\" \"%{User-Agent}i\"" vhost_netdata
+#
+# and in this file in apache_vhosts_log section, add :
+# custom_log_format:
+# pattern: '(?P<vhost>[a-zA-Z\d.-_]+) (?P<port>\d+) (?P<address>[\da-f.:]+) \[.*\] "(?P<method>[A-Z]+)[^"]*" (?P<code>[1-9]\d{2}) (?P<bytes_sent>\d+) (?P<resp_length>\d+) (?P<resp_time>\d+)'
+
+# ----------------------------------------------------------------------
+# AUTO-DETECTION JOBS
+# only one of them per web server will run (when they have the same name)
+
+
+# -------------------------------------------
+# nginx log on various distros
+
+# debian, arch
+nginx_log:
+ name: 'nginx'
+ path: '/var/log/nginx/access.log'
+
+# gentoo
+nginx_log2:
+ name: 'nginx'
+ path: '/var/log/nginx/localhost.access_log'
+
+
+# -------------------------------------------
+# apache log on various distros
+
+# debian
+apache_log:
+ name: 'apache'
+ path: '/var/log/apache2/access.log'
+
+# gentoo
+apache_log2:
+ name: 'apache'
+ path: '/var/log/apache2/access_log'
+
+# arch
+apache_log3:
+ name: 'apache'
+ path: '/var/log/httpd/access_log'
+
+# debian
+apache_vhosts_log:
+ name: 'apache_vhosts'
+ path: '/var/log/apache2/other_vhosts_access.log'
+
+
+# -------------------------------------------
+# gunicorn log on various distros
+
+gunicorn_log:
+ name: 'gunicorn'
+ path: '/var/log/gunicorn/access.log'
+
+gunicorn_log2:
+ name: 'gunicorn'
+ path: '/var/log/gunicorn/gunicorn-access.log'
+
+# -------------------------------------------
+# Apache Cache
+apache_cache:
+ name: 'apache_cache'
+ type: 'apache_cache'
+ path: '/var/log/apache/cache.log'
+
+apache2_cache:
+ name: 'apache_cache'
+ type: 'apache_cache'
+ path: '/var/log/apache2/cache.log'
+
+httpd_cache:
+ name: 'apache_cache'
+ type: 'apache_cache'
+ path: '/var/log/httpd/cache.log'
+
+# -------------------------------------------
+# Squid
+
+# debian/ubuntu
+squid_log1:
+ name: 'squid'
+ type: 'squid'
+ path: '/var/log/squid3/access.log'
+
+#gentoo
+squid_log2:
+ name: 'squid'
+ type: 'squid'
+ path: '/var/log/squid/access.log'
diff --git a/collectors/statsd.plugin/Makefile.am b/collectors/statsd.plugin/Makefile.am
new file mode 100644
index 000000000..7f09bacd7
--- /dev/null
+++ b/collectors/statsd.plugin/Makefile.am
@@ -0,0 +1,20 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+
+dist_noinst_DATA = \
+ README.md \
+ $(NULL)
+
+statsdconfigdir=$(libconfigdir)/statsd.d
+dist_statsdconfig_DATA = \
+ $(top_srcdir)/installer/.keep \
+ example.conf \
+ $(NULL)
+
+userstatsdconfigdir=$(configdir)/statsd.d
+dist_userstatsdconfig_DATA = \
+ $(top_srcdir)/installer/.keep \
+ $(NULL)
+
diff --git a/collectors/statsd.plugin/Makefile.in b/collectors/statsd.plugin/Makefile.in
new file mode 100644
index 000000000..5c16a86d1
--- /dev/null
+++ b/collectors/statsd.plugin/Makefile.in
@@ -0,0 +1,556 @@
+# Makefile.in generated by automake 1.14.1 from Makefile.am.
+# @configure_input@
+
+# Copyright (C) 1994-2013 Free Software Foundation, Inc.
+
+# This Makefile.in is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
+# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE.
+
+@SET_MAKE@
+
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+VPATH = @srcdir@
+am__is_gnu_make = test -n '$(MAKEFILE_LIST)' && test -n '$(MAKELEVEL)'
+am__make_running_with_option = \
+ case $${target_option-} in \
+ ?) ;; \
+ *) echo "am__make_running_with_option: internal error: invalid" \
+ "target option '$${target_option-}' specified" >&2; \
+ exit 1;; \
+ esac; \
+ has_opt=no; \
+ sane_makeflags=$$MAKEFLAGS; \
+ if $(am__is_gnu_make); then \
+ sane_makeflags=$$MFLAGS; \
+ else \
+ case $$MAKEFLAGS in \
+ *\\[\ \ ]*) \
+ bs=\\; \
+ sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \
+ | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \
+ esac; \
+ fi; \
+ skip_next=no; \
+ strip_trailopt () \
+ { \
+ flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \
+ }; \
+ for flg in $$sane_makeflags; do \
+ test $$skip_next = yes && { skip_next=no; continue; }; \
+ case $$flg in \
+ *=*|--*) continue;; \
+ -*I) strip_trailopt 'I'; skip_next=yes;; \
+ -*I?*) strip_trailopt 'I';; \
+ -*O) strip_trailopt 'O'; skip_next=yes;; \
+ -*O?*) strip_trailopt 'O';; \
+ -*l) strip_trailopt 'l'; skip_next=yes;; \
+ -*l?*) strip_trailopt 'l';; \
+ -[dEDm]) skip_next=yes;; \
+ -[JT]) skip_next=yes;; \
+ esac; \
+ case $$flg in \
+ *$$target_option*) has_opt=yes; break;; \
+ esac; \
+ done; \
+ test $$has_opt = yes
+am__make_dryrun = (target_option=n; $(am__make_running_with_option))
+am__make_keepgoing = (target_option=k; $(am__make_running_with_option))
+pkgdatadir = $(datadir)/@PACKAGE@
+pkgincludedir = $(includedir)/@PACKAGE@
+pkglibdir = $(libdir)/@PACKAGE@
+pkglibexecdir = $(libexecdir)/@PACKAGE@
+am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd
+install_sh_DATA = $(install_sh) -c -m 644
+install_sh_PROGRAM = $(install_sh) -c
+install_sh_SCRIPT = $(install_sh) -c
+INSTALL_HEADER = $(INSTALL_DATA)
+transform = $(program_transform_name)
+NORMAL_INSTALL = :
+PRE_INSTALL = :
+POST_INSTALL = :
+NORMAL_UNINSTALL = :
+PRE_UNINSTALL = :
+POST_UNINSTALL = :
+build_triplet = @build@
+host_triplet = @host@
+subdir = collectors/statsd.plugin
+DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/Makefile.am \
+ $(dist_noinst_DATA) $(dist_statsdconfig_DATA) \
+ $(dist_userstatsdconfig_DATA)
+ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
+am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \
+ $(top_srcdir)/build/m4/ax_c__generic.m4 \
+ $(top_srcdir)/build/m4/ax_c_lto.m4 \
+ $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \
+ $(top_srcdir)/build/m4/ax_c_mallopt.m4 \
+ $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \
+ $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \
+ $(top_srcdir)/build/m4/ax_pthread.m4 \
+ $(top_srcdir)/build/m4/jemalloc.m4 \
+ $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac
+am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
+ $(ACLOCAL_M4)
+mkinstalldirs = $(install_sh) -d
+CONFIG_HEADER = $(top_builddir)/config.h
+CONFIG_CLEAN_FILES =
+CONFIG_CLEAN_VPATH_FILES =
+AM_V_P = $(am__v_P_@AM_V@)
+am__v_P_ = $(am__v_P_@AM_DEFAULT_V@)
+am__v_P_0 = false
+am__v_P_1 = :
+AM_V_GEN = $(am__v_GEN_@AM_V@)
+am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@)
+am__v_GEN_0 = @echo " GEN " $@;
+am__v_GEN_1 =
+AM_V_at = $(am__v_at_@AM_V@)
+am__v_at_ = $(am__v_at_@AM_DEFAULT_V@)
+am__v_at_0 = @
+am__v_at_1 =
+SOURCES =
+DIST_SOURCES =
+am__can_run_installinfo = \
+ case $$AM_UPDATE_INFO_DIR in \
+ n|no|NO) false;; \
+ *) (install-info --version) >/dev/null 2>&1;; \
+ esac
+am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`;
+am__vpath_adj = case $$p in \
+ $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \
+ *) f=$$p;; \
+ esac;
+am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`;
+am__install_max = 40
+am__nobase_strip_setup = \
+ srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'`
+am__nobase_strip = \
+ for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||"
+am__nobase_list = $(am__nobase_strip_setup); \
+ for p in $$list; do echo "$$p $$p"; done | \
+ sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \
+ $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \
+ if (++n[$$2] == $(am__install_max)) \
+ { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \
+ END { for (dir in files) print dir, files[dir] }'
+am__base_list = \
+ sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \
+ sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g'
+am__uninstall_files_from_dir = { \
+ test -z "$$files" \
+ || { test ! -d "$$dir" && test ! -f "$$dir" && test ! -r "$$dir"; } \
+ || { echo " ( cd '$$dir' && rm -f" $$files ")"; \
+ $(am__cd) "$$dir" && rm -f $$files; }; \
+ }
+am__installdirs = "$(DESTDIR)$(statsdconfigdir)" \
+ "$(DESTDIR)$(userstatsdconfigdir)"
+DATA = $(dist_noinst_DATA) $(dist_statsdconfig_DATA) \
+ $(dist_userstatsdconfig_DATA)
+am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP)
+DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
+ACLOCAL = @ACLOCAL@
+AMTAR = @AMTAR@
+AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@
+AUTOCONF = @AUTOCONF@
+AUTOHEADER = @AUTOHEADER@
+AUTOMAKE = @AUTOMAKE@
+AWK = @AWK@
+CC = @CC@
+CCDEPMODE = @CCDEPMODE@
+CFLAGS = @CFLAGS@
+CPP = @CPP@
+CPPFLAGS = @CPPFLAGS@
+CYGPATH_W = @CYGPATH_W@
+DEFS = @DEFS@
+DEPDIR = @DEPDIR@
+ECHO_C = @ECHO_C@
+ECHO_N = @ECHO_N@
+ECHO_T = @ECHO_T@
+EGREP = @EGREP@
+EXEEXT = @EXEEXT@
+GREP = @GREP@
+INSTALL = @INSTALL@
+INSTALL_DATA = @INSTALL_DATA@
+INSTALL_PROGRAM = @INSTALL_PROGRAM@
+INSTALL_SCRIPT = @INSTALL_SCRIPT@
+INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@
+IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@
+LDFLAGS = @LDFLAGS@
+LIBCAP_CFLAGS = @LIBCAP_CFLAGS@
+LIBCAP_LIBS = @LIBCAP_LIBS@
+LIBMNL_CFLAGS = @LIBMNL_CFLAGS@
+LIBMNL_LIBS = @LIBMNL_LIBS@
+LIBOBJS = @LIBOBJS@
+LIBS = @LIBS@
+LTLIBOBJS = @LTLIBOBJS@
+MAINT = @MAINT@
+MAKEINFO = @MAKEINFO@
+MATH_CFLAGS = @MATH_CFLAGS@
+MATH_LIBS = @MATH_LIBS@
+MKDIR_P = @MKDIR_P@
+NFACCT_CFLAGS = @NFACCT_CFLAGS@
+NFACCT_LIBS = @NFACCT_LIBS@
+OBJEXT = @OBJEXT@
+OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@
+OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@
+OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@
+OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@
+OPTIONAL_MATH_CLFAGS = @OPTIONAL_MATH_CLFAGS@
+OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@
+OPTIONAL_NFACCT_CLFAGS = @OPTIONAL_NFACCT_CLFAGS@
+OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@
+OPTIONAL_UUID_CLFAGS = @OPTIONAL_UUID_CLFAGS@
+OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@
+OPTIONAL_ZLIB_CLFAGS = @OPTIONAL_ZLIB_CLFAGS@
+OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@
+PACKAGE = @PACKAGE@
+PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@
+PACKAGE_NAME = @PACKAGE_NAME@
+PACKAGE_RPM_RELEASE = @PACKAGE_RPM_RELEASE@
+PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@
+PACKAGE_STRING = @PACKAGE_STRING@
+PACKAGE_TARNAME = @PACKAGE_TARNAME@
+PACKAGE_URL = @PACKAGE_URL@
+PACKAGE_VERSION = @PACKAGE_VERSION@
+PATH_SEPARATOR = @PATH_SEPARATOR@
+PKG_CONFIG = @PKG_CONFIG@
+PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@
+PKG_CONFIG_PATH = @PKG_CONFIG_PATH@
+PTHREAD_CC = @PTHREAD_CC@
+PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
+PTHREAD_LIBS = @PTHREAD_LIBS@
+SET_MAKE = @SET_MAKE@
+SHELL = @SHELL@
+SSE_CANDIDATE = @SSE_CANDIDATE@
+STRIP = @STRIP@
+UUID_CFLAGS = @UUID_CFLAGS@
+UUID_LIBS = @UUID_LIBS@
+VERSION = @VERSION@
+ZLIB_CFLAGS = @ZLIB_CFLAGS@
+ZLIB_LIBS = @ZLIB_LIBS@
+abs_builddir = @abs_builddir@
+abs_srcdir = @abs_srcdir@
+abs_top_builddir = @abs_top_builddir@
+abs_top_srcdir = @abs_top_srcdir@
+ac_ct_CC = @ac_ct_CC@
+am__include = @am__include@
+am__leading_dot = @am__leading_dot@
+am__quote = @am__quote@
+am__tar = @am__tar@
+am__untar = @am__untar@
+ax_pthread_config = @ax_pthread_config@
+bindir = @bindir@
+build = @build@
+build_alias = @build_alias@
+build_cpu = @build_cpu@
+build_os = @build_os@
+build_target = @build_target@
+build_vendor = @build_vendor@
+builddir = @builddir@
+cachedir = @cachedir@
+chartsdir = @chartsdir@
+configdir = @configdir@
+datadir = @datadir@
+datarootdir = @datarootdir@
+docdir = @docdir@
+dvidir = @dvidir@
+exec_prefix = @exec_prefix@
+has_jemalloc = @has_jemalloc@
+has_tcmalloc = @has_tcmalloc@
+host = @host@
+host_alias = @host_alias@
+host_cpu = @host_cpu@
+host_os = @host_os@
+host_vendor = @host_vendor@
+htmldir = @htmldir@
+includedir = @includedir@
+infodir = @infodir@
+install_sh = @install_sh@
+libconfigdir = @libconfigdir@
+libdir = @libdir@
+libexecdir = @libexecdir@
+localedir = @localedir@
+localstatedir = @localstatedir@
+logdir = @logdir@
+mandir = @mandir@
+mkdir_p = @mkdir_p@
+nodedir = @nodedir@
+oldincludedir = @oldincludedir@
+pdfdir = @pdfdir@
+pluginsdir = @pluginsdir@
+prefix = @prefix@
+program_transform_name = @program_transform_name@
+psdir = @psdir@
+pythondir = @pythondir@
+registrydir = @registrydir@
+sbindir = @sbindir@
+sharedstatedir = @sharedstatedir@
+srcdir = @srcdir@
+sysconfdir = @sysconfdir@
+target_alias = @target_alias@
+top_build_prefix = @top_build_prefix@
+top_builddir = @top_builddir@
+top_srcdir = @top_srcdir@
+varlibdir = @varlibdir@
+webdir = @webdir@
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+dist_noinst_DATA = \
+ README.md \
+ $(NULL)
+
+statsdconfigdir = $(libconfigdir)/statsd.d
+dist_statsdconfig_DATA = \
+ $(top_srcdir)/installer/.keep \
+ example.conf \
+ $(NULL)
+
+userstatsdconfigdir = $(configdir)/statsd.d
+dist_userstatsdconfig_DATA = \
+ $(top_srcdir)/installer/.keep \
+ $(NULL)
+
+all: all-am
+
+.SUFFIXES:
+$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps)
+ @for dep in $?; do \
+ case '$(am__configure_deps)' in \
+ *$$dep*) \
+ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \
+ && { if test -f $@; then exit 0; else break; fi; }; \
+ exit 1;; \
+ esac; \
+ done; \
+ echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu collectors/statsd.plugin/Makefile'; \
+ $(am__cd) $(top_srcdir) && \
+ $(AUTOMAKE) --gnu collectors/statsd.plugin/Makefile
+.PRECIOUS: Makefile
+Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
+ @case '$?' in \
+ *config.status*) \
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \
+ *) \
+ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \
+ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \
+ esac;
+
+$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+
+$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(am__aclocal_m4_deps):
+install-dist_statsdconfigDATA: $(dist_statsdconfig_DATA)
+ @$(NORMAL_INSTALL)
+ @list='$(dist_statsdconfig_DATA)'; test -n "$(statsdconfigdir)" || list=; \
+ if test -n "$$list"; then \
+ echo " $(MKDIR_P) '$(DESTDIR)$(statsdconfigdir)'"; \
+ $(MKDIR_P) "$(DESTDIR)$(statsdconfigdir)" || exit 1; \
+ fi; \
+ for p in $$list; do \
+ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
+ echo "$$d$$p"; \
+ done | $(am__base_list) | \
+ while read files; do \
+ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(statsdconfigdir)'"; \
+ $(INSTALL_DATA) $$files "$(DESTDIR)$(statsdconfigdir)" || exit $$?; \
+ done
+
+uninstall-dist_statsdconfigDATA:
+ @$(NORMAL_UNINSTALL)
+ @list='$(dist_statsdconfig_DATA)'; test -n "$(statsdconfigdir)" || list=; \
+ files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
+ dir='$(DESTDIR)$(statsdconfigdir)'; $(am__uninstall_files_from_dir)
+install-dist_userstatsdconfigDATA: $(dist_userstatsdconfig_DATA)
+ @$(NORMAL_INSTALL)
+ @list='$(dist_userstatsdconfig_DATA)'; test -n "$(userstatsdconfigdir)" || list=; \
+ if test -n "$$list"; then \
+ echo " $(MKDIR_P) '$(DESTDIR)$(userstatsdconfigdir)'"; \
+ $(MKDIR_P) "$(DESTDIR)$(userstatsdconfigdir)" || exit 1; \
+ fi; \
+ for p in $$list; do \
+ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
+ echo "$$d$$p"; \
+ done | $(am__base_list) | \
+ while read files; do \
+ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(userstatsdconfigdir)'"; \
+ $(INSTALL_DATA) $$files "$(DESTDIR)$(userstatsdconfigdir)" || exit $$?; \
+ done
+
+uninstall-dist_userstatsdconfigDATA:
+ @$(NORMAL_UNINSTALL)
+ @list='$(dist_userstatsdconfig_DATA)'; test -n "$(userstatsdconfigdir)" || list=; \
+ files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
+ dir='$(DESTDIR)$(userstatsdconfigdir)'; $(am__uninstall_files_from_dir)
+tags TAGS:
+
+ctags CTAGS:
+
+cscope cscopelist:
+
+
+distdir: $(DISTFILES)
+ @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+ list='$(DISTFILES)'; \
+ dist_files=`for file in $$list; do echo $$file; done | \
+ sed -e "s|^$$srcdirstrip/||;t" \
+ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \
+ case $$dist_files in \
+ */*) $(MKDIR_P) `echo "$$dist_files" | \
+ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \
+ sort -u` ;; \
+ esac; \
+ for file in $$dist_files; do \
+ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \
+ if test -d $$d/$$file; then \
+ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \
+ if test -d "$(distdir)/$$file"; then \
+ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+ fi; \
+ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \
+ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \
+ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+ fi; \
+ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \
+ else \
+ test -f "$(distdir)/$$file" \
+ || cp -p $$d/$$file "$(distdir)/$$file" \
+ || exit 1; \
+ fi; \
+ done
+check-am: all-am
+check: check-am
+all-am: Makefile $(DATA)
+installdirs:
+ for dir in "$(DESTDIR)$(statsdconfigdir)" "$(DESTDIR)$(userstatsdconfigdir)"; do \
+ test -z "$$dir" || $(MKDIR_P) "$$dir"; \
+ done
+install: install-am
+install-exec: install-exec-am
+install-data: install-data-am
+uninstall: uninstall-am
+
+install-am: all-am
+ @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
+
+installcheck: installcheck-am
+install-strip:
+ if test -z '$(STRIP)'; then \
+ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+ install; \
+ else \
+ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+ "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \
+ fi
+mostlyclean-generic:
+
+clean-generic:
+
+distclean-generic:
+ -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
+ -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES)
+
+maintainer-clean-generic:
+ @echo "This command is intended for maintainers to use"
+ @echo "it deletes files that may require special tools to rebuild."
+ -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES)
+clean: clean-am
+
+clean-am: clean-generic mostlyclean-am
+
+distclean: distclean-am
+ -rm -f Makefile
+distclean-am: clean-am distclean-generic
+
+dvi: dvi-am
+
+dvi-am:
+
+html: html-am
+
+html-am:
+
+info: info-am
+
+info-am:
+
+install-data-am: install-dist_statsdconfigDATA \
+ install-dist_userstatsdconfigDATA
+
+install-dvi: install-dvi-am
+
+install-dvi-am:
+
+install-exec-am:
+
+install-html: install-html-am
+
+install-html-am:
+
+install-info: install-info-am
+
+install-info-am:
+
+install-man:
+
+install-pdf: install-pdf-am
+
+install-pdf-am:
+
+install-ps: install-ps-am
+
+install-ps-am:
+
+installcheck-am:
+
+maintainer-clean: maintainer-clean-am
+ -rm -f Makefile
+maintainer-clean-am: distclean-am maintainer-clean-generic
+
+mostlyclean: mostlyclean-am
+
+mostlyclean-am: mostlyclean-generic
+
+pdf: pdf-am
+
+pdf-am:
+
+ps: ps-am
+
+ps-am:
+
+uninstall-am: uninstall-dist_statsdconfigDATA \
+ uninstall-dist_userstatsdconfigDATA
+
+.MAKE: install-am install-strip
+
+.PHONY: all all-am check check-am clean clean-generic cscopelist-am \
+ ctags-am distclean distclean-generic distdir dvi dvi-am html \
+ html-am info info-am install install-am install-data \
+ install-data-am install-dist_statsdconfigDATA \
+ install-dist_userstatsdconfigDATA install-dvi install-dvi-am \
+ install-exec install-exec-am install-html install-html-am \
+ install-info install-info-am install-man install-pdf \
+ install-pdf-am install-ps install-ps-am install-strip \
+ installcheck installcheck-am installdirs maintainer-clean \
+ maintainer-clean-generic mostlyclean mostlyclean-generic pdf \
+ pdf-am ps ps-am tags-am uninstall uninstall-am \
+ uninstall-dist_statsdconfigDATA \
+ uninstall-dist_userstatsdconfigDATA
+
+
+# Tell versions [3.59,3.63) of GNU make to not export all variables.
+# Otherwise a system limit (for SysV at least) may be exceeded.
+.NOEXPORT:
diff --git a/collectors/statsd.plugin/README.md b/collectors/statsd.plugin/README.md
new file mode 100644
index 000000000..6ef038343
--- /dev/null
+++ b/collectors/statsd.plugin/README.md
@@ -0,0 +1,523 @@
+# Netdata Statsd
+
+statsd is a system to collect data from any application. Applications send metrics to it, usually via non-blocking UDP communication, and statsd servers collect these metrics, perform a few simple calculations on them and push them to backend time-series databases.
+
+There is a [plethora of client libraries](https://github.com/etsy/statsd/wiki#client-implementations) for embedding statsd metrics into any application framework. This makes statsd quite popular for custom application metrics.
+
+## netdata statsd
+
+netdata is a fully featured statsd server. It can collect statsd formatted metrics, visualize them on its dashboards, stream them to other netdata servers or archive them to backend time-series databases.
+
+netdata statsd is inside netdata (an internal plugin, running inside the netdata daemon). It is configured via `netdata.conf` and by default listens on the standard statsd port (8125, both TCP and UDP - yes, the netdata statsd server supports both TCP and UDP at the same time).
+
+Since statsd is embedded in netdata, you now have a statsd server on every server netdata runs on. So, each application can send its metrics to `localhost:8125`. This provides a distributed statsd implementation.
+
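+For example, a few lines of Python are enough to push a metric to the embedded statsd server. This is a minimal illustrative sketch (the metric name `myapp.cache.used` is made up for the example):
+
+```
+import socket
+
+# statsd metrics are plain text lines, sent here over UDP to the default port 8125
+sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
+sock.sendto(b'myapp.cache.used:256|g', ('localhost', 8125))
+```
+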
+netdata statsd is fast. It can collect more than **1,200,000 metrics per second** on modern hardware, more than **200Mbps of sustained statsd traffic**, using 1 CPU core (yes, metric collection is single threaded - the plugin actually uses two threads: one collects metrics, the other updates the charts from the collected data).
+
+## metrics supported by netdata
+
+netdata fully supports the statsd protocol. All statsd client libraries can be used with netdata too. A short example of the wire format for each metric type is shown right after the list below.
+
+- **Gauges**
+
+ The application sends `name:value|g`, where `value` is any **decimal/fractional** number; statsd reports the latest value collected and the number of times it was updated (events).
+
+ The application may increment or decrement a previous value, by setting the first character of the value to ` + ` or ` - ` (so, the only way to set a gauge to an absolute negative value is to first set it to zero).
+
+ Sampling rate is supported (check below).
+
+ When a gauge is not collected and the setting is not to show gaps on the charts (the default), the last value will be shown, until a data collection event changes it.
+
+- **Counters** and **Meters**
+
+ The application sends `name:value|c`, `name:value|C` or `name:value|m`, where `value` is a positive or negative **integer** number of events that occurred; statsd reports the **rate** and the number of times it was updated (events).
+
+ `:value` can be omitted and statsd will assume it is `1`. `|c`, `|C` and `|m` can be omitted and statsd will assume it is `|m`. So, the application may send just `name` and statsd will parse it as `name:1|m`.
+
+ For counters use `|c` (etsy/statsd compatible) or `|C` (brubeck compatible), for meters use `|m`.
+
+ Sampling rate is supported (check below).
+
+ When a counter or meter is not collected and the setting is not to show gaps on the charts (the default), zero will be shown, until a data collection event changes it.
+
+- **Timers** and **Histograms**
+
+ The application sends `name:value|ms` or `name:value|h`, where `value` is any **decimal/fractional** number; statsd reports **min**, **max**, **average**, **sum**, **95th percentile**, **median** and **standard deviation** and the total number of times it was updated (events).
+
+ For timers use `|ms`, for histograms use `|h`. The only difference between the two is the `units` of the charts (timers report milliseconds).
+
+ Sampling rate is supported (check below).
+
+ When a timer or histogram is not collected and the setting is not to show gaps on the charts (the default), zero will be shown, until a data collection event changes it.
+
+- **Sets**
+
+ The application sends `name:value|s`, where `value` is anything (**number or text**, leading and trailing spaces are removed); statsd reports the number of unique values sent and the number of times it was updated (events).
+
+ Sampling rate is **not** supported for Sets. `value` is always considered text.
+
+ When a set is not collected and the setting is not to show gaps on the charts (the default), zero will be shown, until a data collection event changes it.
+
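+Putting the formats above together, these are illustrative examples of the plain-text lines an application could send - a gauge, a counter, a histogram, a timer, a set and a sampled counter, in that order (all metric names are made up for the example):
+
+```
+myapp.users.online:42|g
+myapp.requests:1|c
+myapp.download.size:1024|h
+myapp.response.time:17.4|ms
+myapp.visitor.id:12345|s
+myapp.requests:1|c|@0.1
+```
+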
+#### Sampling Rates
+
+The application may append `|@sampling_rate`, where `sampling_rate` is a number from `0.0` to `1.0`, to have statsd extrapolate the value and predict the total for the whole period. So, if the application reports to statsd a value for 1/10th of the time, it can append `|@0.1` to the metrics it sends to statsd.
+
+#### Overlapping metrics
+
+netdata statsd maintains different indexes for each of the types supported. This means the same metric `name` may exist under different types concurrently.
+
+#### Multiple metrics per packet
+
+netdata accepts multiple metrics per packet if each is terminated with `\n`.
+
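+For example, this minimal sketch sends two made-up metrics in a single UDP datagram, separated by `\n`:
+
+```
+import socket
+
+payload = '\n'.join(['myapp.requests:1|c', 'myapp.users.online:42|g'])
+sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
+sock.sendto(payload.encode(), ('localhost', 8125))
+```
+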
+#### TCP packets
+
+netdata listens for both TCP and UDP packets. For TCP though, it is important to always append `\n` to each metric. netdata uses this to detect if a metric is split across multiple TCP packets. On disconnect, even the remaining buffer (not terminated with `\n`) is processed.
+
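+A minimal TCP sketch (the metric name is made up; note the trailing `\n` that terminates the metric):
+
+```
+import socket
+
+sock = socket.create_connection(('localhost', 8125))
+sock.sendall(b'myapp.requests:1|c\n')
+sock.close()
+```
+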
+#### UDP packets
+
+When sending multiple metrics in a single UDP packet, it is important not to exceed the network MTU (usually 1500 bytes, minus a few bytes for the headers). netdata will accept UDP packets up to 9000 bytes, but the underlying network will not deliver packets larger than its MTU.
+
+## configuration
+
+This is the statsd configuration at `/etc/netdata/netdata.conf`:
+
+```
+[statsd]
+ # enabled = yes
+ # decimal detail = 1000
+ # update every (flushInterval) = 1
+ # udp messages to process at once = 10
+ # create private charts for metrics matching = *
+ # max private charts allowed = 200
+ # max private charts hard limit = 1000
+ # private charts memory mode = save
+ # private charts history = 3996
+ # histograms and timers percentile (percentThreshold) = 95.00000
+ # add dimension for number of events received = yes
+ # gaps on gauges (deleteGauges) = no
+ # gaps on counters (deleteCounters) = no
+ # gaps on meters (deleteMeters) = no
+ # gaps on sets (deleteSets) = no
+ # gaps on histograms (deleteHistograms) = no
+ # gaps on timers (deleteTimers) = no
+ # listen backlog = 4096
+ # default port = 8125
+ # bind to = udp:localhost:8125 tcp:localhost:8125
+```
+
+### statsd main config options
+- `enabled = yes|no`
+
+ controls if statsd will be enabled for this netdata. The default is enabled.
+
+- `default port = 8125`
+
+ controls the default port statsd will use. This is only the default, since the next option allows defining ports per listening socket too.
+
+- `bind to = udp:localhost tcp:localhost`
+
+ is a space separated list of IPs and ports to listen to. The format is `PROTOCOL:IP:PORT` - if `PORT` is omitted, the `default port` will be used. If `IP` is IPv6, it needs to be enclosed in `[]`. `IP` can also be ` * ` (to listen on all IPs) or even a hostname.
+
+- `update every (flushInterval) = 1` seconds, controls how often statsd will push the collected metrics to netdata charts.
+
+- `decimal detail = 1000` controls the number of fractional digits in gauges and histograms. netdata collects metrics using signed 64 bit integers and their fractional detail is controlled using multipliers and divisors. This setting is used to multiply all collected values to convert them to integers and is also set as the divisors, so that the final data will be a floating point number with this fractional detail (1000 = X.0 - X.999, 10000 = X.0 - X.9999, etc).
+
+The rest of the settings are discussed below.
+
+## statsd charts
+
+netdata can visualize statsd collected metrics in 2 ways:
+
+1. Each metric gets its own **private chart**. This is the default and does not require any configuration (although there are a few options to tweak).
+
+2. **Synthetic charts** can be created, combining multiple metrics, independently of their metric types. For this type of chart, special configuration is required to define the chart title, type, units, its dimensions, etc.
+
+### private metric charts
+
+Private charts are controlled with `create private charts for metrics matching = *`. This setting accepts a space separated list of simple patterns (use `*` as wildcard, prepend a pattern with `!` for a negative match, the order of patterns is important).
+
+So to render charts for all `myapp.*` metrics, except `myapp.*.badmetric`, use:
+
+```
+create private charts for metrics matching = !myapp.*.badmetric myapp.*
+```
+
+The default is to render private charts for all metrics.
+
+The `memory mode` of the round robin database and the `history` of private metric charts are controlled with `private charts memory mode` and `private charts history`. The default for both settings is to use the global netdata settings. So, you need to edit them only when you want statsd to use different settings compared to the global ones.
+
+If you have thousands of metrics, each with its own private chart, you may notice that your web browser becomes slow when you view the netdata dashboard (this is a web browser issue we need to address at the netdata UI). So, netdata has a protection to stop creating charts when `max private charts allowed = 200` (soft limit) is reached.
+
+The metrics above this soft limit are still processed by netdata and will be available to be sent to backend time-series databases, up to `max private charts hard limit = 1000`. So, between 200 and 1000 charts, netdata will still generate charts, but they will automatically be created with `memory mode = none` (netdata will not maintain a database for them). These metrics will be sent to backend time series databases, if the backend configuration is set to `as collected`.
+
+Metrics above the hard limit are still collected, but they can only be used in synthetic charts (once a metric is added to a chart, it will be sent to backend servers too).
+
+Example private charts (automatically generated without any configuration):
+
+#### counters
+
+- Scope: **count the events of something** (e.g. number of file downloads)
+- Format: `name:INTEGER|c` or `name:INTEGER|C` or `name|c`
+- statsd increments the counter by the `INTEGER` number supplied (positive, or negative).
+
+![image](https://cloud.githubusercontent.com/assets/2662304/26131553/4a26d19c-3aa3-11e7-94e8-c53b5ed6ebc3.png)
+
+#### gauges
+
+- Scope: **report the value of something** (e.g. cache memory used by the application server)
+- Format: `name:FLOAT|g`
+- statsd remembers the last value supplied, and can increment or decrement the latest value if `FLOAT` begins with ` + ` or ` - `.
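+
+For example, assuming a hypothetical metric `cache.memory`, the following sequence sets the gauge to 1000000, then increases it by 500 and finally decreases it by 200:
+
+```
+cache.memory:1000000|g
+cache.memory:+500|g
+cache.memory:-200|g
+```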
+
+![image](https://cloud.githubusercontent.com/assets/2662304/26131575/5d54e6f0-3aa3-11e7-9099-bc4440cd4592.png)
+
+#### histograms
+
+- Scope: **statistics on the size of events** (e.g. statistics on the sizes of files downloaded)
+- Format: `name:FLOAT|h`
+- statsd maintains a list of all the values supplied and provides statistics on them.
+
+![image](https://cloud.githubusercontent.com/assets/2662304/26131587/704de72a-3aa3-11e7-9ea9-0d2bb778c150.png)
+
+The same chart with `sum` unselected, to show the detail of the dimensions supported:
+![image](https://cloud.githubusercontent.com/assets/2662304/26131598/8076443a-3aa3-11e7-9ffa-ea535aee9c9f.png)
+
+#### meters
+
+This is identical to `counter`.
+
+- Scope: **count the events of something** (e.g. number of file downloads)
+- Format: `name:INTEGER|m` or `name|m` or just `name`
+- statsd increments the counter by the `INTEGER` number supplied (positive, or negative).
+
+![image](https://cloud.githubusercontent.com/assets/2662304/26131605/8fdf5a06-3aa3-11e7-963f-7ecf207d1dbc.png)
+
+#### sets
+
+- Scope: **count the unique occurrences of something** (e.g. unique filenames downloaded, or unique users that downloaded files)
+- Format: `name:TEXT|s`
+- statsd maintains a unique index of all values supplied, and reports the unique entries in it.
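+
+For example, assuming a hypothetical metric `user.logins`, sending the following within one flush interval reports 2 unique entries (`alice` is counted once):
+
+```
+user.logins:alice|s
+user.logins:bob|s
+user.logins:alice|s
+```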
+
+![image](https://cloud.githubusercontent.com/assets/2662304/26131612/9eaa7b1a-3aa3-11e7-903b-d881e9a35be2.png)
+
+#### timers
+
+- Scope: **statistics on the duration of events** (e.g. statistics for the duration of file downloads)
+- Format: `name:FLOAT|ms`
+- statsd maintains a list of all the values supplied and provides statistics on them.
+
+![image](https://cloud.githubusercontent.com/assets/2662304/26131620/acbea6a4-3aa3-11e7-8bdd-4a8996847767.png)
+
+The same chart with the `sum` unselected:
+![image](https://cloud.githubusercontent.com/assets/2662304/26131629/bc34f2d2-3aa3-11e7-8a07-f2fc94ba4352.png)
+
+
+
+### synthetic statsd charts
+
+Using synthetic charts, you can create dedicated sections on the dashboard to render the charts. You can control everything: the main menu, the submenus, the charts, the dimensions on each chart, etc.
+
+Synthetic charts are organized in
+
+- **applications** (i.e. entries at the main menu of the netdata dashboard)
+- **charts for each application** (grouped in families - i.e. submenus at the dashboard menu)
+- **statsd metrics for each chart** (i.e. dimensions of the charts)
+
+For each application you need to create a `.conf` file in `/etc/netdata/statsd.d`.
+
+So, to create the statsd application `myapp`, you can create the file `/etc/netdata/statsd.d/myapp.conf`, with this content:
+
+```
+[app]
+ name = myapp
+ metrics = myapp.*
+ private charts = no
+ gaps when not collected = no
+ memory mode = ram
+ history = 60
+
+[dictionary]
+ m1 = metric1
+ m2 = metric2
+
+# replace 'mychart' with the chart id
+# the chart will be named: myapp.mychart
+[mychart]
+ name = mychart
+ title = my chart title
+ family = my family
+ context = chart.context
+ units = tests/s
+ priority = 91000
+ type = area
+ dimension = myapp.metric1 m1
+ dimension = myapp.metric2 m2
+```
+
+Using the above configuration `myapp` should get its own section on the dashboard, having one chart with 2 dimensions.
+
+`[app]` starts a new application definition. The supported settings in this section are:
+
+- `name` defines the name of the app.
+- `metrics` is a netdata simple pattern (space separated patterns, using `*` for wildcard, possibly starting with `!` for negative match). This pattern should match all the possible statsd metrics that will be participating in the application `myapp`.
+- `private charts = yes|no`, enables or disables private charts for the metrics matched.
+- `gaps when not collected = yes|no`, enables or disables gaps on the charts of the application, when metrics are not collected.
+- `memory mode` sets the memory mode for all charts of the application. The default is the global default for netdata (not the global default for statsd private charts).
+- `history` sets the size of the round robin database for this application. The default is the global default for netdata (not the global default for statsd private charts).
+
+`[dictionary]` defines name-value associations. These are used to rename metrics when they are added to synthetic charts. Metric names are also defined at each `dimension` line. However, the dictionary allows dimension names to be declared globally, once per app, and it is the only way to rename dimensions when using patterns. Of course, the dictionary can be empty or missing.
+
+Then, you can add any number of charts. Each chart should start with `[id]`. The chart will be called `app_name.id`. `family` controls the submenu on the dashboard. `context` controls the alarm templates. `priority` controls the ordering of the charts on the dashboard. The rest of the settings are informational.
+
+You can add any number of metrics to a chart, using `dimension` lines. These lines accept 6 space separated parameters:
+
+1. the metric name, as it is collected (it has to be matched by the `metrics = ` pattern of the app)
+2. the dimension name, as it should be shown on the chart
+3. an optional selector (type) of the value to be shown (see below)
+4. an optional multiplier
+5. an optional divider
+6. optional flags, space separated and enclosed in quotes. All the external plugins' `DIMENSION` flags can be used. Currently the only usable flag is `hidden`, to add the dimension but not show it on the dashboard. This is usually needed to have the values available for percentage calculations, or to use them in alarms.
+
+So, the format is this:
+```
+dimension = [pattern] METRIC NAME TYPE MULTIPLIER DIVIDER OPTIONS
+```
+
+`pattern` is a keyword. When set, `METRIC` is expected to be a netdata simple pattern that will be used to match all the statsd metrics to be added to the chart. So, `pattern` automatically matches any number of statsd metrics, all of which will be added as separate chart dimensions.
+
+`TYPE`, `MULTIPLIER`, `DIVIDER` and `OPTIONS` are optional.
+
+`TYPE` can be:
+
+- `events` to show the number of events received by statsd for this metric
+- `last` to show the last value, as calculated at the flush interval of the metric (the default)
+
+Then for histograms and timers the following types are also supported:
+
+- `min`, show the minimum value
+- `max`, show the maximum value
+- `sum`, show the sum of all values
+- `average` (same as `last`)
+- `percentile`, show the 95th percentile (or any other percentile, as configured at statsd global config)
+- `median`, show the median of all values (i.e. sort all values and get the middle value)
+- `stddev`, show the standard deviation of the values
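+
+For example, a chart for a hypothetical timer metric `myapp.response_time` could show its 95th percentile and maximum like this (a sketch, not a complete chart definition):
+
+```
+[response_times]
+ ...
+ dimension = myapp.response_time 'p95' percentile 1 1
+ dimension = myapp.response_time 'max' max 1 1
+```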
+
+#### example synthetic charts
+
+statsd metrics: `foo` and `bar`.
+
+Contents of file `/etc/netdata/statsd.d/foobar.conf`:
+
+```
+[app]
+ name = foobarapp
+ metrics = foo bar
+ private charts = yes
+
+[foobar_chart1]
+ title = Hey, foo and bar together
+ family = foobar_family
+ context = foobarapp.foobars
+ units = foobars
+ type = area
+ dimension = foo 'foo me' last 1 1
+ dimension = bar 'bar me' last 1 1
+```
+
+I sent to statsd: `foo:10|g` and `bar:20|g`.
+
+I got these private charts:
+
+![screenshot from 2017-08-03 23-28-19](https://user-images.githubusercontent.com/2662304/28942295-7c3a73a8-78a3-11e7-88e5-a9a006bb7465.png)
+
+and this synthetic chart:
+
+![screenshot from 2017-08-03 23-29-14](https://user-images.githubusercontent.com/2662304/28942317-958a2c68-78a3-11e7-853f-32850141dd36.png)
+
+#### dictionary to name dimensions
+
+The `[dictionary]` section accepts any number of `name = value` pairs.
+
+netdata uses this dictionary as follows:
+
+1. When a `dimension` has a non-empty `NAME`, that name is looked up in the dictionary.
+
+2. If the above lookup gives nothing, or the `dimension` has an empty `NAME`, the original statsd metric name is looked up in the dictionary.
+
+3. If any of the above succeeds, netdata uses the dictionary `value` to set the name of the dimension. The dimensions will have as ID the original statsd metric name, and as name, the dictionary value.
+
+So, you can use the dictionary in 2 ways:
+
+1. set `dimension = myapp.metric1 ''` and have in the dictionary `myapp.metric1 = metric1 name`
+2. set `dimension = myapp.metric1 'm1'` and have in the dictionary `m1 = metric1 name`
+
+In both cases, the dimension will be added with ID `myapp.metric1` and will be named `metric1 name`. So, in alarms you can use either of the 2 as `${myapp.metric1}` or `${metric1 name}`.
+
+> keep in mind that if you add the same statsd metric to a chart multiple times, netdata will append `TYPE` to the dimension ID, so `myapp.metric1` will be added as `myapp.metric1_last` or `myapp.metric1_events`, etc. If you add the same metric with the same `TYPE` to a chart multiple times, netdata will also append an incremental counter to the dimension ID, i.e. `myapp.metric1_last1`, `myapp.metric1_last2`, etc.
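+
+For example, a minimal sketch (with hypothetical metric and dictionary names) that adds the same metric twice with different `TYPE`s:
+
+```
+[dictionary]
+ m1.value = metric1 value
+ m1.events = metric1 events
+
+[mychart]
+ ...
+ dimension = myapp.metric1 'm1.value' last 1 1
+ dimension = myapp.metric1 'm1.events' events 1 1
+```
+
+The two dimensions will get the IDs `myapp.metric1_last` and `myapp.metric1_events`, while the dictionary provides their readable names.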
+
+#### dimension patterns
+
+netdata allows adding multiple dimensions to a chart, by matching the statsd metrics with a netdata simple pattern.
+
+Assume we have an API that provides statsd metrics for each response code per method it supports, like these:
+
+```
+myapp.api.get.200
+myapp.api.get.400
+myapp.api.get.500
+myapp.api.del.200
+myapp.api.del.400
+myapp.api.del.500
+myapp.api.post.200
+myapp.api.post.400
+myapp.api.post.500
+myapp.api.all.200
+myapp.api.all.400
+myapp.api.all.500
+```
+
+To add all response codes of `myapp.api.get` to a chart use this:
+
+```
+[api_get_responses]
+ ...
+ dimension = pattern 'myapp.api.get.*' '' last 1 1
+```
+
+The above will add dimensions named `200`, `400` and `500` (yes, netdata extracts the wildcarded part of the metric name - so the dimensions will be named with whatever the `*` matched). You can rename the dimensions with this:
+
+```
+[dictionary]
+ get.200 = 200 ok
+ get.400 = 400 bad request
+ get.500 = 500 cannot connect to db
+
+[api_get_responses]
+ ...
+ dimension = pattern 'myapp.api.get.*' 'get.' last 1 1
+```
+
+Note that we added a `NAME` to the dimension line with `get.`. This is prefixed to the wildcarded part of the metric name, to compose the key for looking up the dictionary. So `500` became `get.500`, which was looked up in the dictionary to find the value `500 cannot connect to db`. This way we can have different dimension names for each of the API methods (i.e. `get.500 = 500 cannot connect to db` while `post.500 = 500 cannot write to disk`).
+
+To add all API methods to a chart, do this:
+
+```
+[ok_by_method]
+ ...
+ dimension = pattern 'myapp.api.*.200' '' last 1 1
+```
+
+The above will add `get`, `post`, `del` and `all` to the chart.
+
+If `all` is not wanted (a `stacked` chart does not need the `all` dimension, since the sum of the dimensions provides the total), the line should be:
+
+```
+[ok_by_method]
+ ...
+ dimension = pattern '!myapp.api.all.* myapp.api.*.200' '' last 1 1
+```
+
+With the above, all methods except `all` will be added to the chart.
+
+To automatically rename the methods, use this:
+
+```
+[dictionary]
+ method.get = GET
+ method.post = ADD
+ method.del = DELETE
+
+[ok_by_method]
+ ...
+ dimension = pattern '!myapp.api.all.* myapp.api.*.200' 'method.' last 1 1
+```
+
+Using the above, the dimensions will be added as `GET`, `ADD` and `DELETE`.
+
+
+## interpolation
+
+~~If you send just one value to statsd, you will notice that the chart is created but no value is shown. The reason is that netdata interpolates all values at second boundaries. For incremental values (`counters` and `meters` in statsd terminology), if you send 10 at 00:00:00.500, 20 at 00:00:01.500 and 30 at 00:00:02.500, netdata will show 15 at 00:00:01 and 25 at 00:00:02.~~
+
+~~This interpolation is automatic and global in netdata for all charts, for incremental values. This means that for the chart to start showing values you need to send 2 values across 2 flush intervals.~~
+
+~~(although this is required for incremental values, netdata allows mixing incremental and absolute values on the same charts, so this little limitation [i.e. 2 values to start visualization], is applied on all netdata dimensions).~~
+
+(statsd metrics do not lose their first data collection due to interpolation anymore - fixed with [PR #2411](https://github.com/netdata/netdata/pull/2411))
+
+## sending statsd metrics from shell scripts
+
+You can send/update statsd metrics from shell scripts. You can use this feature to visualize, in netdata, automated jobs you run on your servers.
+
+The command you need to run is:
+
+```sh
+echo "NAME:VALUE|TYPE" | nc -u --send-only localhost 8125
+```
+
+Where:
+
+- `NAME` is the metric name
+- `VALUE` is the value for that metric (**gauges** `|g`, **timers** `|ms` and **histograms** `|h` accept decimal/fractional numbers, **counters** `|c` and **meters** `|m` accept integers, **sets** `|s` accept anything)
+- `TYPE` is one of `g`, `ms`, `h`, `c`, `m`, `s` to select the metric type.
+
+So, to set `metric1` as gauge to value `10`, use:
+
+```sh
+echo "metric1:10|g" | nc -u --send-only localhost 8125
+```
+
+To increment `metric2` by `10`, as a counter, use:
+
+```sh
+echo "metric2:10|c" | nc -u --send-only localhost 8125
+```
+
+You can send multiple metrics like this:
+
+```sh
+# send multiple metrics via UDP
+printf "metric1:10|g\nmetric2:10|c\n" | nc -u --send-only localhost 8125
+```
+
+Remember, for UDP communication each packet should not exceed the MTU. So, if you plan to push too many metrics at once, prefer TCP communication:
+
+```sh
+# send multiple metrics via TCP
+printf "metric1:10|g\nmetric2:10|c\n" | nc --send-only localhost 8125
+```
+
+You can also use this little function to take care of all the details:
+
+```sh
+#!/usr/bin/env bash
+
+STATSD_HOST="localhost"
+STATSD_PORT="8125"
+statsd() {
+ local udp="-u" all="${*}"
+
+ # if the string length of all parameters given is above 1000, use TCP
+ [ "${#all}" -gt 1000 ] && udp=
+
+ while [ ! -z "${1}" ]
+ do
+ printf "${1}\n"
+ shift
+ done | nc ${udp} --send-only ${STATSD_HOST} ${STATSD_PORT} || return 1
+
+ return 0
+}
+```
+
+You can use it like this:
+
+```sh
+# first, source it in your script
+source statsd.sh
+
+# then, at any point:
+statsd "metric1:10|g" "metric2:10|c" ...
+```
+
+The function is smart enough to call `nc` just once and pass all the metrics to it. It will also automatically switch to TCP if the metrics to send are above 1000 bytes.
diff --git a/collectors/statsd.plugin/example.conf b/collectors/statsd.plugin/example.conf
new file mode 100644
index 000000000..2c7de6c7b
--- /dev/null
+++ b/collectors/statsd.plugin/example.conf
@@ -0,0 +1,64 @@
+# statsd synthetic charts configuration
+
+# You can add many .conf files in /etc/netdata/statsd.d/,
+# one for each of your apps.
+
+# start a new app - you can add many apps in the same file
+[app]
+ # give a name for this app
+ # this controls the main menu on the dashboard
+ # and will be the prefix for all charts of the app
+ name = myexampleapp
+
+ # match all the metrics of the app
+ metrics = myexampleapp.*
+
+ # shall private charts of these metrics be created?
+ private charts = no
+
+ # shall gaps be shown when metrics are not collected?
+ gaps when not collected = no
+
+ # the memory mode for the charts of this app: none|map|save
+ # the default is to use the global memory mode
+ #memory mode = ram
+
+ # the history size for the charts of this app, in seconds
+ # the default is to use the global history
+ #history = 3600
+
+# create a chart
+# this is its id - the chart will be named myexampleapp.myexamplechart
+[myexamplechart]
+ # a name for the chart, similar to the id (2 names for each chart)
+ name = myexamplechart
+
+ # the chart title
+ title = my chart title
+
+ # the submenu of the dashboard
+ family = my family
+
+ # the context for alarm templates
+ context = chart.context
+
+ # the units of the chart
+ units = tests/s
+
+ # the sorting priority of the chart on the dashboard
+ priority = 91000
+
+ # the type of chart to create: line | area | stacked
+ type = area
+
+ # one or more dimensions for the chart
+ # type = events | last | min | max | sum | average | percentile | median | stddev
+ # events = the number of events for this metric
+ # last = the last value collected
+ # all the others are only valid for histograms and timers
+ dimension = myexampleapp.metric1 avg average 1 1
+ dimension = myexampleapp.metric1 lower min 1 1
+ dimension = myexampleapp.metric1 upper max 1 1
+ dimension = myexampleapp.metric2 other last 1 1
+
+# You can add as many charts as needed
diff --git a/collectors/statsd.plugin/statsd.c b/collectors/statsd.plugin/statsd.c
new file mode 100644
index 000000000..c92bfd1c2
--- /dev/null
+++ b/collectors/statsd.plugin/statsd.c
@@ -0,0 +1,2556 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "statsd.h"
+
+#define STATSD_CHART_PREFIX "statsd"
+
+#define PLUGIN_STATSD_NAME "statsd.plugin"
+
+// --------------------------------------------------------------------------------------
+
+// #define STATSD_MULTITHREADED 1
+
+#ifdef STATSD_MULTITHREADED
+// DO NOT ENABLE MULTITHREADING - IT IS NOT WELL TESTED
+#define STATSD_AVL_TREE avl_tree_lock
+#define STATSD_AVL_INSERT avl_insert_lock
+#define STATSD_AVL_SEARCH avl_search_lock
+#define STATSD_AVL_INDEX_INIT { .avl_tree = { NULL, statsd_metric_compare }, .rwlock = AVL_LOCK_INITIALIZER }
+#define STATSD_FIRST_PTR_MUTEX netdata_mutex_t first_mutex
+#define STATSD_FIRST_PTR_MUTEX_INIT .first_mutex = NETDATA_MUTEX_INITIALIZER
+#define STATSD_FIRST_PTR_MUTEX_LOCK(index) netdata_mutex_lock(&((index)->first_mutex))
+#define STATSD_FIRST_PTR_MUTEX_UNLOCK(index) netdata_mutex_unlock(&((index)->first_mutex))
+#define STATSD_DICTIONARY_OPTIONS DICTIONARY_FLAG_DEFAULT
+#else
+#define STATSD_AVL_TREE avl_tree
+#define STATSD_AVL_INSERT avl_insert
+#define STATSD_AVL_SEARCH avl_search
+#define STATSD_AVL_INDEX_INIT { .root = NULL, .compar = statsd_metric_compare }
+#define STATSD_FIRST_PTR_MUTEX
+#define STATSD_FIRST_PTR_MUTEX_INIT
+#define STATSD_FIRST_PTR_MUTEX_LOCK(index)
+#define STATSD_FIRST_PTR_MUTEX_UNLOCK(index)
+#define STATSD_DICTIONARY_OPTIONS DICTIONARY_FLAG_SINGLE_THREADED
+#endif
+
+#define STATSD_DECIMAL_DETAIL 1000 // floating point values get multiplied by this, with the same divisor
+
+// --------------------------------------------------------------------------------------------------------------------
+// data specific to each metric type
+
+typedef struct statsd_metric_gauge {
+ LONG_DOUBLE value;
+} STATSD_METRIC_GAUGE;
+
+typedef struct statsd_metric_counter { // counter and meter
+ long long value;
+} STATSD_METRIC_COUNTER;
+
+typedef struct statsd_histogram_extensions {
+ netdata_mutex_t mutex;
+
+ // average is stored in metric->last
+ collected_number last_min;
+ collected_number last_max;
+ collected_number last_percentile;
+ collected_number last_median;
+ collected_number last_stddev;
+ collected_number last_sum;
+
+ int zeroed;
+
+ RRDDIM *rd_min;
+ RRDDIM *rd_max;
+ RRDDIM *rd_percentile;
+ RRDDIM *rd_median;
+ RRDDIM *rd_stddev;
+ RRDDIM *rd_sum;
+
+ size_t size;
+ size_t used;
+ LONG_DOUBLE *values; // dynamic array of values collected
+} STATSD_METRIC_HISTOGRAM_EXTENSIONS;
+
+typedef struct statsd_metric_histogram { // histogram and timer
+ STATSD_METRIC_HISTOGRAM_EXTENSIONS *ext;
+} STATSD_METRIC_HISTOGRAM;
+
+typedef struct statsd_metric_set {
+ DICTIONARY *dict;
+ size_t unique;
+} STATSD_METRIC_SET;
+
+
+// --------------------------------------------------------------------------------------------------------------------
+// this is a metric - for all types of metrics
+
+typedef enum statsd_metric_options {
+ STATSD_METRIC_OPTION_NONE = 0x00000000, // no options set
+ STATSD_METRIC_OPTION_SHOW_GAPS_WHEN_NOT_COLLECTED = 0x00000001, // do not update the chart dimension, when this metric is not collected
+ STATSD_METRIC_OPTION_PRIVATE_CHART_ENABLED = 0x00000002, // render a private chart for this metric
+ STATSD_METRIC_OPTION_PRIVATE_CHART_CHECKED = 0x00000004, // the metric has been checked if it should get private chart or not
+ STATSD_METRIC_OPTION_CHART_DIMENSION_COUNT = 0x00000008, // show the count of events for this private chart
+ STATSD_METRIC_OPTION_CHECKED_IN_APPS = 0x00000010, // set when this metric has been checked against apps
+ STATSD_METRIC_OPTION_USED_IN_APPS = 0x00000020, // set when this metric is used in apps
+ STATSD_METRIC_OPTION_CHECKED = 0x00000040, // set when the charting thread checks this metric for use in charts (its usefulness)
+ STATSD_METRIC_OPTION_USEFUL = 0x00000080, // set when the charting thread finds the metric useful (i.e. used in a chart)
+} STATS_METRIC_OPTIONS;
+
+typedef enum statsd_metric_type {
+ STATSD_METRIC_TYPE_GAUGE,
+ STATSD_METRIC_TYPE_COUNTER,
+ STATSD_METRIC_TYPE_METER,
+ STATSD_METRIC_TYPE_TIMER,
+ STATSD_METRIC_TYPE_HISTOGRAM,
+ STATSD_METRIC_TYPE_SET
+} STATSD_METRIC_TYPE;
+
+
+typedef struct statsd_metric {
+ avl avl; // indexing - has to be first
+
+ const char *name; // the name of the metric
+ uint32_t hash; // hash of the name
+
+ STATSD_METRIC_TYPE type;
+
+ // metadata about data collection
+ collected_number events; // the number of times this metric has been collected (never resets)
+ size_t count; // the number of times this metric has been collected since the last flush
+
+ // the actual collected data
+ union {
+ STATSD_METRIC_GAUGE gauge;
+ STATSD_METRIC_COUNTER counter;
+ STATSD_METRIC_HISTOGRAM histogram;
+ STATSD_METRIC_SET set;
+ };
+
+ // chart related members
+ STATS_METRIC_OPTIONS options; // STATSD_METRIC_OPTION_* (bitfield)
+ char reset; // set to 1 by the charting thread to instruct the collector thread(s) to reset this metric
+ collected_number last; // the last value sent to netdata
+ RRDSET *st; // the private chart of this metric
+ RRDDIM *rd_value; // the dimension of this metric value
+ RRDDIM *rd_count; // the dimension for the number of events received
+
+ // linking, used for walking through all metrics
+ struct statsd_metric *next;
+ struct statsd_metric *next_useful;
+} STATSD_METRIC;
+
+
+// --------------------------------------------------------------------------------------------------------------------
+// each type of metric has its own index
+
+typedef struct statsd_index {
+ char *name; // the name of the index of metrics
+ size_t events; // the number of events processed for this index
+ size_t metrics; // the number of metrics in this index
+ size_t useful; // the number of useful metrics in this index
+
+ STATSD_AVL_TREE index; // the AVL tree
+
+ STATSD_METRIC *first; // the linked list of metrics (new metrics are added in front)
+ STATSD_METRIC *first_useful; // the linked list of useful metrics (new metrics are added in front)
+ STATSD_FIRST_PTR_MUTEX; // when multi-threading is enabled, a lock to protect the linked list
+
+ STATS_METRIC_OPTIONS default_options; // default options for all metrics in this index
+} STATSD_INDEX;
+
+static int statsd_metric_compare(void* a, void* b);
+
+// --------------------------------------------------------------------------------------------------------------------
+// synthetic charts
+
+typedef enum statsd_app_chart_dimension_value_type {
+ STATSD_APP_CHART_DIM_VALUE_TYPE_EVENTS,
+ STATSD_APP_CHART_DIM_VALUE_TYPE_LAST,
+ STATSD_APP_CHART_DIM_VALUE_TYPE_AVERAGE,
+ STATSD_APP_CHART_DIM_VALUE_TYPE_SUM,
+ STATSD_APP_CHART_DIM_VALUE_TYPE_MIN,
+ STATSD_APP_CHART_DIM_VALUE_TYPE_MAX,
+ STATSD_APP_CHART_DIM_VALUE_TYPE_PERCENTILE,
+ STATSD_APP_CHART_DIM_VALUE_TYPE_MEDIAN,
+ STATSD_APP_CHART_DIM_VALUE_TYPE_STDDEV
+} STATSD_APP_CHART_DIM_VALUE_TYPE;
+
+typedef struct statsd_app_chart_dimension {
+ const char *name; // the name of this dimension
+ const char *metric; // the source metric name of this dimension
+ uint32_t metric_hash; // hash for fast string comparisons
+
+ SIMPLE_PATTERN *metric_pattern; // set when the 'metric' is a simple pattern
+
+ collected_number multiplier; // the multiplier of the dimension
+ collected_number divisor; // the divisor of the dimension
+ RRDDIM_FLAGS flags; // the RRDDIM flags for this dimension
+
+ STATSD_APP_CHART_DIM_VALUE_TYPE value_type; // which value to use of the source metric
+
+ RRDDIM *rd; // a pointer to the RRDDIM that has been created for this dimension
+ collected_number *value_ptr; // a pointer to the source metric value
+ RRD_ALGORITHM algorithm; // the algorithm of this dimension
+
+ struct statsd_app_chart_dimension *next; // the next dimension for this chart
+} STATSD_APP_CHART_DIM;
+
+typedef struct statsd_app_chart {
+ const char *source;
+ const char *id;
+ const char *name;
+ const char *title;
+ const char *family;
+ const char *context;
+ const char *units;
+ long priority;
+ RRDSET_TYPE chart_type;
+ STATSD_APP_CHART_DIM *dimensions;
+ size_t dimensions_count;
+ size_t dimensions_linked_count;
+
+ RRDSET *st;
+ struct statsd_app_chart *next;
+} STATSD_APP_CHART;
+
+typedef struct statsd_app {
+ const char *name;
+ SIMPLE_PATTERN *metrics;
+ STATS_METRIC_OPTIONS default_options;
+ RRD_MEMORY_MODE rrd_memory_mode;
+ DICTIONARY *dict;
+ long rrd_history_entries;
+
+ const char *source;
+ STATSD_APP_CHART *charts;
+ struct statsd_app *next;
+} STATSD_APP;
+
+// --------------------------------------------------------------------------------------------------------------------
+// global statsd data
+
+struct collection_thread_status {
+ int status;
+ size_t max_sockets;
+
+ netdata_thread_t thread;
+ struct rusage rusage;
+ RRDSET *st_cpu;
+ RRDDIM *rd_user;
+ RRDDIM *rd_system;
+};
+
+static struct statsd {
+ STATSD_INDEX gauges;
+ STATSD_INDEX counters;
+ STATSD_INDEX timers;
+ STATSD_INDEX histograms;
+ STATSD_INDEX meters;
+ STATSD_INDEX sets;
+ size_t unknown_types;
+ size_t socket_errors;
+ size_t tcp_socket_connects;
+ size_t tcp_socket_disconnects;
+ size_t tcp_socket_connected;
+ size_t tcp_socket_reads;
+ size_t tcp_packets_received;
+ size_t tcp_bytes_read;
+ size_t udp_socket_reads;
+ size_t udp_packets_received;
+ size_t udp_bytes_read;
+
+ int enabled;
+ int update_every;
+ SIMPLE_PATTERN *charts_for;
+
+ size_t tcp_idle_timeout;
+ collected_number decimal_detail;
+ size_t private_charts;
+ size_t max_private_charts;
+ size_t max_private_charts_hard;
+ RRD_MEMORY_MODE private_charts_memory_mode;
+ long private_charts_rrd_history_entries;
+ unsigned int private_charts_hidden:1;
+
+ STATSD_APP *apps;
+ size_t recvmmsg_size;
+ size_t histogram_increase_step;
+ double histogram_percentile;
+ char *histogram_percentile_str;
+
+ int threads;
+ struct collection_thread_status *collection_threads_status;
+
+ LISTEN_SOCKETS sockets;
+} statsd = {
+ .enabled = 1,
+ .max_private_charts = 200,
+ .max_private_charts_hard = 1000,
+ .private_charts_hidden = 0,
+ .recvmmsg_size = 10,
+ .decimal_detail = STATSD_DECIMAL_DETAIL,
+
+ .gauges = {
+ .name = "gauge",
+ .events = 0,
+ .metrics = 0,
+ .index = STATSD_AVL_INDEX_INIT,
+ .default_options = STATSD_METRIC_OPTION_NONE,
+ .first = NULL,
+ STATSD_FIRST_PTR_MUTEX_INIT
+ },
+ .counters = {
+ .name = "counter",
+ .events = 0,
+ .metrics = 0,
+ .index = STATSD_AVL_INDEX_INIT,
+ .default_options = STATSD_METRIC_OPTION_NONE,
+ .first = NULL,
+ STATSD_FIRST_PTR_MUTEX_INIT
+ },
+ .timers = {
+ .name = "timer",
+ .events = 0,
+ .metrics = 0,
+ .index = STATSD_AVL_INDEX_INIT,
+ .default_options = STATSD_METRIC_OPTION_NONE,
+ .first = NULL,
+ STATSD_FIRST_PTR_MUTEX_INIT
+ },
+ .histograms = {
+ .name = "histogram",
+ .events = 0,
+ .metrics = 0,
+ .index = STATSD_AVL_INDEX_INIT,
+ .default_options = STATSD_METRIC_OPTION_NONE,
+ .first = NULL,
+ STATSD_FIRST_PTR_MUTEX_INIT
+ },
+ .meters = {
+ .name = "meter",
+ .events = 0,
+ .metrics = 0,
+ .index = STATSD_AVL_INDEX_INIT,
+ .default_options = STATSD_METRIC_OPTION_NONE,
+ .first = NULL,
+ STATSD_FIRST_PTR_MUTEX_INIT
+ },
+ .sets = {
+ .name = "set",
+ .events = 0,
+ .metrics = 0,
+ .index = STATSD_AVL_INDEX_INIT,
+ .default_options = STATSD_METRIC_OPTION_NONE,
+ .first = NULL,
+ STATSD_FIRST_PTR_MUTEX_INIT
+ },
+
+ .tcp_idle_timeout = 600,
+
+ .apps = NULL,
+ .histogram_percentile = 95.0,
+ .histogram_increase_step = 10,
+ .threads = 0,
+ .collection_threads_status = NULL,
+ .sockets = {
+ .config = &netdata_config,
+ .config_section = CONFIG_SECTION_STATSD,
+ .default_bind_to = "udp:localhost tcp:localhost",
+ .default_port = STATSD_LISTEN_PORT,
+ .backlog = STATSD_LISTEN_BACKLOG
+ },
+};
+
+
+// --------------------------------------------------------------------------------------------------------------------
+// statsd index management - add/find metrics
+
+static int statsd_metric_compare(void* a, void* b) {
+ if(((STATSD_METRIC *)a)->hash < ((STATSD_METRIC *)b)->hash) return -1;
+ else if(((STATSD_METRIC *)a)->hash > ((STATSD_METRIC *)b)->hash) return 1;
+ else return strcmp(((STATSD_METRIC *)a)->name, ((STATSD_METRIC *)b)->name);
+}
+
+static inline STATSD_METRIC *statsd_metric_index_find(STATSD_INDEX *index, const char *name, uint32_t hash) {
+ STATSD_METRIC tmp;
+ tmp.name = name;
+ tmp.hash = (hash)?hash:simple_hash(tmp.name);
+
+ return (STATSD_METRIC *)STATSD_AVL_SEARCH(&index->index, (avl *)&tmp);
+}
+
+static inline STATSD_METRIC *statsd_find_or_add_metric(STATSD_INDEX *index, const char *name, STATSD_METRIC_TYPE type) {
+ debug(D_STATSD, "searching for metric '%s' under '%s'", name, index->name);
+
+ uint32_t hash = simple_hash(name);
+
+ STATSD_METRIC *m = statsd_metric_index_find(index, name, hash);
+ if(unlikely(!m)) {
+ debug(D_STATSD, "Creating new %s metric '%s'", index->name, name);
+
+ m = (STATSD_METRIC *)callocz(sizeof(STATSD_METRIC), 1);
+ m->name = strdupz(name);
+ m->hash = hash;
+ m->type = type;
+ m->options = index->default_options;
+
+ if(type == STATSD_METRIC_TYPE_HISTOGRAM || type == STATSD_METRIC_TYPE_TIMER) {
+ m->histogram.ext = callocz(sizeof(STATSD_METRIC_HISTOGRAM_EXTENSIONS), 1);
+ netdata_mutex_init(&m->histogram.ext->mutex);
+ }
+ STATSD_METRIC *n = (STATSD_METRIC *)STATSD_AVL_INSERT(&index->index, (avl *)m);
+ if(unlikely(n != m)) {
+ freez((void *)m->histogram.ext);
+ freez((void *)m->name);
+ freez((void *)m);
+ m = n;
+ }
+ else {
+ STATSD_FIRST_PTR_MUTEX_LOCK(index);
+ index->metrics++;
+ m->next = index->first;
+ index->first = m;
+ STATSD_FIRST_PTR_MUTEX_UNLOCK(index);
+ }
+ }
+
+ index->events++;
+ return m;
+}
+
+
+// --------------------------------------------------------------------------------------------------------------------
+// statsd parsing numbers
+
+static inline LONG_DOUBLE statsd_parse_float(const char *v, LONG_DOUBLE def) {
+ LONG_DOUBLE value;
+
+ if(likely(v && *v)) {
+ char *e = NULL;
+ value = str2ld(v, &e);
+ if(unlikely(e && *e))
+ error("STATSD: excess data '%s' after value '%s'", e, v);
+ }
+ else
+ value = def;
+
+ return value;
+}
+
+static inline LONG_DOUBLE statsd_parse_sampling_rate(const char *v) {
+ LONG_DOUBLE sampling_rate = statsd_parse_float(v, 1.0);
+ if(unlikely(isless(sampling_rate, 0.001))) sampling_rate = 0.001;
+ if(unlikely(isgreater(sampling_rate, 1.0))) sampling_rate = 1.0;
+ return sampling_rate;
+}
+
+static inline long long statsd_parse_int(const char *v, long long def) {
+ long long value;
+
+ if(likely(v && *v)) {
+ char *e = NULL;
+ value = str2ll(v, &e);
+ if(unlikely(e && *e))
+ error("STATSD: excess data '%s' after value '%s'", e, v);
+ }
+ else
+ value = def;
+
+ return value;
+}
+
+
+// --------------------------------------------------------------------------------------------------------------------
+// statsd processors per metric type
+
+static inline void statsd_reset_metric(STATSD_METRIC *m) {
+ m->reset = 0;
+ m->count = 0;
+}
+
+static inline int value_is_zinit(const char *value) {
+ return (value && *value == 'z' && *++value == 'i' && *++value == 'n' && *++value == 'i' && *++value == 't' && *++value == '\0');
+}
+
+#define is_metric_checked(m) ((m)->options & STATSD_METRIC_OPTION_CHECKED)
+#define is_metric_useful_for_collection(m) (!is_metric_checked(m) || ((m)->options & STATSD_METRIC_OPTION_USEFUL))
+
+static inline void statsd_process_gauge(STATSD_METRIC *m, const char *value, const char *sampling) {
+ if(!is_metric_useful_for_collection(m)) return;
+
+ if(unlikely(!value || !*value)) {
+ error("STATSD: metric '%s' of type gauge, with empty value is ignored.", m->name);
+ return;
+ }
+
+ if(unlikely(m->reset)) {
+ // no need to reset anything specific for gauges
+ statsd_reset_metric(m);
+ }
+
+ if(unlikely(value_is_zinit(value))) {
+ // magic loading of metric, without affecting anything
+ }
+ else {
+ if (unlikely(*value == '+' || *value == '-'))
+ m->gauge.value += statsd_parse_float(value, 1.0) / statsd_parse_sampling_rate(sampling);
+ else
+ m->gauge.value = statsd_parse_float(value, 1.0);
+
+ m->events++;
+ m->count++;
+ }
+}
+
+static inline void statsd_process_counter_or_meter(STATSD_METRIC *m, const char *value, const char *sampling) {
+ if(!is_metric_useful_for_collection(m)) return;
+
+ // we accept empty values for counters
+
+ if(unlikely(m->reset)) statsd_reset_metric(m);
+
+ if(unlikely(value_is_zinit(value))) {
+ // magic loading of metric, without affecting anything
+ }
+ else {
+ m->counter.value += llrintl((LONG_DOUBLE) statsd_parse_int(value, 1) / statsd_parse_sampling_rate(sampling));
+
+ m->events++;
+ m->count++;
+ }
+}
+
+#define statsd_process_counter(m, value, sampling) statsd_process_counter_or_meter(m, value, sampling)
+#define statsd_process_meter(m, value, sampling) statsd_process_counter_or_meter(m, value, sampling)
+
+static inline void statsd_process_histogram_or_timer(STATSD_METRIC *m, const char *value, const char *sampling, const char *type) {
+ if(!is_metric_useful_for_collection(m)) return;
+
+ if(unlikely(!value || !*value)) {
+ error("STATSD: metric of type %s, with empty value is ignored.", type);
+ return;
+ }
+
+ if(unlikely(m->reset)) {
+ m->histogram.ext->used = 0;
+ statsd_reset_metric(m);
+ }
+
+ if(unlikely(value_is_zinit(value))) {
+ // magic loading of metric, without affecting anything
+ }
+ else {
+ LONG_DOUBLE v = statsd_parse_float(value, 1.0);
+ LONG_DOUBLE sampling_rate = statsd_parse_sampling_rate(sampling);
+ if(unlikely(isless(sampling_rate, 0.01))) sampling_rate = 0.01;
+ if(unlikely(isgreater(sampling_rate, 1.0))) sampling_rate = 1.0;
+
+ long long samples = llrintl(1.0 / sampling_rate);
+ while(samples-- > 0) {
+
+ if(unlikely(m->histogram.ext->used == m->histogram.ext->size)) {
+ netdata_mutex_lock(&m->histogram.ext->mutex);
+ m->histogram.ext->size += statsd.histogram_increase_step;
+ m->histogram.ext->values = reallocz(m->histogram.ext->values, sizeof(LONG_DOUBLE) * m->histogram.ext->size);
+ netdata_mutex_unlock(&m->histogram.ext->mutex);
+ }
+
+ m->histogram.ext->values[m->histogram.ext->used++] = v;
+ }
+
+ m->events++;
+ m->count++;
+ }
+}
+
+#define statsd_process_timer(m, value, sampling) statsd_process_histogram_or_timer(m, value, sampling, "timer")
+#define statsd_process_histogram(m, value, sampling) statsd_process_histogram_or_timer(m, value, sampling, "histogram")
+
+static inline void statsd_process_set(STATSD_METRIC *m, const char *value) {
+ if(!is_metric_useful_for_collection(m)) return;
+
+ if(unlikely(!value || !*value)) {
+ error("STATSD: metric of type set, with empty value is ignored.");
+ return;
+ }
+
+ if(unlikely(m->reset)) {
+ if(likely(m->set.dict)) {
+ dictionary_destroy(m->set.dict);
+ m->set.dict = NULL;
+ }
+ statsd_reset_metric(m);
+ }
+
+ if (unlikely(!m->set.dict)) {
+ m->set.dict = dictionary_create(STATSD_DICTIONARY_OPTIONS | DICTIONARY_FLAG_VALUE_LINK_DONT_CLONE);
+ m->set.unique = 0;
+ }
+
+ if(unlikely(value_is_zinit(value))) {
+ // magic loading of metric, without affecting anything
+ }
+ else {
+ void *t = dictionary_get(m->set.dict, value);
+ if (unlikely(!t)) {
+ dictionary_set(m->set.dict, value, NULL, 1);
+ m->set.unique++;
+ }
+
+ m->events++;
+ m->count++;
+ }
+}
+
+
+// --------------------------------------------------------------------------------------------------------------------
+// statsd parsing
+
+static void statsd_process_metric(const char *name, const char *value, const char *type, const char *sampling, const char *tags) {
+ (void)tags;
+
+ debug(D_STATSD, "STATSD: raw metric '%s', value '%s', type '%s', sampling '%s', tags '%s'", name?name:"(null)", value?value:"(null)", type?type:"(null)", sampling?sampling:"(null)", tags?tags:"(null)");
+
+ if(unlikely(!name || !*name)) return;
+ if(unlikely(!type || !*type)) type = "m";
+
+ char t0 = type[0], t1 = type[1];
+
+ if(unlikely(t0 == 'g' && t1 == '\0')) {
+ statsd_process_gauge(
+ statsd_find_or_add_metric(&statsd.gauges, name, STATSD_METRIC_TYPE_GAUGE),
+ value, sampling);
+ }
+ else if(unlikely((t0 == 'c' || t0 == 'C') && t1 == '\0')) {
+ // etsy/statsd uses 'c'
+ // brubeck uses 'C'
+ statsd_process_counter(
+ statsd_find_or_add_metric(&statsd.counters, name, STATSD_METRIC_TYPE_COUNTER),
+ value, sampling);
+ }
+ else if(unlikely(t0 == 'm' && t1 == '\0')) {
+ statsd_process_meter(
+ statsd_find_or_add_metric(&statsd.meters, name, STATSD_METRIC_TYPE_METER),
+ value, sampling);
+ }
+ else if(unlikely(t0 == 'h' && t1 == '\0')) {
+ statsd_process_histogram(
+ statsd_find_or_add_metric(&statsd.histograms, name, STATSD_METRIC_TYPE_HISTOGRAM),
+ value, sampling);
+ }
+ else if(unlikely(t0 == 's' && t1 == '\0')) {
+ statsd_process_set(
+ statsd_find_or_add_metric(&statsd.sets, name, STATSD_METRIC_TYPE_SET),
+ value);
+ }
+ else if(unlikely(t0 == 'm' && t1 == 's' && type[2] == '\0')) {
+ statsd_process_timer(
+ statsd_find_or_add_metric(&statsd.timers, name, STATSD_METRIC_TYPE_TIMER),
+ value, sampling);
+ }
+ else {
+ statsd.unknown_types++;
+ error("STATSD: metric '%s' with value '%s' is sent with unknown metric type '%s'", name, value?value:"", type);
+ }
+}
+
+static inline const char *statsd_parse_skip_up_to(const char *s, char d1, char d2) {
+ char c;
+
+ for(c = *s; c && c != d1 && c != d2 && c != '\r' && c != '\n'; c = *++s) ;
+
+ return s;
+}
+
+const char *statsd_parse_skip_spaces(const char *s) {
+ char c;
+
+ for(c = *s; c && ( c == ' ' || c == '\t' || c == '\r' || c == '\n' ); c = *++s) ;
+
+ return s;
+}
+
+static inline const char *statsd_parse_field_trim(const char *start, char *end) {
+ if(unlikely(!start)) {
+ start = end;
+ return start;
+ }
+
+ while(start <= end && (*start == ' ' || *start == '\t'))
+ start++;
+
+ *end = '\0';
+ end--;
+ while(end >= start && (*end == ' ' || *end == '\t'))
+ *end-- = '\0';
+
+ return start;
+}
+
+static inline size_t statsd_process(char *buffer, size_t size, int require_newlines) {
+ buffer[size] = '\0';
+ debug(D_STATSD, "RECEIVED: %zu bytes: '%s'", size, buffer);
+
+ const char *s = buffer;
+ while(*s) {
+ const char *name = NULL, *value = NULL, *type = NULL, *sampling = NULL, *tags = NULL;
+ char *name_end = NULL, *value_end = NULL, *type_end = NULL, *sampling_end = NULL, *tags_end = NULL;
+
+ s = name_end = (char *)statsd_parse_skip_up_to(name = s, ':', '|');
+ if(name == name_end) {
+ s = statsd_parse_skip_spaces(s);
+ continue;
+ }
+
+ if(likely(*s == ':'))
+ s = value_end = (char *) statsd_parse_skip_up_to(value = ++s, '|', '|');
+
+ if(likely(*s == '|'))
+ s = type_end = (char *) statsd_parse_skip_up_to(type = ++s, '|', '@');
+
+ if(likely(*s == '|' || *s == '@')) {
+ s = sampling_end = (char *) statsd_parse_skip_up_to(sampling = ++s, '|', '#');
+ if(*sampling == '@') sampling++;
+ }
+
+ if(likely(*s == '|' || *s == '#')) {
+ s = tags_end = (char *) statsd_parse_skip_up_to(tags = ++s, '|', '|');
+ if(*tags == '#') tags++;
+ }
+
+ // skip everything until the end of the line
+ while(*s && *s != '\n') s++;
+
+ if(unlikely(require_newlines && *s != '\n' && s > buffer)) {
+ // move the remaining data to the beginning
+ size -= (name - buffer);
+ memmove(buffer, name, size);
+ return size;
+ }
+ else
+ s = statsd_parse_skip_spaces(s);
+
+ statsd_process_metric(
+ statsd_parse_field_trim(name, name_end)
+ , statsd_parse_field_trim(value, value_end)
+ , statsd_parse_field_trim(type, type_end)
+ , statsd_parse_field_trim(sampling, sampling_end)
+ , statsd_parse_field_trim(tags, tags_end)
+ );
+ }
+
+ return 0;
+}
+
+
+// --------------------------------------------------------------------------------------------------------------------
+// statsd pollfd interface
+
+#define STATSD_TCP_BUFFER_SIZE 65536 // minimize tcp reads
+#define STATSD_UDP_BUFFER_SIZE 9000 // this should be up to MTU
+
+typedef enum {
+ STATSD_SOCKET_DATA_TYPE_TCP,
+ STATSD_SOCKET_DATA_TYPE_UDP
+} STATSD_SOCKET_DATA_TYPE;
+
+struct statsd_tcp {
+ STATSD_SOCKET_DATA_TYPE type;
+ size_t size;
+ size_t len;
+ char buffer[];
+};
+
+#ifdef HAVE_RECVMMSG
+struct statsd_udp {
+ int *running;
+ STATSD_SOCKET_DATA_TYPE type;
+ size_t size;
+ struct iovec *iovecs;
+ struct mmsghdr *msgs;
+};
+#else
+struct statsd_udp {
+ int *running;
+ STATSD_SOCKET_DATA_TYPE type;
+ char buffer[STATSD_UDP_BUFFER_SIZE];
+};
+#endif
+
+// new TCP client connected
+static void *statsd_add_callback(POLLINFO *pi, short int *events, void *data) {
+ (void)pi;
+ (void)data;
+
+ *events = POLLIN;
+
+ struct statsd_tcp *t = (struct statsd_tcp *)callocz(sizeof(struct statsd_tcp) + STATSD_TCP_BUFFER_SIZE, 1);
+ t->type = STATSD_SOCKET_DATA_TYPE_TCP;
+ t->size = STATSD_TCP_BUFFER_SIZE - 1;
+ statsd.tcp_socket_connects++;
+ statsd.tcp_socket_connected++;
+
+ return t;
+}
+
+// TCP client disconnected
+static void statsd_del_callback(POLLINFO *pi) {
+ struct statsd_tcp *t = pi->data;
+
+ if(likely(t)) {
+ if(t->type == STATSD_SOCKET_DATA_TYPE_TCP) {
+ if(t->len != 0) {
+ statsd.socket_errors++;
+ error("STATSD: client is probably sending unterminated metrics. Closed socket left with '%s'. Trying to process it.", t->buffer);
+ statsd_process(t->buffer, t->len, 0);
+ }
+ statsd.tcp_socket_disconnects++;
+ statsd.tcp_socket_connected--;
+ }
+ else
+ error("STATSD: internal error: received socket data type is %d, but expected %d", (int)t->type, (int)STATSD_SOCKET_DATA_TYPE_TCP);
+
+ freez(t);
+ }
+}
+
+// Receive data
+static int statsd_rcv_callback(POLLINFO *pi, short int *events) {
+ *events = POLLIN;
+
+ int fd = pi->fd;
+
+ switch(pi->socktype) {
+ case SOCK_STREAM: {
+ struct statsd_tcp *d = (struct statsd_tcp *)pi->data;
+ if(unlikely(!d)) {
+ error("STATSD: internal error: expected TCP data pointer is NULL");
+ statsd.socket_errors++;
+ return -1;
+ }
+
+#ifdef NETDATA_INTERNAL_CHECKS
+ if(unlikely(d->type != STATSD_SOCKET_DATA_TYPE_TCP)) {
+ error("STATSD: internal error: socket data type should be %d, but it is %d", (int)STATSD_SOCKET_DATA_TYPE_TCP, (int)d->type);
+ statsd.socket_errors++;
+ return -1;
+ }
+#endif
+
+ int ret = 0;
+ ssize_t rc;
+ do {
+ rc = recv(fd, &d->buffer[d->len], d->size - d->len, MSG_DONTWAIT);
+ if (rc < 0) {
+ // read failed
+ if (errno != EWOULDBLOCK && errno != EAGAIN && errno != EINTR) {
+ error("STATSD: recv() on TCP socket %d failed.", fd);
+ statsd.socket_errors++;
+ ret = -1;
+ }
+ }
+ else if (!rc) {
+ // connection closed
+ debug(D_STATSD, "STATSD: client disconnected.");
+ ret = -1;
+ }
+ else {
+ // data received
+ d->len += rc;
+ statsd.tcp_socket_reads++;
+ statsd.tcp_bytes_read += rc;
+ }
+
+ if(likely(d->len > 0)) {
+ statsd.tcp_packets_received++;
+ d->len = statsd_process(d->buffer, d->len, 1);
+ }
+
+ if(unlikely(ret == -1))
+ return -1;
+
+ } while (rc != -1);
+ break;
+ }
+
+ case SOCK_DGRAM: {
+ struct statsd_udp *d = (struct statsd_udp *)pi->data;
+ if(unlikely(!d)) {
+ error("STATSD: internal error: expected UDP data pointer is NULL");
+ statsd.socket_errors++;
+ return -1;
+ }
+
+#ifdef NETDATA_INTERNAL_CHECKS
+ if(unlikely(d->type != STATSD_SOCKET_DATA_TYPE_UDP)) {
+ error("STATSD: internal error: socket data should be %d, but it is %d", (int)d->type, (int)STATSD_SOCKET_DATA_TYPE_UDP);
+ statsd.socket_errors++;
+ return -1;
+ }
+#endif
+
+#ifdef HAVE_RECVMMSG
+ ssize_t rc;
+ do {
+ rc = recvmmsg(fd, d->msgs, (unsigned int)d->size, MSG_DONTWAIT, NULL);
+ if (rc < 0) {
+ // read failed
+ if (errno != EWOULDBLOCK && errno != EAGAIN && errno != EINTR) {
+ error("STATSD: recvmmsg() on UDP socket %d failed.", fd);
+ statsd.socket_errors++;
+ return -1;
+ }
+ } else if (rc) {
+ // data received
+ statsd.udp_socket_reads++;
+ statsd.udp_packets_received += rc;
+
+ size_t i;
+ for (i = 0; i < (size_t)rc; ++i) {
+ size_t len = (size_t)d->msgs[i].msg_len;
+ statsd.udp_bytes_read += len;
+ statsd_process(d->msgs[i].msg_hdr.msg_iov->iov_base, len, 0);
+ }
+ }
+ } while (rc != -1);
+
+#else // !HAVE_RECVMMSG
+ ssize_t rc;
+ do {
+ rc = recv(fd, d->buffer, STATSD_UDP_BUFFER_SIZE - 1, MSG_DONTWAIT);
+ if (rc < 0) {
+ // read failed
+ if (errno != EWOULDBLOCK && errno != EAGAIN && errno != EINTR) {
+ error("STATSD: recv() on UDP socket %d failed.", fd);
+ statsd.socket_errors++;
+ return -1;
+ }
+ } else if (rc) {
+ // data received
+ statsd.udp_socket_reads++;
+ statsd.udp_packets_received++;
+ statsd.udp_bytes_read += rc;
+ statsd_process(d->buffer, (size_t) rc, 0);
+ }
+ } while (rc != -1);
+#endif
+
+ break;
+ }
+
+ default: {
+ error("STATSD: internal error: unknown socktype %d on socket %d", pi->socktype, fd);
+ statsd.socket_errors++;
+ return -1;
+ }
+ }
+
+ return 0;
+}
+
+static int statsd_snd_callback(POLLINFO *pi, short int *events) {
+ (void)pi;
+ (void)events;
+
+ error("STATSD: snd_callback() called, but we never requested to send data to statsd clients.");
+ return -1;
+}
+
+static void statsd_timer_callback(void *timer_data) {
+ struct collection_thread_status *status = timer_data;
+ getrusage(RUSAGE_THREAD, &status->rusage);
+}
+
+// --------------------------------------------------------------------------------------------------------------------
+// statsd child thread to collect metrics from network
+
+void statsd_collector_thread_cleanup(void *data) {
+ struct statsd_udp *d = data;
+ *d->running = 0;
+
+ info("cleaning up...");
+
+#ifdef HAVE_RECVMMSG
+ size_t i;
+ for (i = 0; i < d->size; i++)
+ freez(d->iovecs[i].iov_base);
+
+ freez(d->iovecs);
+ freez(d->msgs);
+#endif
+
+ freez(d);
+}
+
+void *statsd_collector_thread(void *ptr) {
+ struct collection_thread_status *status = ptr;
+ status->status = 1;
+
+ info("STATSD collector thread started with taskid %d", gettid());
+
+ struct statsd_udp *d = callocz(sizeof(struct statsd_udp), 1);
+ d->running = &status->status;
+
+ netdata_thread_cleanup_push(statsd_collector_thread_cleanup, d);
+
+#ifdef HAVE_RECVMMSG
+ d->type = STATSD_SOCKET_DATA_TYPE_UDP;
+ d->size = statsd.recvmmsg_size;
+ d->iovecs = callocz(sizeof(struct iovec), d->size);
+ d->msgs = callocz(sizeof(struct mmsghdr), d->size);
+
+ size_t i;
+ for (i = 0; i < d->size; i++) {
+ d->iovecs[i].iov_base = mallocz(STATSD_UDP_BUFFER_SIZE);
+ d->iovecs[i].iov_len = STATSD_UDP_BUFFER_SIZE - 1;
+ d->msgs[i].msg_hdr.msg_iov = &d->iovecs[i];
+ d->msgs[i].msg_hdr.msg_iovlen = 1;
+ }
+#endif
+
+ poll_events(&statsd.sockets
+ , statsd_add_callback
+ , statsd_del_callback
+ , statsd_rcv_callback
+ , statsd_snd_callback
+ , statsd_timer_callback
+ , NULL
+ , (void *)d
+ , 0 // tcp request timeout, 0 = disabled
+ , statsd.tcp_idle_timeout // tcp idle timeout, 0 = disabled
+ , statsd.update_every * 1000
+ , ptr // timer_data
+ , status->max_sockets
+ );
+
+ netdata_thread_cleanup_pop(1);
+ return NULL;
+}
+
+
+// --------------------------------------------------------------------------------------------------------------------
+// statsd applications configuration files parsing
+
+#define STATSD_CONF_LINE_MAX 8192
+
+static STATSD_APP_CHART_DIM_VALUE_TYPE string2valuetype(const char *type, size_t line, const char *filename) {
+ if(!type || !*type) type = "last";
+
+ if(!strcmp(type, "events")) return STATSD_APP_CHART_DIM_VALUE_TYPE_EVENTS;
+ else if(!strcmp(type, "last")) return STATSD_APP_CHART_DIM_VALUE_TYPE_LAST;
+ else if(!strcmp(type, "min")) return STATSD_APP_CHART_DIM_VALUE_TYPE_MIN;
+ else if(!strcmp(type, "max")) return STATSD_APP_CHART_DIM_VALUE_TYPE_MAX;
+ else if(!strcmp(type, "sum")) return STATSD_APP_CHART_DIM_VALUE_TYPE_SUM;
+ else if(!strcmp(type, "average")) return STATSD_APP_CHART_DIM_VALUE_TYPE_AVERAGE;
+ else if(!strcmp(type, "median")) return STATSD_APP_CHART_DIM_VALUE_TYPE_MEDIAN;
+ else if(!strcmp(type, "stddev")) return STATSD_APP_CHART_DIM_VALUE_TYPE_STDDEV;
+ else if(!strcmp(type, "percentile")) return STATSD_APP_CHART_DIM_VALUE_TYPE_PERCENTILE;
+
+ error("STATSD: invalid type '%s' at line %zu of file '%s'. Using 'last'.", type, line, filename);
+ return STATSD_APP_CHART_DIM_VALUE_TYPE_LAST;
+}
+
+static const char *valuetype2string(STATSD_APP_CHART_DIM_VALUE_TYPE type) {
+ switch(type) {
+ case STATSD_APP_CHART_DIM_VALUE_TYPE_EVENTS: return "events";
+ case STATSD_APP_CHART_DIM_VALUE_TYPE_LAST: return "last";
+ case STATSD_APP_CHART_DIM_VALUE_TYPE_MIN: return "min";
+ case STATSD_APP_CHART_DIM_VALUE_TYPE_MAX: return "max";
+ case STATSD_APP_CHART_DIM_VALUE_TYPE_SUM: return "sum";
+ case STATSD_APP_CHART_DIM_VALUE_TYPE_AVERAGE: return "average";
+ case STATSD_APP_CHART_DIM_VALUE_TYPE_MEDIAN: return "median";
+ case STATSD_APP_CHART_DIM_VALUE_TYPE_STDDEV: return "stddev";
+ case STATSD_APP_CHART_DIM_VALUE_TYPE_PERCENTILE: return "percentile";
+ }
+
+ return "unknown";
+}
+
+static STATSD_APP_CHART_DIM *add_dimension_to_app_chart(
+ STATSD_APP *app
+ , STATSD_APP_CHART *chart
+ , const char *metric_name
+ , const char *dim_name
+ , collected_number multiplier
+ , collected_number divisor
+ , RRDDIM_FLAGS flags
+ , STATSD_APP_CHART_DIM_VALUE_TYPE value_type
+) {
+ STATSD_APP_CHART_DIM *dim = callocz(sizeof(STATSD_APP_CHART_DIM), 1);
+
+ dim->metric = strdupz(metric_name);
+ dim->metric_hash = simple_hash(dim->metric);
+
+ dim->name = strdupz((dim_name)?dim_name:"");
+ dim->multiplier = multiplier;
+ dim->divisor = divisor;
+ dim->value_type = value_type;
+ dim->flags = flags;
+
+ if(!dim->multiplier)
+ dim->multiplier = 1;
+
+ if(!dim->divisor)
+ dim->divisor = 1;
+
+ // append it to the list of dimension
+ STATSD_APP_CHART_DIM *tdim;
+ for(tdim = chart->dimensions; tdim && tdim->next ; tdim = tdim->next) ;
+ if(!tdim) {
+ dim->next = chart->dimensions;
+ chart->dimensions = dim;
+ }
+ else {
+ dim->next = tdim->next;
+ tdim->next = dim;
+ }
+ chart->dimensions_count++;
+
+ debug(D_STATSD, "Added dimension '%s' to chart '%s' of app '%s', for metric '%s', with type %u, multiplier " COLLECTED_NUMBER_FORMAT ", divisor " COLLECTED_NUMBER_FORMAT,
+ dim->name, chart->id, app->name, dim->metric, dim->value_type, dim->multiplier, dim->divisor);
+
+ return dim;
+}
+
+static int statsd_readfile(const char *filename, STATSD_APP *app, STATSD_APP_CHART *chart, DICTIONARY *dict) {
+ debug(D_STATSD, "STATSD configuration reading file '%s'", filename);
+
+ char *buffer = mallocz(STATSD_CONF_LINE_MAX + 1);
+
+ FILE *fp = fopen(filename, "r");
+ if(!fp) {
+ error("STATSD: cannot open file '%s'.", filename);
+ freez(buffer);
+ return -1;
+ }
+
+ size_t line = 0;
+ char *s;
+ while(fgets(buffer, STATSD_CONF_LINE_MAX, fp) != NULL) {
+ buffer[STATSD_CONF_LINE_MAX] = '\0';
+ line++;
+
+ s = trim(buffer);
+ if (!s || *s == '#') {
+ debug(D_STATSD, "STATSD: ignoring line %zu of file '%s', it is empty.", line, filename);
+ continue;
+ }
+
+ debug(D_STATSD, "STATSD: processing line %zu of file '%s': %s", line, filename, buffer);
+
+ if(*s == 'i' && strncmp(s, "include", 7) == 0) {
+ s = trim(&s[7]);
+ if(s && *s) {
+ char *tmp;
+ if(*s == '/')
+ tmp = strdupz(s);
+ else {
+ // the file to be included is relative to current file
+ // find the directory name from the file we already read
+ char *filename2 = strdupz(filename); // copy filename, since dirname() will change it
+ char *dir = dirname(filename2); // find the directory part of the filename
+ tmp = strdupz_path_subpath(dir, s); // compose the new filename to read;
+ freez(filename2); // free the filename we copied
+ }
+ statsd_readfile(tmp, app, chart, dict);
+ freez(tmp);
+ }
+ else
+ error("STATSD: ignoring line %zu of file '%s', include filename is empty", line, filename);
+
+ continue;
+ }
+
+ int len = (int) strlen(s);
+ if (*s == '[' && s[len - 1] == ']') {
+ // new section
+ s[len - 1] = '\0';
+ s++;
+
+ if (!strcmp(s, "app")) {
+ // a new app
+ app = callocz(sizeof(STATSD_APP), 1);
+ app->name = strdupz("unnamed");
+ app->rrd_memory_mode = localhost->rrd_memory_mode;
+ app->rrd_history_entries = localhost->rrd_history_entries;
+
+ app->next = statsd.apps;
+ statsd.apps = app;
+ chart = NULL;
+ dict = NULL;
+
+ {
+ char lineandfile[FILENAME_MAX + 1];
+ snprintfz(lineandfile, FILENAME_MAX, "%zu@%s", line, filename);
+ app->source = strdupz(lineandfile);
+ }
+ }
+ else if(app) {
+ if(!strcmp(s, "dictionary")) {
+ if(!app->dict)
+ app->dict = dictionary_create(DICTIONARY_FLAG_SINGLE_THREADED);
+
+ dict = app->dict;
+ }
+ else {
+ dict = NULL;
+
+ // a new chart
+ chart = callocz(sizeof(STATSD_APP_CHART), 1);
+ netdata_fix_chart_id(s);
+ chart->id = strdupz(s);
+ chart->name = strdupz(s);
+ chart->title = strdupz("Statsd chart");
+ chart->context = strdupz(s);
+ chart->family = strdupz("overview");
+ chart->units = strdupz("value");
+ chart->priority = NETDATA_CHART_PRIO_STATSD_PRIVATE;
+ chart->chart_type = RRDSET_TYPE_LINE;
+
+ chart->next = app->charts;
+ app->charts = chart;
+
+ {
+ char lineandfile[FILENAME_MAX + 1];
+ snprintfz(lineandfile, FILENAME_MAX, "%zu@%s", line, filename);
+ chart->source = strdupz(lineandfile);
+ }
+ }
+ }
+ else
+ error("STATSD: ignoring line %zu ('%s') of file '%s', [app] is not defined.", line, s, filename);
+
+ continue;
+ }
+
+ if(!app) {
+ error("STATSD: ignoring line %zu ('%s') of file '%s', it is outside all sections.", line, s, filename);
+ continue;
+ }
+
+ char *name = s;
+ char *value = strchr(s, '=');
+ if(!value) {
+ error("STATSD: ignoring line %zu ('%s') of file '%s', there is no = in it.", line, s, filename);
+ continue;
+ }
+ *value = '\0';
+ value++;
+
+ name = trim(name);
+ value = trim(value);
+
+ if(!name || *name == '#') {
+ error("STATSD: ignoring line %zu of file '%s', name is empty.", line, filename);
+ continue;
+ }
+ if(!value) {
+ debug(D_CONFIG, "STATSD: ignoring line %zu of file '%s', value is empty.", line, filename);
+ continue;
+ }
+
+ if(unlikely(dict)) {
+ // parse [dictionary] members
+
+ dictionary_set(dict, name, value, strlen(value) + 1);
+ }
+ else if(!chart) {
+ // parse [app] members
+
+ if(!strcmp(name, "name")) {
+ freez((void *)app->name);
+ netdata_fix_chart_name(value);
+ app->name = strdupz(value);
+ }
+ else if (!strcmp(name, "metrics")) {
+ simple_pattern_free(app->metrics);
+ app->metrics = simple_pattern_create(value, NULL, SIMPLE_PATTERN_EXACT);
+ }
+ else if (!strcmp(name, "private charts")) {
+ if (!strcmp(value, "yes") || !strcmp(value, "on"))
+ app->default_options |= STATSD_METRIC_OPTION_PRIVATE_CHART_ENABLED;
+ else
+ app->default_options &= ~STATSD_METRIC_OPTION_PRIVATE_CHART_ENABLED;
+ }
+ else if (!strcmp(name, "gaps when not collected")) {
+ if (!strcmp(value, "yes") || !strcmp(value, "on"))
+ app->default_options |= STATSD_METRIC_OPTION_SHOW_GAPS_WHEN_NOT_COLLECTED;
+ }
+ else if (!strcmp(name, "memory mode")) {
+ app->rrd_memory_mode = rrd_memory_mode_id(value);
+ }
+ else if (!strcmp(name, "history")) {
+ app->rrd_history_entries = atol(value);
+ if (app->rrd_history_entries < 5)
+ app->rrd_history_entries = 5;
+ }
+ else {
+ error("STATSD: ignoring line %zu ('%s') of file '%s'. Unknown keyword for the [app] section.", line, name, filename);
+ continue;
+ }
+ }
+ else {
+ // parse [chart] members
+
+ if(!strcmp(name, "name")) {
+ freez((void *)chart->name);
+ netdata_fix_chart_id(value);
+ chart->name = strdupz(value);
+ }
+ else if(!strcmp(name, "title")) {
+ freez((void *)chart->title);
+ chart->title = strdupz(value);
+ }
+ else if (!strcmp(name, "family")) {
+ freez((void *)chart->family);
+ chart->family = strdupz(value);
+ }
+ else if (!strcmp(name, "context")) {
+ freez((void *)chart->context);
+ netdata_fix_chart_id(value);
+ chart->context = strdupz(value);
+ }
+ else if (!strcmp(name, "units")) {
+ freez((void *)chart->units);
+ chart->units = strdupz(value);
+ }
+ else if (!strcmp(name, "priority")) {
+ chart->priority = atol(value);
+ }
+ else if (!strcmp(name, "type")) {
+ chart->chart_type = rrdset_type_id(value);
+ }
+ else if (!strcmp(name, "dimension")) {
+ // dimension = [pattern] METRIC [NAME [TYPE [MULTIPLIER [DIVISOR [OPTIONS]]]]]
+ char *words[10];
+ pluginsd_split_words(value, words, 10);
+
+ int pattern = 0;
+ size_t i = 0;
+ char *metric_name = words[i++];
+
+ if(strcmp(metric_name, "pattern") == 0) {
+ metric_name = words[i++];
+ pattern = 1;
+ }
+
+ char *dim_name = words[i++];
+ char *type = words[i++];
+ char *multiplier = words[i++];
+ char *divisor = words[i++];
+ char *options = words[i++];
+
+ RRDDIM_FLAGS flags = RRDDIM_FLAG_NONE;
+ if(options && *options) {
+ if(strstr(options, "hidden") != NULL) flags |= RRDDIM_FLAG_HIDDEN;
+ if(strstr(options, "noreset") != NULL) flags |= RRDDIM_FLAG_DONT_DETECT_RESETS_OR_OVERFLOWS;
+ if(strstr(options, "nooverflow") != NULL) flags |= RRDDIM_FLAG_DONT_DETECT_RESETS_OR_OVERFLOWS;
+ }
+
+ if(!pattern) {
+ if(app->dict) {
+ if(dim_name && *dim_name) {
+ char *n = dictionary_get(app->dict, dim_name);
+ if(n) dim_name = n;
+ }
+ else {
+ dim_name = dictionary_get(app->dict, metric_name);
+ }
+ }
+
+ if(!dim_name || !*dim_name)
+ dim_name = metric_name;
+ }
+
+ STATSD_APP_CHART_DIM *dim = add_dimension_to_app_chart(
+ app
+ , chart
+ , metric_name
+ , dim_name
+ , (multiplier && *multiplier)?str2l(multiplier):1
+ , (divisor && *divisor)?str2l(divisor):1
+ , flags
+ , string2valuetype(type, line, filename)
+ );
+
+ if(pattern)
+ dim->metric_pattern = simple_pattern_create(dim->metric, NULL, SIMPLE_PATTERN_EXACT);
+ }
+ else {
+ error("STATSD: ignoring line %zu ('%s') of file '%s'. Unknown keyword for the [%s] section.", line, name, filename, chart->id);
+ continue;
+ }
+ }
+ }
+
+ freez(buffer);
+ fclose(fp);
+ return 0;
+}
+
+static int statsd_file_callback(const char *filename, void *data) {
+ (void)data;
+ return statsd_readfile(filename, NULL, NULL, NULL);
+}
+
+static inline void statsd_readdir(const char *user_path, const char *stock_path, const char *subpath) {
+ recursive_config_double_dir_load(user_path, stock_path, subpath, statsd_file_callback, NULL, 0);
+}
+
+// --------------------------------------------------------------------------------------------------------------------
+// send metrics to netdata - in private charts - called from the main thread
+
+// extract chart type and chart id from metric name
+static inline void statsd_get_metric_type_and_id(STATSD_METRIC *m, char *type, char *id, const char *defid, size_t len) {
+ char *s;
+
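+ // the chart type starts as 'statsd_<defid>_<metric name>';
+ // if the metric name contains a dot, split there: the part before the dot becomes
+ // the chart type and the part after it becomes the chart id,
+ // otherwise the chart id falls back to <defid>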
+ snprintfz(type, len, "%s_%s_%s", STATSD_CHART_PREFIX, defid, m->name);
+ for(s = type; *s ;s++)
+ if(unlikely(*s == '.')) break;
+
+ if(*s == '.') {
+ *s++ = '\0';
+ strncpyz(id, s, len);
+ }
+ else {
+ strncpyz(id, defid, len);
+ }
+
+ netdata_fix_chart_id(type);
+ netdata_fix_chart_id(id);
+}
+
+static inline RRDSET *statsd_private_rrdset_create(
+ STATSD_METRIC *m
+ , const char *type
+ , const char *id
+ , const char *name
+ , const char *family
+ , const char *context
+ , const char *title
+ , const char *units
+ , long priority
+ , int update_every
+ , RRDSET_TYPE chart_type
+) {
+ RRD_MEMORY_MODE memory_mode = statsd.private_charts_memory_mode;
+ long history = statsd.private_charts_rrd_history_entries;
+
+ if(unlikely(statsd.private_charts >= statsd.max_private_charts)) {
+ debug(D_STATSD, "STATSD: metric '%s' will be charted with memory mode = none, because the maximum number of charts has been reached.", m->name);
+ info("STATSD: metric '%s' will be charted with memory mode = none, because the maximum number of charts (%zu) has been reached. Increase the number of charts by editing netdata.conf, [statsd] section.", m->name, statsd.max_private_charts);
+ memory_mode = RRD_MEMORY_MODE_NONE;
+ history = 5;
+ }
+
+ statsd.private_charts++;
+ RRDSET *st = rrdset_create_custom(
+ localhost // host
+ , type // type
+ , id // id
+ , name // name
+ , family // family
+ , context // context
+ , title // title
+ , units // units
+ , PLUGIN_STATSD_NAME // plugin
+ , "private_chart" // module
+ , priority // priority
+ , update_every // update every
+ , chart_type // chart type
+ , memory_mode // memory mode
+ , history // history
+ );
+ rrdset_flag_set(st, RRDSET_FLAG_STORE_FIRST);
+
+ if(statsd.private_charts_hidden)
+ rrdset_flag_set(st, RRDSET_FLAG_HIDDEN);
+
+ // rrdset_flag_set(st, RRDSET_FLAG_DEBUG);
+ return st;
+}
+
+static inline void statsd_private_chart_gauge(STATSD_METRIC *m) {
+ debug(D_STATSD, "updating private chart for gauge metric '%s'", m->name);
+
+ if(unlikely(!m->st)) {
+ char type[RRD_ID_LENGTH_MAX + 1], id[RRD_ID_LENGTH_MAX + 1];
+ statsd_get_metric_type_and_id(m, type, id, "gauge", RRD_ID_LENGTH_MAX);
+
+ char context[RRD_ID_LENGTH_MAX + 1];
+ snprintfz(context, RRD_ID_LENGTH_MAX, "statsd_gauge.%s", m->name);
+
+ char title[RRD_ID_LENGTH_MAX + 1];
+ snprintfz(title, RRD_ID_LENGTH_MAX, "statsd private chart for gauge %s", m->name);
+
+ m->st = statsd_private_rrdset_create(
+ m
+ , type
+ , id
+ , NULL // name
+ , "gauges" // family (submenu)
+ , context // context
+ , title // title
+ , "value" // units
+ , NETDATA_CHART_PRIO_STATSD_PRIVATE
+ , statsd.update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ m->rd_value = rrddim_add(m->st, "gauge", NULL, 1, statsd.decimal_detail, RRD_ALGORITHM_ABSOLUTE);
+
+ if(m->options & STATSD_METRIC_OPTION_CHART_DIMENSION_COUNT)
+ m->rd_count = rrddim_add(m->st, "events", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else rrdset_next(m->st);
+
+ rrddim_set_by_pointer(m->st, m->rd_value, m->last);
+
+ if(m->rd_count)
+ rrddim_set_by_pointer(m->st, m->rd_count, m->events);
+
+ rrdset_done(m->st);
+}
+
+static inline void statsd_private_chart_counter_or_meter(STATSD_METRIC *m, const char *dim, const char *family) {
+ debug(D_STATSD, "updating private chart for %s metric '%s'", dim, m->name);
+
+ if(unlikely(!m->st)) {
+ char type[RRD_ID_LENGTH_MAX + 1], id[RRD_ID_LENGTH_MAX + 1];
+ statsd_get_metric_type_and_id(m, type, id, dim, RRD_ID_LENGTH_MAX);
+
+ char context[RRD_ID_LENGTH_MAX + 1];
+ snprintfz(context, RRD_ID_LENGTH_MAX, "statsd_%s.%s", dim, m->name);
+
+ char title[RRD_ID_LENGTH_MAX + 1];
+ snprintfz(title, RRD_ID_LENGTH_MAX, "statsd private chart for %s %s", dim, m->name);
+
+ m->st = statsd_private_rrdset_create(
+ m
+ , type
+ , id
+ , NULL // name
+ , family // family (submenu)
+ , context // context
+ , title // title
+ , "events/s" // units
+ , NETDATA_CHART_PRIO_STATSD_PRIVATE
+ , statsd.update_every
+ , RRDSET_TYPE_AREA
+ );
+
+ m->rd_value = rrddim_add(m->st, dim, NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+
+ if(m->options & STATSD_METRIC_OPTION_CHART_DIMENSION_COUNT)
+ m->rd_count = rrddim_add(m->st, "events", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else rrdset_next(m->st);
+
+ rrddim_set_by_pointer(m->st, m->rd_value, m->last);
+
+ if(m->rd_count)
+ rrddim_set_by_pointer(m->st, m->rd_count, m->events);
+
+ rrdset_done(m->st);
+}
+
+static inline void statsd_private_chart_set(STATSD_METRIC *m) {
+ debug(D_STATSD, "updating private chart for set metric '%s'", m->name);
+
+ if(unlikely(!m->st)) {
+ char type[RRD_ID_LENGTH_MAX + 1], id[RRD_ID_LENGTH_MAX + 1];
+ statsd_get_metric_type_and_id(m, type, id, "set", RRD_ID_LENGTH_MAX);
+
+ char context[RRD_ID_LENGTH_MAX + 1];
+ snprintfz(context, RRD_ID_LENGTH_MAX, "statsd_set.%s", m->name);
+
+ char title[RRD_ID_LENGTH_MAX + 1];
+ snprintfz(title, RRD_ID_LENGTH_MAX, "statsd private chart for set %s", m->name);
+
+ m->st = statsd_private_rrdset_create(
+ m
+ , type
+ , id
+ , NULL // name
+ , "sets" // family (submenu)
+ , context // context
+ , title // title
+ , "entries" // units
+ , NETDATA_CHART_PRIO_STATSD_PRIVATE
+ , statsd.update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ m->rd_value = rrddim_add(m->st, "set", "set size", 1, 1, RRD_ALGORITHM_ABSOLUTE);
+
+ if(m->options & STATSD_METRIC_OPTION_CHART_DIMENSION_COUNT)
+ m->rd_count = rrddim_add(m->st, "events", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else rrdset_next(m->st);
+
+ rrddim_set_by_pointer(m->st, m->rd_value, m->last);
+
+ if(m->rd_count)
+ rrddim_set_by_pointer(m->st, m->rd_count, m->events);
+
+ rrdset_done(m->st);
+}
+
+static inline void statsd_private_chart_timer_or_histogram(STATSD_METRIC *m, const char *dim, const char *family, const char *units) {
+ debug(D_STATSD, "updating private chart for %s metric '%s'", dim, m->name);
+
+ if(unlikely(!m->st)) {
+ char type[RRD_ID_LENGTH_MAX + 1], id[RRD_ID_LENGTH_MAX + 1];
+ statsd_get_metric_type_and_id(m, type, id, dim, RRD_ID_LENGTH_MAX);
+
+ char context[RRD_ID_LENGTH_MAX + 1];
+ snprintfz(context, RRD_ID_LENGTH_MAX, "statsd_%s.%s", dim, m->name);
+
+ char title[RRD_ID_LENGTH_MAX + 1];
+ snprintfz(title, RRD_ID_LENGTH_MAX, "statsd private chart for %s %s", dim, m->name);
+
+ m->st = statsd_private_rrdset_create(
+ m
+ , type
+ , id
+ , NULL // name
+ , family // family (submenu)
+ , context // context
+ , title // title
+ , units // units
+ , NETDATA_CHART_PRIO_STATSD_PRIVATE
+ , statsd.update_every
+ , RRDSET_TYPE_AREA
+ );
+
+ m->histogram.ext->rd_min = rrddim_add(m->st, "min", NULL, 1, statsd.decimal_detail, RRD_ALGORITHM_ABSOLUTE);
+ m->histogram.ext->rd_max = rrddim_add(m->st, "max", NULL, 1, statsd.decimal_detail, RRD_ALGORITHM_ABSOLUTE);
+ m->rd_value = rrddim_add(m->st, "average", NULL, 1, statsd.decimal_detail, RRD_ALGORITHM_ABSOLUTE);
+ m->histogram.ext->rd_percentile = rrddim_add(m->st, statsd.histogram_percentile_str, NULL, 1, statsd.decimal_detail, RRD_ALGORITHM_ABSOLUTE);
+ m->histogram.ext->rd_median = rrddim_add(m->st, "median", NULL, 1, statsd.decimal_detail, RRD_ALGORITHM_ABSOLUTE);
+ m->histogram.ext->rd_stddev = rrddim_add(m->st, "stddev", NULL, 1, statsd.decimal_detail, RRD_ALGORITHM_ABSOLUTE);
+ m->histogram.ext->rd_sum = rrddim_add(m->st, "sum", NULL, 1, statsd.decimal_detail, RRD_ALGORITHM_ABSOLUTE);
+
+ if(m->options & STATSD_METRIC_OPTION_CHART_DIMENSION_COUNT)
+ m->rd_count = rrddim_add(m->st, "events", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else rrdset_next(m->st);
+
+ rrddim_set_by_pointer(m->st, m->histogram.ext->rd_min, m->histogram.ext->last_min);
+ rrddim_set_by_pointer(m->st, m->histogram.ext->rd_max, m->histogram.ext->last_max);
+ rrddim_set_by_pointer(m->st, m->histogram.ext->rd_percentile, m->histogram.ext->last_percentile);
+ rrddim_set_by_pointer(m->st, m->histogram.ext->rd_median, m->histogram.ext->last_median);
+ rrddim_set_by_pointer(m->st, m->histogram.ext->rd_stddev, m->histogram.ext->last_stddev);
+ rrddim_set_by_pointer(m->st, m->histogram.ext->rd_sum, m->histogram.ext->last_sum);
+ rrddim_set_by_pointer(m->st, m->rd_value, m->last);
+
+ if(m->rd_count)
+ rrddim_set_by_pointer(m->st, m->rd_count, m->events);
+
+ rrdset_done(m->st);
+}
+
+// --------------------------------------------------------------------------------------------------------------------
+// statsd flush metrics
+
+static inline void statsd_flush_gauge(STATSD_METRIC *m) {
+ debug(D_STATSD, "flushing gauge metric '%s'", m->name);
+
+ int updated = 0;
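+ // update the reported value only if the metric has been collected since the last flush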
+ if(unlikely(!m->reset && m->count)) {
+ m->last = (collected_number) (m->gauge.value * statsd.decimal_detail);
+
+ m->reset = 1;
+ updated = 1;
+ }
+
+ if(unlikely(m->options & STATSD_METRIC_OPTION_PRIVATE_CHART_ENABLED && (updated || !(m->options & STATSD_METRIC_OPTION_SHOW_GAPS_WHEN_NOT_COLLECTED))))
+ statsd_private_chart_gauge(m);
+}
+
+static inline void statsd_flush_counter_or_meter(STATSD_METRIC *m, const char *dim, const char *family) {
+ debug(D_STATSD, "flushing %s metric '%s'", dim, m->name);
+
+ int updated = 0;
+ if(unlikely(!m->reset && m->count)) {
+ m->last = m->counter.value;
+
+ m->reset = 1;
+ updated = 1;
+ }
+
+ if(unlikely(m->options & STATSD_METRIC_OPTION_PRIVATE_CHART_ENABLED && (updated || !(m->options & STATSD_METRIC_OPTION_SHOW_GAPS_WHEN_NOT_COLLECTED))))
+ statsd_private_chart_counter_or_meter(m, dim, family);
+}
+
+static inline void statsd_flush_counter(STATSD_METRIC *m) {
+ statsd_flush_counter_or_meter(m, "counter", "counters");
+}
+
+static inline void statsd_flush_meter(STATSD_METRIC *m) {
+ statsd_flush_counter_or_meter(m, "meter", "meters");
+}
+
+static inline void statsd_flush_set(STATSD_METRIC *m) {
+ debug(D_STATSD, "flushing set metric '%s'", m->name);
+
+ int updated = 0;
+ if(unlikely(!m->reset && m->count)) {
+ m->last = (collected_number)m->set.unique;
+
+ m->reset = 1;
+ updated = 1;
+ }
+ else {
+ m->last = 0;
+ }
+
+ if(unlikely(m->options & STATSD_METRIC_OPTION_PRIVATE_CHART_ENABLED && (updated || !(m->options & STATSD_METRIC_OPTION_SHOW_GAPS_WHEN_NOT_COLLECTED))))
+ statsd_private_chart_set(m);
+}
+
+static inline void statsd_flush_timer_or_histogram(STATSD_METRIC *m, const char *dim, const char *family, const char *units) {
+ debug(D_STATSD, "flushing %s metric '%s'", dim, m->name);
+
+ int updated = 0;
+ if(unlikely(!m->reset && m->count && m->histogram.ext->used > 0)) {
+ netdata_mutex_lock(&m->histogram.ext->mutex);
+
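+ // the collected values have to be sorted, so that min/max, median and the
+ // requested percentile can be read directly from the sorted series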
+ size_t len = m->histogram.ext->used;
+ LONG_DOUBLE *series = m->histogram.ext->values;
+ sort_series(series, len);
+
+ m->histogram.ext->last_min = (collected_number)roundl(series[0] * statsd.decimal_detail);
+ m->histogram.ext->last_max = (collected_number)roundl(series[len - 1] * statsd.decimal_detail);
+ m->last = (collected_number)roundl(average(series, len) * statsd.decimal_detail);
+ m->histogram.ext->last_median = (collected_number)roundl(median_on_sorted_series(series, len) * statsd.decimal_detail);
+ m->histogram.ext->last_stddev = (collected_number)roundl(standard_deviation(series, len) * statsd.decimal_detail);
+ m->histogram.ext->last_sum = (collected_number)roundl(sum(series, len) * statsd.decimal_detail);
+
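+ // the requested percentile: keep the first 'pct_len' values of the sorted
+ // series and report the largest of them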
+ size_t pct_len = (size_t)floor((double)len * statsd.histogram_percentile / 100.0);
+ if(pct_len < 1)
+ m->histogram.ext->last_percentile = (collected_number)(series[0] * statsd.decimal_detail);
+ else
+ m->histogram.ext->last_percentile = (collected_number)roundl(series[pct_len - 1] * statsd.decimal_detail);
+
+ netdata_mutex_unlock(&m->histogram.ext->mutex);
+
+ debug(D_STATSD, "STATSD %s metric %s: min " COLLECTED_NUMBER_FORMAT ", max " COLLECTED_NUMBER_FORMAT ", last " COLLECTED_NUMBER_FORMAT ", pcent " COLLECTED_NUMBER_FORMAT ", median " COLLECTED_NUMBER_FORMAT ", stddev " COLLECTED_NUMBER_FORMAT ", sum " COLLECTED_NUMBER_FORMAT,
+ dim, m->name, m->histogram.ext->last_min, m->histogram.ext->last_max, m->last, m->histogram.ext->last_percentile, m->histogram.ext->last_median, m->histogram.ext->last_stddev, m->histogram.ext->last_sum);
+
+ m->histogram.ext->zeroed = 0;
+ m->reset = 1;
+ updated = 1;
+ }
+ else if(unlikely(!m->histogram.ext->zeroed)) {
+ // reset the reported values to zero
+ // they will be updated again as soon as new values are collected
+ // this ensures that we report zeros when nothing is collected
+
+ m->histogram.ext->last_min = 0;
+ m->histogram.ext->last_max = 0;
+ m->last = 0;
+ m->histogram.ext->last_median = 0;
+ m->histogram.ext->last_stddev = 0;
+ m->histogram.ext->last_sum = 0;
+ m->histogram.ext->last_percentile = 0;
+
+ m->histogram.ext->zeroed = 1;
+ }
+
+ if(unlikely(m->options & STATSD_METRIC_OPTION_PRIVATE_CHART_ENABLED && (updated || !(m->options & STATSD_METRIC_OPTION_SHOW_GAPS_WHEN_NOT_COLLECTED))))
+ statsd_private_chart_timer_or_histogram(m, dim, family, units);
+}
+
+static inline void statsd_flush_timer(STATSD_METRIC *m) {
+ statsd_flush_timer_or_histogram(m, "timer", "timers", "milliseconds");
+}
+
+static inline void statsd_flush_histogram(STATSD_METRIC *m) {
+ statsd_flush_timer_or_histogram(m, "histogram", "histograms", "value");
+}
+
+static inline RRD_ALGORITHM statsd_algorithm_for_metric(STATSD_METRIC *m) {
+ switch(m->type) {
+ default:
+ case STATSD_METRIC_TYPE_GAUGE:
+ case STATSD_METRIC_TYPE_SET:
+ case STATSD_METRIC_TYPE_TIMER:
+ case STATSD_METRIC_TYPE_HISTOGRAM:
+ return RRD_ALGORITHM_ABSOLUTE;
+
+ case STATSD_METRIC_TYPE_METER:
+ case STATSD_METRIC_TYPE_COUNTER:
+ return RRD_ALGORITHM_INCREMENTAL;
+ }
+}
+
+static inline void link_metric_to_app_dimension(STATSD_APP *app, STATSD_METRIC *m, STATSD_APP_CHART *chart, STATSD_APP_CHART_DIM *dim) {
+ if(dim->value_type == STATSD_APP_CHART_DIM_VALUE_TYPE_EVENTS) {
+ dim->value_ptr = &m->events;
+ dim->algorithm = RRD_ALGORITHM_INCREMENTAL;
+ }
+ else if(m->type == STATSD_METRIC_TYPE_HISTOGRAM || m->type == STATSD_METRIC_TYPE_TIMER) {
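+ // timer and histogram values are stored scaled by 'decimal detail',
+ // so the dimension divisor is scaled accordingly to present the real value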
+ dim->algorithm = RRD_ALGORITHM_ABSOLUTE;
+ dim->divisor *= statsd.decimal_detail;
+
+ switch(dim->value_type) {
+ case STATSD_APP_CHART_DIM_VALUE_TYPE_EVENTS:
+ // will never match - added to avoid warning
+ break;
+
+ case STATSD_APP_CHART_DIM_VALUE_TYPE_LAST:
+ case STATSD_APP_CHART_DIM_VALUE_TYPE_AVERAGE:
+ dim->value_ptr = &m->last;
+ break;
+
+ case STATSD_APP_CHART_DIM_VALUE_TYPE_SUM:
+ dim->value_ptr = &m->histogram.ext->last_sum;
+ break;
+
+ case STATSD_APP_CHART_DIM_VALUE_TYPE_MIN:
+ dim->value_ptr = &m->histogram.ext->last_min;
+ break;
+
+ case STATSD_APP_CHART_DIM_VALUE_TYPE_MAX:
+ dim->value_ptr = &m->histogram.ext->last_max;
+ break;
+
+ case STATSD_APP_CHART_DIM_VALUE_TYPE_MEDIAN:
+ dim->value_ptr = &m->histogram.ext->last_median;
+ break;
+
+ case STATSD_APP_CHART_DIM_VALUE_TYPE_PERCENTILE:
+ dim->value_ptr = &m->histogram.ext->last_percentile;
+ break;
+
+ case STATSD_APP_CHART_DIM_VALUE_TYPE_STDDEV:
+ dim->value_ptr = &m->histogram.ext->last_stddev;
+ break;
+ }
+ }
+ else {
+ if (dim->value_type != STATSD_APP_CHART_DIM_VALUE_TYPE_LAST)
+ error("STATSD: unsupported value type for dimension '%s' of chart '%s' of app '%s' on metric '%s'", dim->name, chart->id, app->name, m->name);
+
+ dim->value_ptr = &m->last;
+ dim->algorithm = statsd_algorithm_for_metric(m);
+
+ if(m->type == STATSD_METRIC_TYPE_GAUGE)
+ dim->divisor *= statsd.decimal_detail;
+ }
+
+ if(unlikely(chart->st && dim->rd)) {
+ rrddim_set_algorithm(chart->st, dim->rd, dim->algorithm);
+ rrddim_set_multiplier(chart->st, dim->rd, dim->multiplier);
+ rrddim_set_divisor(chart->st, dim->rd, dim->divisor);
+ }
+
+ chart->dimensions_linked_count++;
+ m->options |= STATSD_METRIC_OPTION_USED_IN_APPS;
+ debug(D_STATSD, "metric '%s' of type %u linked with app '%s', chart '%s', dimension '%s', algorithm '%s'", m->name, m->type, app->name, chart->id, dim->name, rrd_algorithm_name(dim->algorithm));
+}
+
+static inline void check_if_metric_is_for_app(STATSD_INDEX *index, STATSD_METRIC *m) {
+ (void)index;
+
+ STATSD_APP *app;
+ for(app = statsd.apps; app ;app = app->next) {
+ if(unlikely(simple_pattern_matches(app->metrics, m->name))) {
+ debug(D_STATSD, "metric '%s' matches app '%s'", m->name, app->name);
+
+ // the metric should get the options from the app
+
+ if(app->default_options & STATSD_METRIC_OPTION_PRIVATE_CHART_ENABLED)
+ m->options |= STATSD_METRIC_OPTION_PRIVATE_CHART_ENABLED;
+ else
+ m->options &= ~STATSD_METRIC_OPTION_PRIVATE_CHART_ENABLED;
+
+ if(app->default_options & STATSD_METRIC_OPTION_SHOW_GAPS_WHEN_NOT_COLLECTED)
+ m->options |= STATSD_METRIC_OPTION_SHOW_GAPS_WHEN_NOT_COLLECTED;
+ else
+ m->options &= ~STATSD_METRIC_OPTION_SHOW_GAPS_WHEN_NOT_COLLECTED;
+
+ m->options |= STATSD_METRIC_OPTION_PRIVATE_CHART_CHECKED;
+
+ // check if any chart of this app wants this metric
+ STATSD_APP_CHART *chart;
+ for(chart = app->charts; chart; chart = chart->next) {
+
+ STATSD_APP_CHART_DIM *dim;
+ for(dim = chart->dimensions; dim ; dim = dim->next) {
+ if(unlikely(dim->metric_pattern)) {
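+ // this dimension was defined with the 'pattern' keyword;
+ // extract the wildcarded part of the metric name and use it (optionally
+ // translated through the app dictionary) as the name of a new, concrete
+ // dimension that is appended to the chart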
+ size_t dim_name_len = strlen(dim->name);
+ size_t wildcarded_len = dim_name_len + strlen(m->name) + 1;
+ char wildcarded[wildcarded_len];
+
+ strcpy(wildcarded, dim->name);
+ char *ws = &wildcarded[dim_name_len];
+
+ if(simple_pattern_matches_extract(dim->metric_pattern, m->name, ws, wildcarded_len - dim_name_len)) {
+
+ char *final_name = NULL;
+
+ if(app->dict) {
+ if(likely(*wildcarded)) {
+ // use the name of the wildcarded string
+ final_name = dictionary_get(app->dict, wildcarded);
+ }
+
+ if(unlikely(!final_name)) {
+ // use the name of the metric
+ final_name = dictionary_get(app->dict, m->name);
+ }
+ }
+
+ if(unlikely(!final_name))
+ final_name = wildcarded;
+
+ add_dimension_to_app_chart(
+ app
+ , chart
+ , m->name
+ , final_name
+ , dim->multiplier
+ , dim->divisor
+ , dim->flags
+ , dim->value_type
+ );
+
+ // the new dimension is appended to the list
+ // so, it will be matched and linked later too
+ }
+ }
+ else if(!dim->value_ptr && dim->metric_hash == m->hash && !strcmp(dim->metric, m->name)) {
+ // we have a match - this metric should be linked to this dimension
+ link_metric_to_app_dimension(app, m, chart, dim);
+ }
+ }
+
+ }
+ }
+ }
+}
+
+static inline RRDDIM *statsd_add_dim_to_app_chart(STATSD_APP *app, STATSD_APP_CHART *chart, STATSD_APP_CHART_DIM *dim) {
+ (void)app;
+
+ // allow the same statsd metric to be added multiple times to the same chart
+
+ STATSD_APP_CHART_DIM *tdim;
+ size_t count_same_metric = 0, count_same_metric_value_type = 0;
+ size_t pos_same_metric_value_type = 0;
+
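+ // count how many dimensions of this chart are fed by the same metric,
+ // so that a unique dimension id can be generated when needed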
+ for (tdim = chart->dimensions; tdim && tdim->next; tdim = tdim->next) {
+ if (dim->metric_hash == tdim->metric_hash && !strcmp(dim->metric, tdim->metric)) {
+ count_same_metric++;
+
+ if(dim->value_type == tdim->value_type) {
+ count_same_metric_value_type++;
+ if (tdim == dim)
+ pos_same_metric_value_type = count_same_metric_value_type;
+ }
+ }
+ }
+
+ if(count_same_metric > 1) {
+ // the same metric is found multiple times
+
+ size_t len = strlen(dim->metric) + 100;
+ char metric[ len + 1 ];
+
+ if(count_same_metric_value_type > 1) {
+ // the same metric, with the same value type, is added multiple times
+ snprintfz(metric, len, "%s_%s%zu", dim->metric, valuetype2string(dim->value_type), pos_same_metric_value_type);
+ }
+ else {
+ // the same metric is added again, but with a different value type
+ snprintfz(metric, len, "%s_%s", dim->metric, valuetype2string(dim->value_type));
+ }
+
+ dim->rd = rrddim_add(chart->st, metric, dim->name, dim->multiplier, dim->divisor, dim->algorithm);
+ if(dim->flags != RRDDIM_FLAG_NONE) dim->rd->flags |= dim->flags;
+ return dim->rd;
+ }
+
+ dim->rd = rrddim_add(chart->st, dim->metric, dim->name, dim->multiplier, dim->divisor, dim->algorithm);
+ if(dim->flags != RRDDIM_FLAG_NONE) dim->rd->flags |= dim->flags;
+ return dim->rd;
+}
+
+static inline void statsd_update_app_chart(STATSD_APP *app, STATSD_APP_CHART *chart) {
+ debug(D_STATSD, "updating chart '%s' for app '%s'", chart->id, app->name);
+
+ if(!chart->st) {
+ chart->st = rrdset_create_custom(
+ localhost // host
+ , app->name // type
+ , chart->id // id
+ , chart->name // name
+ , chart->family // family
+ , chart->context // context
+ , chart->title // title
+ , chart->units // units
+ , PLUGIN_STATSD_NAME // plugin
+ , chart->source // module
+ , chart->priority // priority
+ , statsd.update_every // update every
+ , chart->chart_type // chart type
+ , app->rrd_memory_mode // memory mode
+ , app->rrd_history_entries // history
+ );
+
+ rrdset_flag_set(chart->st, RRDSET_FLAG_STORE_FIRST);
+ // rrdset_flag_set(chart->st, RRDSET_FLAG_DEBUG);
+ }
+ else rrdset_next(chart->st);
+
+ STATSD_APP_CHART_DIM *dim;
+ for(dim = chart->dimensions; dim ;dim = dim->next) {
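+ // 'pattern' dimensions are templates only - they are expanded into concrete
+ // dimensions when metrics are matched, so they are never updated directly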
+ if(likely(!dim->metric_pattern)) {
+ if (unlikely(!dim->rd))
+ statsd_add_dim_to_app_chart(app, chart, dim);
+
+ if (unlikely(dim->value_ptr)) {
+ debug(D_STATSD, "updating dimension '%s' (%s) of chart '%s' (%s) for app '%s' with value " COLLECTED_NUMBER_FORMAT, dim->name, dim->rd->id, chart->id, chart->st->id, app->name, *dim->value_ptr);
+ rrddim_set_by_pointer(chart->st, dim->rd, *dim->value_ptr);
+ }
+ }
+ }
+
+ rrdset_done(chart->st);
+ debug(D_STATSD, "completed update of chart '%s' for app '%s'", chart->id, app->name);
+}
+
+static inline void statsd_update_all_app_charts(void) {
+ // debug(D_STATSD, "updating app charts");
+
+ STATSD_APP *app;
+ for(app = statsd.apps; app ;app = app->next) {
+ // debug(D_STATSD, "updating charts for app '%s'", app->name);
+
+ STATSD_APP_CHART *chart;
+ for(chart = app->charts; chart ;chart = chart->next) {
+ if(unlikely(chart->dimensions_linked_count)) {
+ statsd_update_app_chart(app, chart);
+ }
+ }
+ }
+
+ // debug(D_STATSD, "completed update of app charts");
+}
+
+const char *statsd_metric_type_string(STATSD_METRIC_TYPE type) {
+ switch(type) {
+ case STATSD_METRIC_TYPE_COUNTER: return "counter";
+ case STATSD_METRIC_TYPE_GAUGE: return "gauge";
+ case STATSD_METRIC_TYPE_HISTOGRAM: return "histogram";
+ case STATSD_METRIC_TYPE_METER: return "meter";
+ case STATSD_METRIC_TYPE_SET: return "set";
+ case STATSD_METRIC_TYPE_TIMER: return "timer";
+ default: return "unknown";
+ }
+}
+
+static inline void statsd_flush_index_metrics(STATSD_INDEX *index, void (*flush_metric)(STATSD_METRIC *)) {
+ STATSD_METRIC *m;
+
+ // find the useful metrics (incremental = each time we are called, we check the new metrics only)
+ for(m = index->first; m ; m = m->next) {
+ // new metrics are added at the beginning of the index,
+ // so we only need to check up to the first metric we have already checked
+ if(unlikely(is_metric_checked(m))) break;
+
+ if(unlikely(!(m->options & STATSD_METRIC_OPTION_CHECKED_IN_APPS))) {
+ log_access("NEW STATSD METRIC '%s': '%s'", statsd_metric_type_string(m->type), m->name);
+ check_if_metric_is_for_app(index, m);
+ m->options |= STATSD_METRIC_OPTION_CHECKED_IN_APPS;
+ }
+
+ if(unlikely(!(m->options & STATSD_METRIC_OPTION_PRIVATE_CHART_CHECKED))) {
+ if(unlikely(statsd.private_charts >= statsd.max_private_charts_hard)) {
+ debug(D_STATSD, "STATSD: metric '%s' will not be charted, because the hard limit of the maximum number of charts has been reached.", m->name);
+ info("STATSD: metric '%s' will not be charted, because the hard limit of the maximum number of charts (%zu) has been reached. Increase the number of charts by editing netdata.conf, [statsd] section.", m->name, statsd.max_private_charts);
+ m->options &= ~STATSD_METRIC_OPTION_PRIVATE_CHART_ENABLED;
+ }
+ else {
+ if (simple_pattern_matches(statsd.charts_for, m->name)) {
+ debug(D_STATSD, "STATSD: metric '%s' will be charted.", m->name);
+ m->options |= STATSD_METRIC_OPTION_PRIVATE_CHART_ENABLED;
+ } else {
+ debug(D_STATSD, "STATSD: metric '%s' will not be charted.", m->name);
+ m->options &= ~STATSD_METRIC_OPTION_PRIVATE_CHART_ENABLED;
+ }
+ }
+
+ m->options |= STATSD_METRIC_OPTION_PRIVATE_CHART_CHECKED;
+ }
+
+ // mark it as checked
+ m->options |= STATSD_METRIC_OPTION_CHECKED;
+
+ // check if it is used in charts
+ if((m->options & (STATSD_METRIC_OPTION_PRIVATE_CHART_ENABLED|STATSD_METRIC_OPTION_USED_IN_APPS)) && !(m->options & STATSD_METRIC_OPTION_USEFUL)) {
+ m->options |= STATSD_METRIC_OPTION_USEFUL;
+ index->useful++;
+ m->next_useful = index->first_useful;
+ index->first_useful = m;
+ }
+ }
+
+ // flush all the useful metrics
+ for(m = index->first_useful; m ; m = m->next_useful) {
+ flush_metric(m);
+ }
+}
+
+
+// --------------------------------------------------------------------------------------
+// statsd main thread
+
+static int statsd_listen_sockets_setup(void) {
+ return listen_sockets_setup(&statsd.sockets);
+}
+
+static void statsd_main_cleanup(void *data) {
+ struct netdata_static_thread *static_thread = (struct netdata_static_thread *)data;
+ static_thread->enabled = NETDATA_MAIN_THREAD_EXITING;
+ info("cleaning up...");
+
+ if (statsd.collection_threads_status) {
+ int i;
+ for (i = 0; i < statsd.threads; i++) {
+ if(statsd.collection_threads_status[i].status) {
+ info("STATSD: stopping data collection thread %d...", i + 1);
+ netdata_thread_cancel(statsd.collection_threads_status[i].thread);
+ }
+ else {
+ info("STATSD: data collection thread %d found stopped.", i + 1);
+ }
+ }
+ }
+
+ info("STATSD: closing sockets...");
+ listen_sockets_close(&statsd.sockets);
+
+ info("STATSD: cleanup completed.");
+ static_thread->enabled = NETDATA_MAIN_THREAD_EXITED;
+}
+
+void *statsd_main(void *ptr) {
+ netdata_thread_cleanup_push(statsd_main_cleanup, ptr);
+
+ // ----------------------------------------------------------------------------------------------------------------
+ // statsd configuration
+
+ statsd.enabled = config_get_boolean(CONFIG_SECTION_STATSD, "enabled", statsd.enabled);
+
+ statsd.update_every = default_rrd_update_every;
+ statsd.update_every = (int)config_get_number(CONFIG_SECTION_STATSD, "update every (flushInterval)", statsd.update_every);
+ if(statsd.update_every < default_rrd_update_every) {
+ error("STATSD: minimum flush interval %d given, but the minimum is the update every of netdata. Using %d", statsd.update_every, default_rrd_update_every);
+ statsd.update_every = default_rrd_update_every;
+ }
+
+#ifdef HAVE_RECVMMSG
+ statsd.recvmmsg_size = (size_t)config_get_number(CONFIG_SECTION_STATSD, "udp messages to process at once", (long long)statsd.recvmmsg_size);
+#endif
+
+ statsd.charts_for = simple_pattern_create(config_get(CONFIG_SECTION_STATSD, "create private charts for metrics matching", "*"), NULL, SIMPLE_PATTERN_EXACT);
+ statsd.max_private_charts = (size_t)config_get_number(CONFIG_SECTION_STATSD, "max private charts allowed", (long long)statsd.max_private_charts);
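+ // the hard limit defaults to 5 times the configured soft limit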
+ statsd.max_private_charts_hard = (size_t)config_get_number(CONFIG_SECTION_STATSD, "max private charts hard limit", (long long)statsd.max_private_charts * 5);
+ statsd.private_charts_memory_mode = rrd_memory_mode_id(config_get(CONFIG_SECTION_STATSD, "private charts memory mode", rrd_memory_mode_name(default_rrd_memory_mode)));
+ statsd.private_charts_rrd_history_entries = (int)config_get_number(CONFIG_SECTION_STATSD, "private charts history", default_rrd_history_entries);
+ statsd.decimal_detail = (collected_number)config_get_number(CONFIG_SECTION_STATSD, "decimal detail", (long long int)statsd.decimal_detail);
+ statsd.tcp_idle_timeout = (size_t) config_get_number(CONFIG_SECTION_STATSD, "disconnect idle tcp clients after seconds", (long long int)statsd.tcp_idle_timeout);
+ statsd.private_charts_hidden = (unsigned int)config_get_boolean(CONFIG_SECTION_STATSD, "private charts hidden", statsd.private_charts_hidden);
+
+ statsd.histogram_percentile = (double)config_get_float(CONFIG_SECTION_STATSD, "histograms and timers percentile (percentThreshold)", statsd.histogram_percentile);
+ if(isless(statsd.histogram_percentile, 0) || isgreater(statsd.histogram_percentile, 100)) {
+ error("STATSD: invalid histograms and timers percentile %0.5f given", statsd.histogram_percentile);
+ statsd.histogram_percentile = 95.0;
+ }
+ {
+ char buffer[100 + 1];
+ snprintf(buffer, 100, "%0.1f%%", statsd.histogram_percentile);
+ statsd.histogram_percentile_str = strdupz(buffer);
+ }
+
+ if(config_get_boolean(CONFIG_SECTION_STATSD, "add dimension for number of events received", 1)) {
+ statsd.gauges.default_options |= STATSD_METRIC_OPTION_CHART_DIMENSION_COUNT;
+ statsd.counters.default_options |= STATSD_METRIC_OPTION_CHART_DIMENSION_COUNT;
+ statsd.meters.default_options |= STATSD_METRIC_OPTION_CHART_DIMENSION_COUNT;
+ statsd.sets.default_options |= STATSD_METRIC_OPTION_CHART_DIMENSION_COUNT;
+ statsd.histograms.default_options |= STATSD_METRIC_OPTION_CHART_DIMENSION_COUNT;
+ statsd.timers.default_options |= STATSD_METRIC_OPTION_CHART_DIMENSION_COUNT;
+ }
+
+ if(config_get_boolean(CONFIG_SECTION_STATSD, "gaps on gauges (deleteGauges)", 0))
+ statsd.gauges.default_options |= STATSD_METRIC_OPTION_SHOW_GAPS_WHEN_NOT_COLLECTED;
+
+ if(config_get_boolean(CONFIG_SECTION_STATSD, "gaps on counters (deleteCounters)", 0))
+ statsd.counters.default_options |= STATSD_METRIC_OPTION_SHOW_GAPS_WHEN_NOT_COLLECTED;
+
+ if(config_get_boolean(CONFIG_SECTION_STATSD, "gaps on meters (deleteMeters)", 0))
+ statsd.meters.default_options |= STATSD_METRIC_OPTION_SHOW_GAPS_WHEN_NOT_COLLECTED;
+
+ if(config_get_boolean(CONFIG_SECTION_STATSD, "gaps on sets (deleteSets)", 0))
+ statsd.sets.default_options |= STATSD_METRIC_OPTION_SHOW_GAPS_WHEN_NOT_COLLECTED;
+
+ if(config_get_boolean(CONFIG_SECTION_STATSD, "gaps on histograms (deleteHistograms)", 0))
+ statsd.histograms.default_options |= STATSD_METRIC_OPTION_SHOW_GAPS_WHEN_NOT_COLLECTED;
+
+ if(config_get_boolean(CONFIG_SECTION_STATSD, "gaps on timers (deleteTimers)", 0))
+ statsd.timers.default_options |= STATSD_METRIC_OPTION_SHOW_GAPS_WHEN_NOT_COLLECTED;
+
+ size_t max_sockets = (size_t)config_get_number(CONFIG_SECTION_STATSD, "statsd server max TCP sockets", (long long int)(rlimit_nofile.rlim_cur / 4));
+
+#ifdef STATSD_MULTITHREADED
+ statsd.threads = (int)config_get_number(CONFIG_SECTION_STATSD, "threads", processors);
+ if(statsd.threads < 1) {
+ error("STATSD: Invalid number of threads %d, using %d", statsd.threads, processors);
+ statsd.threads = processors;
+ config_set_number(CONFIG_SECTION_STATSD, "threads", statsd.threads);
+ }
+#else
+ statsd.threads = 1;
+#endif
+
+ // read custom application definitions
+ statsd_readdir(netdata_configured_user_config_dir, netdata_configured_stock_config_dir, "statsd.d");
+
+ // ----------------------------------------------------------------------------------------------------------------
+ // statsd setup
+
+ if(!statsd.enabled) return NULL;
+
+ statsd_listen_sockets_setup();
+ if(!statsd.sockets.opened) {
+ error("STATSD: No statsd sockets to listen to. statsd will be disabled.");
+ goto cleanup;
+ }
+
+ statsd.collection_threads_status = callocz((size_t)statsd.threads, sizeof(struct collection_thread_status));
+
+ int i;
+ for(i = 0; i < statsd.threads ;i++) {
+ statsd.collection_threads_status[i].max_sockets = max_sockets / statsd.threads;
+ char tag[NETDATA_THREAD_TAG_MAX + 1];
+ snprintfz(tag, NETDATA_THREAD_TAG_MAX, "STATSD_COLLECTOR[%d]", i + 1);
+ netdata_thread_create(&statsd.collection_threads_status[i].thread, tag, NETDATA_THREAD_OPTION_DEFAULT, statsd_collector_thread, &statsd.collection_threads_status[i]);
+ }
+
+ // ----------------------------------------------------------------------------------------------------------------
+ // statsd monitoring charts
+
+ RRDSET *st_metrics = rrdset_create_localhost(
+ "netdata"
+ , "statsd_metrics"
+ , NULL
+ , "statsd"
+ , NULL
+ , "Metrics in the netdata statsd database"
+ , "metrics"
+ , PLUGIN_STATSD_NAME
+ , "stats"
+ , 132010
+ , statsd.update_every
+ , RRDSET_TYPE_STACKED
+ );
+ RRDDIM *rd_metrics_gauge = rrddim_add(st_metrics, "gauges", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ RRDDIM *rd_metrics_counter = rrddim_add(st_metrics, "counters", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ RRDDIM *rd_metrics_timer = rrddim_add(st_metrics, "timers", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ RRDDIM *rd_metrics_meter = rrddim_add(st_metrics, "meters", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ RRDDIM *rd_metrics_histogram = rrddim_add(st_metrics, "histograms", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ RRDDIM *rd_metrics_set = rrddim_add(st_metrics, "sets", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+
+ RRDSET *st_useful_metrics = rrdset_create_localhost(
+ "netdata"
+ , "statsd_useful_metrics"
+ , NULL
+ , "statsd"
+ , NULL
+ , "Useful metrics in the netdata statsd database"
+ , "metrics"
+ , PLUGIN_STATSD_NAME
+ , "stats"
+ , 132010
+ , statsd.update_every
+ , RRDSET_TYPE_STACKED
+ );
+ RRDDIM *rd_useful_metrics_gauge = rrddim_add(st_useful_metrics, "gauges", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ RRDDIM *rd_useful_metrics_counter = rrddim_add(st_useful_metrics, "counters", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ RRDDIM *rd_useful_metrics_timer = rrddim_add(st_useful_metrics, "timers", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ RRDDIM *rd_useful_metrics_meter = rrddim_add(st_useful_metrics, "meters", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ RRDDIM *rd_useful_metrics_histogram = rrddim_add(st_useful_metrics, "histograms", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ RRDDIM *rd_useful_metrics_set = rrddim_add(st_useful_metrics, "sets", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+
+ RRDSET *st_events = rrdset_create_localhost(
+ "netdata"
+ , "statsd_events"
+ , NULL
+ , "statsd"
+ , NULL
+ , "Events processed by the netdata statsd server"
+ , "events/s"
+ , PLUGIN_STATSD_NAME
+ , "stats"
+ , 132011
+ , statsd.update_every
+ , RRDSET_TYPE_STACKED
+ );
+ RRDDIM *rd_events_gauge = rrddim_add(st_events, "gauges", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ RRDDIM *rd_events_counter = rrddim_add(st_events, "counters", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ RRDDIM *rd_events_timer = rrddim_add(st_events, "timers", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ RRDDIM *rd_events_meter = rrddim_add(st_events, "meters", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ RRDDIM *rd_events_histogram = rrddim_add(st_events, "histograms", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ RRDDIM *rd_events_set = rrddim_add(st_events, "sets", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ RRDDIM *rd_events_unknown = rrddim_add(st_events, "unknown", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ RRDDIM *rd_events_errors = rrddim_add(st_events, "errors", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+
+ RRDSET *st_reads = rrdset_create_localhost(
+ "netdata"
+ , "statsd_reads"
+ , NULL
+ , "statsd"
+ , NULL
+ , "Read operations made by the netdata statsd server"
+ , "reads/s"
+ , PLUGIN_STATSD_NAME
+ , "stats"
+ , 132012
+ , statsd.update_every
+ , RRDSET_TYPE_STACKED
+ );
+ RRDDIM *rd_reads_tcp = rrddim_add(st_reads, "tcp", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ RRDDIM *rd_reads_udp = rrddim_add(st_reads, "udp", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+
+ RRDSET *st_bytes = rrdset_create_localhost(
+ "netdata"
+ , "statsd_bytes"
+ , NULL
+ , "statsd"
+ , NULL
+ , "Bytes read by the netdata statsd server"
+ , "kilobits/s"
+ , PLUGIN_STATSD_NAME
+ , "stats"
+ , 132013
+ , statsd.update_every
+ , RRDSET_TYPE_STACKED
+ );
+ RRDDIM *rd_bytes_tcp = rrddim_add(st_bytes, "tcp", NULL, 8, BITS_IN_A_KILOBIT, RRD_ALGORITHM_INCREMENTAL);
+ RRDDIM *rd_bytes_udp = rrddim_add(st_bytes, "udp", NULL, 8, BITS_IN_A_KILOBIT, RRD_ALGORITHM_INCREMENTAL);
+
+ RRDSET *st_packets = rrdset_create_localhost(
+ "netdata"
+ , "statsd_packets"
+ , NULL
+ , "statsd"
+ , NULL
+ , "Network packets processed by the netdata statsd server"
+ , "packets/s"
+ , PLUGIN_STATSD_NAME
+ , "stats"
+ , 132014
+ , statsd.update_every
+ , RRDSET_TYPE_STACKED
+ );
+ RRDDIM *rd_packets_tcp = rrddim_add(st_packets, "tcp", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ RRDDIM *rd_packets_udp = rrddim_add(st_packets, "udp", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+
+ RRDSET *st_tcp_connects = rrdset_create_localhost(
+ "netdata"
+ , "tcp_connects"
+ , NULL
+ , "statsd"
+ , NULL
+ , "statsd server TCP connects and disconnects"
+ , "events"
+ , PLUGIN_STATSD_NAME
+ , "stats"
+ , 132015
+ , statsd.update_every
+ , RRDSET_TYPE_LINE
+ );
+ RRDDIM *rd_tcp_connects = rrddim_add(st_tcp_connects, "connects", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ RRDDIM *rd_tcp_disconnects = rrddim_add(st_tcp_connects, "disconnects", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+
+ RRDSET *st_tcp_connected = rrdset_create_localhost(
+ "netdata"
+ , "tcp_connected"
+ , NULL
+ , "statsd"
+ , NULL
+ , "statsd server TCP connected sockets"
+ , "connected"
+ , PLUGIN_STATSD_NAME
+ , "stats"
+ , 132016
+ , statsd.update_every
+ , RRDSET_TYPE_LINE
+ );
+ RRDDIM *rd_tcp_connected = rrddim_add(st_tcp_connected, "connected", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+
+ RRDSET *st_pcharts = rrdset_create_localhost(
+ "netdata"
+ , "private_charts"
+ , NULL
+ , "statsd"
+ , NULL
+ , "Private metric charts created by the netdata statsd server"
+ , "charts"
+ , PLUGIN_STATSD_NAME
+ , "stats"
+ , 132020
+ , statsd.update_every
+ , RRDSET_TYPE_AREA
+ );
+ RRDDIM *rd_pcharts = rrddim_add(st_pcharts, "charts", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+
+ RRDSET *stcpu_thread = rrdset_create_localhost(
+ "netdata"
+ , "plugin_statsd_charting_cpu"
+ , NULL
+ , "statsd"
+ , "netdata.statsd_cpu"
+ , "NetData statsd charting thread CPU usage"
+ , "milliseconds/s"
+ , PLUGIN_STATSD_NAME
+ , "stats"
+ , 132001
+ , statsd.update_every
+ , RRDSET_TYPE_STACKED
+ );
+
+ RRDDIM *rd_user = rrddim_add(stcpu_thread, "user", NULL, 1, 1000, RRD_ALGORITHM_INCREMENTAL);
+ RRDDIM *rd_system = rrddim_add(stcpu_thread, "system", NULL, 1, 1000, RRD_ALGORITHM_INCREMENTAL);
+ struct rusage thread;
+
+ for(i = 0; i < statsd.threads ;i++) {
+ char id[100 + 1];
+ char title[100 + 1];
+
+ snprintfz(id, 100, "plugin_statsd_collector%d_cpu", i + 1);
+ snprintfz(title, 100, "NetData statsd collector thread No %d CPU usage", i + 1);
+
+ statsd.collection_threads_status[i].st_cpu = rrdset_create_localhost(
+ "netdata"
+ , id
+ , NULL
+ , "statsd"
+ , "netdata.statsd_cpu"
+ , title
+ , "milliseconds/s"
+ , PLUGIN_STATSD_NAME
+ , "stats"
+ , 132002 + i
+ , statsd.update_every
+ , RRDSET_TYPE_STACKED
+ );
+
+ statsd.collection_threads_status[i].rd_user = rrddim_add(statsd.collection_threads_status[i].st_cpu, "user", NULL, 1, 1000, RRD_ALGORITHM_INCREMENTAL);
+ statsd.collection_threads_status[i].rd_system = rrddim_add(statsd.collection_threads_status[i].st_cpu, "system", NULL, 1, 1000, RRD_ALGORITHM_INCREMENTAL);
+ }
+
+ // ----------------------------------------------------------------------------------------------------------------
+ // statsd thread to turn metrics into charts
+
+ usec_t step = statsd.update_every * USEC_PER_SEC;
+ heartbeat_t hb;
+ heartbeat_init(&hb);
+ while(!netdata_exit) {
+ usec_t hb_dt = heartbeat_next(&hb, step);
+
+ statsd_flush_index_metrics(&statsd.gauges, statsd_flush_gauge);
+ statsd_flush_index_metrics(&statsd.counters, statsd_flush_counter);
+ statsd_flush_index_metrics(&statsd.meters, statsd_flush_meter);
+ statsd_flush_index_metrics(&statsd.timers, statsd_flush_timer);
+ statsd_flush_index_metrics(&statsd.histograms, statsd_flush_histogram);
+ statsd_flush_index_metrics(&statsd.sets, statsd_flush_set);
+
+ statsd_update_all_app_charts();
+
+ getrusage(RUSAGE_THREAD, &thread);
+
+ if(unlikely(netdata_exit))
+ break;
+
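+ // hb_dt is zero on the first iteration; in that case the charts are not advanced with rrdset_next()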
+ if(likely(hb_dt)) {
+ rrdset_next(st_metrics);
+ rrdset_next(st_useful_metrics);
+ rrdset_next(st_events);
+ rrdset_next(st_reads);
+ rrdset_next(st_bytes);
+ rrdset_next(st_packets);
+ rrdset_next(st_tcp_connects);
+ rrdset_next(st_tcp_connected);
+ rrdset_next(st_pcharts);
+ rrdset_next(stcpu_thread);
+ for(i = 0; i < statsd.threads ;i++)
+ rrdset_next(statsd.collection_threads_status[i].st_cpu);
+ }
+
+ rrddim_set_by_pointer(st_metrics, rd_metrics_gauge, (collected_number)statsd.gauges.metrics);
+ rrddim_set_by_pointer(st_metrics, rd_metrics_counter, (collected_number)statsd.counters.metrics);
+ rrddim_set_by_pointer(st_metrics, rd_metrics_timer, (collected_number)statsd.timers.metrics);
+ rrddim_set_by_pointer(st_metrics, rd_metrics_meter, (collected_number)statsd.meters.metrics);
+ rrddim_set_by_pointer(st_metrics, rd_metrics_histogram, (collected_number)statsd.histograms.metrics);
+ rrddim_set_by_pointer(st_metrics, rd_metrics_set, (collected_number)statsd.sets.metrics);
+ rrdset_done(st_metrics);
+
+ rrddim_set_by_pointer(st_useful_metrics, rd_useful_metrics_gauge, (collected_number)statsd.gauges.useful);
+ rrddim_set_by_pointer(st_useful_metrics, rd_useful_metrics_counter, (collected_number)statsd.counters.useful);
+ rrddim_set_by_pointer(st_useful_metrics, rd_useful_metrics_timer, (collected_number)statsd.timers.useful);
+ rrddim_set_by_pointer(st_useful_metrics, rd_useful_metrics_meter, (collected_number)statsd.meters.useful);
+ rrddim_set_by_pointer(st_useful_metrics, rd_useful_metrics_histogram, (collected_number)statsd.histograms.useful);
+ rrddim_set_by_pointer(st_useful_metrics, rd_useful_metrics_set, (collected_number)statsd.sets.useful);
+ rrdset_done(st_useful_metrics);
+
+ rrddim_set_by_pointer(st_events, rd_events_gauge, (collected_number)statsd.gauges.events);
+ rrddim_set_by_pointer(st_events, rd_events_counter, (collected_number)statsd.counters.events);
+ rrddim_set_by_pointer(st_events, rd_events_timer, (collected_number)statsd.timers.events);
+ rrddim_set_by_pointer(st_events, rd_events_meter, (collected_number)statsd.meters.events);
+ rrddim_set_by_pointer(st_events, rd_events_histogram, (collected_number)statsd.histograms.events);
+ rrddim_set_by_pointer(st_events, rd_events_set, (collected_number)statsd.sets.events);
+ rrddim_set_by_pointer(st_events, rd_events_unknown, (collected_number)statsd.unknown_types);
+ rrddim_set_by_pointer(st_events, rd_events_errors, (collected_number)statsd.socket_errors);
+ rrdset_done(st_events);
+
+ rrddim_set_by_pointer(st_reads, rd_reads_tcp, (collected_number)statsd.tcp_socket_reads);
+ rrddim_set_by_pointer(st_reads, rd_reads_udp, (collected_number)statsd.udp_socket_reads);
+ rrdset_done(st_reads);
+
+ rrddim_set_by_pointer(st_bytes, rd_bytes_tcp, (collected_number)statsd.tcp_bytes_read);
+ rrddim_set_by_pointer(st_bytes, rd_bytes_udp, (collected_number)statsd.udp_bytes_read);
+ rrdset_done(st_bytes);
+
+ rrddim_set_by_pointer(st_packets, rd_packets_tcp, (collected_number)statsd.tcp_packets_received);
+ rrddim_set_by_pointer(st_packets, rd_packets_udp, (collected_number)statsd.udp_packets_received);
+ rrdset_done(st_packets);
+
+ rrddim_set_by_pointer(st_tcp_connects, rd_tcp_connects, (collected_number)statsd.tcp_socket_connects);
+ rrddim_set_by_pointer(st_tcp_connects, rd_tcp_disconnects, (collected_number)statsd.tcp_socket_disconnects);
+ rrdset_done(st_tcp_connects);
+
+ rrddim_set_by_pointer(st_tcp_connected, rd_tcp_connected, (collected_number)statsd.tcp_socket_connected);
+ rrdset_done(st_tcp_connected);
+
+ rrddim_set_by_pointer(st_pcharts, rd_pcharts, (collected_number)statsd.private_charts);
+ rrdset_done(st_pcharts);
+
+ rrddim_set_by_pointer(stcpu_thread, rd_user, thread.ru_utime.tv_sec * 1000000ULL + thread.ru_utime.tv_usec);
+ rrddim_set_by_pointer(stcpu_thread, rd_system, thread.ru_stime.tv_sec * 1000000ULL + thread.ru_stime.tv_usec);
+ rrdset_done(stcpu_thread);
+
+ for(i = 0; i < statsd.threads ;i++) {
+ rrddim_set_by_pointer(statsd.collection_threads_status[i].st_cpu, statsd.collection_threads_status[i].rd_user, statsd.collection_threads_status[i].rusage.ru_utime.tv_sec * 1000000ULL + statsd.collection_threads_status[i].rusage.ru_utime.tv_usec);
+ rrddim_set_by_pointer(statsd.collection_threads_status[i].st_cpu, statsd.collection_threads_status[i].rd_system, statsd.collection_threads_status[i].rusage.ru_stime.tv_sec * 1000000ULL + statsd.collection_threads_status[i].rusage.ru_stime.tv_usec);
+ rrdset_done(statsd.collection_threads_status[i].st_cpu);
+ }
+ }
+
+cleanup: ; // added semi-colon to prevent older gcc error: label at end of compound statement
+ netdata_thread_cleanup_pop(1);
+ return NULL;
+}
diff --git a/collectors/statsd.plugin/statsd.h b/collectors/statsd.plugin/statsd.h
new file mode 100644
index 000000000..b741be76d
--- /dev/null
+++ b/collectors/statsd.plugin/statsd.h
@@ -0,0 +1,25 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_STATSD_H
+#define NETDATA_STATSD_H 1
+
+#include "../../daemon/common.h"
+
+#define STATSD_LISTEN_PORT 8125
+#define STATSD_LISTEN_BACKLOG 4096
+
+#define NETDATA_PLUGIN_HOOK_STATSD \
+ { \
+ .name = "STATSD", \
+ .config_section = NULL, \
+ .config_name = NULL, \
+ .enabled = 1, \
+ .thread = NULL, \
+ .init_routine = NULL, \
+ .start_routine = statsd_main \
+ },
+
+
+extern void *statsd_main(void *ptr);
+
+#endif //NETDATA_STATSD_H
diff --git a/collectors/tc.plugin/Makefile.am b/collectors/tc.plugin/Makefile.am
new file mode 100644
index 000000000..f77e67d91
--- /dev/null
+++ b/collectors/tc.plugin/Makefile.am
@@ -0,0 +1,20 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+
+CLEANFILES = \
+ tc-qos-helper.sh \
+ $(NULL)
+
+include $(top_srcdir)/build/subst.inc
+SUFFIXES = .in
+
+dist_plugins_SCRIPTS = \
+ tc-qos-helper.sh \
+ $(NULL)
+
+dist_noinst_DATA = \
+ tc-qos-helper.sh.in \
+ README.md \
+ $(NULL)
diff --git a/collectors/tc.plugin/Makefile.in b/collectors/tc.plugin/Makefile.in
new file mode 100644
index 000000000..d336e1f0d
--- /dev/null
+++ b/collectors/tc.plugin/Makefile.in
@@ -0,0 +1,562 @@
+# Makefile.in generated by automake 1.14.1 from Makefile.am.
+# @configure_input@
+
+# Copyright (C) 1994-2013 Free Software Foundation, Inc.
+
+# This Makefile.in is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
+# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE.
+
+@SET_MAKE@
+
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+
+VPATH = @srcdir@
+am__is_gnu_make = test -n '$(MAKEFILE_LIST)' && test -n '$(MAKELEVEL)'
+am__make_running_with_option = \
+ case $${target_option-} in \
+ ?) ;; \
+ *) echo "am__make_running_with_option: internal error: invalid" \
+ "target option '$${target_option-}' specified" >&2; \
+ exit 1;; \
+ esac; \
+ has_opt=no; \
+ sane_makeflags=$$MAKEFLAGS; \
+ if $(am__is_gnu_make); then \
+ sane_makeflags=$$MFLAGS; \
+ else \
+ case $$MAKEFLAGS in \
+ *\\[\ \ ]*) \
+ bs=\\; \
+ sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \
+ | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \
+ esac; \
+ fi; \
+ skip_next=no; \
+ strip_trailopt () \
+ { \
+ flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \
+ }; \
+ for flg in $$sane_makeflags; do \
+ test $$skip_next = yes && { skip_next=no; continue; }; \
+ case $$flg in \
+ *=*|--*) continue;; \
+ -*I) strip_trailopt 'I'; skip_next=yes;; \
+ -*I?*) strip_trailopt 'I';; \
+ -*O) strip_trailopt 'O'; skip_next=yes;; \
+ -*O?*) strip_trailopt 'O';; \
+ -*l) strip_trailopt 'l'; skip_next=yes;; \
+ -*l?*) strip_trailopt 'l';; \
+ -[dEDm]) skip_next=yes;; \
+ -[JT]) skip_next=yes;; \
+ esac; \
+ case $$flg in \
+ *$$target_option*) has_opt=yes; break;; \
+ esac; \
+ done; \
+ test $$has_opt = yes
+am__make_dryrun = (target_option=n; $(am__make_running_with_option))
+am__make_keepgoing = (target_option=k; $(am__make_running_with_option))
+pkgdatadir = $(datadir)/@PACKAGE@
+pkgincludedir = $(includedir)/@PACKAGE@
+pkglibdir = $(libdir)/@PACKAGE@
+pkglibexecdir = $(libexecdir)/@PACKAGE@
+am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd
+install_sh_DATA = $(install_sh) -c -m 644
+install_sh_PROGRAM = $(install_sh) -c
+install_sh_SCRIPT = $(install_sh) -c
+INSTALL_HEADER = $(INSTALL_DATA)
+transform = $(program_transform_name)
+NORMAL_INSTALL = :
+PRE_INSTALL = :
+POST_INSTALL = :
+NORMAL_UNINSTALL = :
+PRE_UNINSTALL = :
+POST_UNINSTALL = :
+build_triplet = @build@
+host_triplet = @host@
+DIST_COMMON = $(top_srcdir)/build/subst.inc $(srcdir)/Makefile.in \
+ $(srcdir)/Makefile.am $(dist_plugins_SCRIPTS) \
+ $(dist_noinst_DATA)
+subdir = collectors/tc.plugin
+ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
+am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \
+ $(top_srcdir)/build/m4/ax_c__generic.m4 \
+ $(top_srcdir)/build/m4/ax_c_lto.m4 \
+ $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \
+ $(top_srcdir)/build/m4/ax_c_mallopt.m4 \
+ $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \
+ $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \
+ $(top_srcdir)/build/m4/ax_pthread.m4 \
+ $(top_srcdir)/build/m4/jemalloc.m4 \
+ $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac
+am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
+ $(ACLOCAL_M4)
+mkinstalldirs = $(install_sh) -d
+CONFIG_HEADER = $(top_builddir)/config.h
+CONFIG_CLEAN_FILES =
+CONFIG_CLEAN_VPATH_FILES =
+am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`;
+am__vpath_adj = case $$p in \
+ $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \
+ *) f=$$p;; \
+ esac;
+am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`;
+am__install_max = 40
+am__nobase_strip_setup = \
+ srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'`
+am__nobase_strip = \
+ for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||"
+am__nobase_list = $(am__nobase_strip_setup); \
+ for p in $$list; do echo "$$p $$p"; done | \
+ sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \
+ $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \
+ if (++n[$$2] == $(am__install_max)) \
+ { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \
+ END { for (dir in files) print dir, files[dir] }'
+am__base_list = \
+ sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \
+ sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g'
+am__uninstall_files_from_dir = { \
+ test -z "$$files" \
+ || { test ! -d "$$dir" && test ! -f "$$dir" && test ! -r "$$dir"; } \
+ || { echo " ( cd '$$dir' && rm -f" $$files ")"; \
+ $(am__cd) "$$dir" && rm -f $$files; }; \
+ }
+am__installdirs = "$(DESTDIR)$(pluginsdir)"
+SCRIPTS = $(dist_plugins_SCRIPTS)
+AM_V_P = $(am__v_P_@AM_V@)
+am__v_P_ = $(am__v_P_@AM_DEFAULT_V@)
+am__v_P_0 = false
+am__v_P_1 = :
+AM_V_GEN = $(am__v_GEN_@AM_V@)
+am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@)
+am__v_GEN_0 = @echo " GEN " $@;
+am__v_GEN_1 =
+AM_V_at = $(am__v_at_@AM_V@)
+am__v_at_ = $(am__v_at_@AM_DEFAULT_V@)
+am__v_at_0 = @
+am__v_at_1 =
+SOURCES =
+DIST_SOURCES =
+am__can_run_installinfo = \
+ case $$AM_UPDATE_INFO_DIR in \
+ n|no|NO) false;; \
+ *) (install-info --version) >/dev/null 2>&1;; \
+ esac
+DATA = $(dist_noinst_DATA)
+am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP)
+DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
+ACLOCAL = @ACLOCAL@
+AMTAR = @AMTAR@
+AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@
+AUTOCONF = @AUTOCONF@
+AUTOHEADER = @AUTOHEADER@
+AUTOMAKE = @AUTOMAKE@
+AWK = @AWK@
+CC = @CC@
+CCDEPMODE = @CCDEPMODE@
+CFLAGS = @CFLAGS@
+CPP = @CPP@
+CPPFLAGS = @CPPFLAGS@
+CYGPATH_W = @CYGPATH_W@
+DEFS = @DEFS@
+DEPDIR = @DEPDIR@
+ECHO_C = @ECHO_C@
+ECHO_N = @ECHO_N@
+ECHO_T = @ECHO_T@
+EGREP = @EGREP@
+EXEEXT = @EXEEXT@
+GREP = @GREP@
+INSTALL = @INSTALL@
+INSTALL_DATA = @INSTALL_DATA@
+INSTALL_PROGRAM = @INSTALL_PROGRAM@
+INSTALL_SCRIPT = @INSTALL_SCRIPT@
+INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@
+IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@
+LDFLAGS = @LDFLAGS@
+LIBCAP_CFLAGS = @LIBCAP_CFLAGS@
+LIBCAP_LIBS = @LIBCAP_LIBS@
+LIBMNL_CFLAGS = @LIBMNL_CFLAGS@
+LIBMNL_LIBS = @LIBMNL_LIBS@
+LIBOBJS = @LIBOBJS@
+LIBS = @LIBS@
+LTLIBOBJS = @LTLIBOBJS@
+MAINT = @MAINT@
+MAKEINFO = @MAKEINFO@
+MATH_CFLAGS = @MATH_CFLAGS@
+MATH_LIBS = @MATH_LIBS@
+MKDIR_P = @MKDIR_P@
+NFACCT_CFLAGS = @NFACCT_CFLAGS@
+NFACCT_LIBS = @NFACCT_LIBS@
+OBJEXT = @OBJEXT@
+OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@
+OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@
+OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@
+OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@
+OPTIONAL_MATH_CLFAGS = @OPTIONAL_MATH_CLFAGS@
+OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@
+OPTIONAL_NFACCT_CLFAGS = @OPTIONAL_NFACCT_CLFAGS@
+OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@
+OPTIONAL_UUID_CLFAGS = @OPTIONAL_UUID_CLFAGS@
+OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@
+OPTIONAL_ZLIB_CLFAGS = @OPTIONAL_ZLIB_CLFAGS@
+OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@
+PACKAGE = @PACKAGE@
+PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@
+PACKAGE_NAME = @PACKAGE_NAME@
+PACKAGE_RPM_RELEASE = @PACKAGE_RPM_RELEASE@
+PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@
+PACKAGE_STRING = @PACKAGE_STRING@
+PACKAGE_TARNAME = @PACKAGE_TARNAME@
+PACKAGE_URL = @PACKAGE_URL@
+PACKAGE_VERSION = @PACKAGE_VERSION@
+PATH_SEPARATOR = @PATH_SEPARATOR@
+PKG_CONFIG = @PKG_CONFIG@
+PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@
+PKG_CONFIG_PATH = @PKG_CONFIG_PATH@
+PTHREAD_CC = @PTHREAD_CC@
+PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
+PTHREAD_LIBS = @PTHREAD_LIBS@
+SET_MAKE = @SET_MAKE@
+SHELL = @SHELL@
+SSE_CANDIDATE = @SSE_CANDIDATE@
+STRIP = @STRIP@
+UUID_CFLAGS = @UUID_CFLAGS@
+UUID_LIBS = @UUID_LIBS@
+VERSION = @VERSION@
+ZLIB_CFLAGS = @ZLIB_CFLAGS@
+ZLIB_LIBS = @ZLIB_LIBS@
+abs_builddir = @abs_builddir@
+abs_srcdir = @abs_srcdir@
+abs_top_builddir = @abs_top_builddir@
+abs_top_srcdir = @abs_top_srcdir@
+ac_ct_CC = @ac_ct_CC@
+am__include = @am__include@
+am__leading_dot = @am__leading_dot@
+am__quote = @am__quote@
+am__tar = @am__tar@
+am__untar = @am__untar@
+ax_pthread_config = @ax_pthread_config@
+bindir = @bindir@
+build = @build@
+build_alias = @build_alias@
+build_cpu = @build_cpu@
+build_os = @build_os@
+build_target = @build_target@
+build_vendor = @build_vendor@
+builddir = @builddir@
+cachedir = @cachedir@
+chartsdir = @chartsdir@
+configdir = @configdir@
+datadir = @datadir@
+datarootdir = @datarootdir@
+docdir = @docdir@
+dvidir = @dvidir@
+exec_prefix = @exec_prefix@
+has_jemalloc = @has_jemalloc@
+has_tcmalloc = @has_tcmalloc@
+host = @host@
+host_alias = @host_alias@
+host_cpu = @host_cpu@
+host_os = @host_os@
+host_vendor = @host_vendor@
+htmldir = @htmldir@
+includedir = @includedir@
+infodir = @infodir@
+install_sh = @install_sh@
+libconfigdir = @libconfigdir@
+libdir = @libdir@
+libexecdir = @libexecdir@
+localedir = @localedir@
+localstatedir = @localstatedir@
+logdir = @logdir@
+mandir = @mandir@
+mkdir_p = @mkdir_p@
+nodedir = @nodedir@
+oldincludedir = @oldincludedir@
+pdfdir = @pdfdir@
+pluginsdir = @pluginsdir@
+prefix = @prefix@
+program_transform_name = @program_transform_name@
+psdir = @psdir@
+pythondir = @pythondir@
+registrydir = @registrydir@
+sbindir = @sbindir@
+sharedstatedir = @sharedstatedir@
+srcdir = @srcdir@
+sysconfdir = @sysconfdir@
+target_alias = @target_alias@
+top_build_prefix = @top_build_prefix@
+top_builddir = @top_builddir@
+top_srcdir = @top_srcdir@
+varlibdir = @varlibdir@
+webdir = @webdir@
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+CLEANFILES = \
+ tc-qos-helper.sh \
+ $(NULL)
+
+SUFFIXES = .in
+dist_plugins_SCRIPTS = \
+ tc-qos-helper.sh \
+ $(NULL)
+
+dist_noinst_DATA = \
+ tc-qos-helper.sh.in \
+ README.md \
+ $(NULL)
+
+all: all-am
+
+.SUFFIXES:
+.SUFFIXES: .in
+$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(top_srcdir)/build/subst.inc $(am__configure_deps)
+ @for dep in $?; do \
+ case '$(am__configure_deps)' in \
+ *$$dep*) \
+ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \
+ && { if test -f $@; then exit 0; else break; fi; }; \
+ exit 1;; \
+ esac; \
+ done; \
+ echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu collectors/tc.plugin/Makefile'; \
+ $(am__cd) $(top_srcdir) && \
+ $(AUTOMAKE) --gnu collectors/tc.plugin/Makefile
+.PRECIOUS: Makefile
+Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
+ @case '$?' in \
+ *config.status*) \
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \
+ *) \
+ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \
+ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \
+ esac;
+$(top_srcdir)/build/subst.inc:
+
+$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+
+$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(am__aclocal_m4_deps):
+install-dist_pluginsSCRIPTS: $(dist_plugins_SCRIPTS)
+ @$(NORMAL_INSTALL)
+ @list='$(dist_plugins_SCRIPTS)'; test -n "$(pluginsdir)" || list=; \
+ if test -n "$$list"; then \
+ echo " $(MKDIR_P) '$(DESTDIR)$(pluginsdir)'"; \
+ $(MKDIR_P) "$(DESTDIR)$(pluginsdir)" || exit 1; \
+ fi; \
+ for p in $$list; do \
+ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
+ if test -f "$$d$$p"; then echo "$$d$$p"; echo "$$p"; else :; fi; \
+ done | \
+ sed -e 'p;s,.*/,,;n' \
+ -e 'h;s|.*|.|' \
+ -e 'p;x;s,.*/,,;$(transform)' | sed 'N;N;N;s,\n, ,g' | \
+ $(AWK) 'BEGIN { files["."] = ""; dirs["."] = 1; } \
+ { d=$$3; if (dirs[d] != 1) { print "d", d; dirs[d] = 1 } \
+ if ($$2 == $$4) { files[d] = files[d] " " $$1; \
+ if (++n[d] == $(am__install_max)) { \
+ print "f", d, files[d]; n[d] = 0; files[d] = "" } } \
+ else { print "f", d "/" $$4, $$1 } } \
+ END { for (d in files) print "f", d, files[d] }' | \
+ while read type dir files; do \
+ if test "$$dir" = .; then dir=; else dir=/$$dir; fi; \
+ test -z "$$files" || { \
+ echo " $(INSTALL_SCRIPT) $$files '$(DESTDIR)$(pluginsdir)$$dir'"; \
+ $(INSTALL_SCRIPT) $$files "$(DESTDIR)$(pluginsdir)$$dir" || exit $$?; \
+ } \
+ ; done
+
+uninstall-dist_pluginsSCRIPTS:
+ @$(NORMAL_UNINSTALL)
+ @list='$(dist_plugins_SCRIPTS)'; test -n "$(pluginsdir)" || exit 0; \
+ files=`for p in $$list; do echo "$$p"; done | \
+ sed -e 's,.*/,,;$(transform)'`; \
+ dir='$(DESTDIR)$(pluginsdir)'; $(am__uninstall_files_from_dir)
+tags TAGS:
+
+ctags CTAGS:
+
+cscope cscopelist:
+
+
+distdir: $(DISTFILES)
+ @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+ list='$(DISTFILES)'; \
+ dist_files=`for file in $$list; do echo $$file; done | \
+ sed -e "s|^$$srcdirstrip/||;t" \
+ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \
+ case $$dist_files in \
+ */*) $(MKDIR_P) `echo "$$dist_files" | \
+ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \
+ sort -u` ;; \
+ esac; \
+ for file in $$dist_files; do \
+ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \
+ if test -d $$d/$$file; then \
+ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \
+ if test -d "$(distdir)/$$file"; then \
+ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+ fi; \
+ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \
+ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \
+ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+ fi; \
+ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \
+ else \
+ test -f "$(distdir)/$$file" \
+ || cp -p $$d/$$file "$(distdir)/$$file" \
+ || exit 1; \
+ fi; \
+ done
+check-am: all-am
+check: check-am
+all-am: Makefile $(SCRIPTS) $(DATA)
+installdirs:
+ for dir in "$(DESTDIR)$(pluginsdir)"; do \
+ test -z "$$dir" || $(MKDIR_P) "$$dir"; \
+ done
+install: install-am
+install-exec: install-exec-am
+install-data: install-data-am
+uninstall: uninstall-am
+
+install-am: all-am
+ @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
+
+installcheck: installcheck-am
+install-strip:
+ if test -z '$(STRIP)'; then \
+ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+ install; \
+ else \
+ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+ "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \
+ fi
+mostlyclean-generic:
+
+clean-generic:
+ -test -z "$(CLEANFILES)" || rm -f $(CLEANFILES)
+
+distclean-generic:
+ -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
+ -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES)
+
+maintainer-clean-generic:
+ @echo "This command is intended for maintainers to use"
+ @echo "it deletes files that may require special tools to rebuild."
+ -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES)
+clean: clean-am
+
+clean-am: clean-generic mostlyclean-am
+
+distclean: distclean-am
+ -rm -f Makefile
+distclean-am: clean-am distclean-generic
+
+dvi: dvi-am
+
+dvi-am:
+
+html: html-am
+
+html-am:
+
+info: info-am
+
+info-am:
+
+install-data-am: install-dist_pluginsSCRIPTS
+
+install-dvi: install-dvi-am
+
+install-dvi-am:
+
+install-exec-am:
+
+install-html: install-html-am
+
+install-html-am:
+
+install-info: install-info-am
+
+install-info-am:
+
+install-man:
+
+install-pdf: install-pdf-am
+
+install-pdf-am:
+
+install-ps: install-ps-am
+
+install-ps-am:
+
+installcheck-am:
+
+maintainer-clean: maintainer-clean-am
+ -rm -f Makefile
+maintainer-clean-am: distclean-am maintainer-clean-generic
+
+mostlyclean: mostlyclean-am
+
+mostlyclean-am: mostlyclean-generic
+
+pdf: pdf-am
+
+pdf-am:
+
+ps: ps-am
+
+ps-am:
+
+uninstall-am: uninstall-dist_pluginsSCRIPTS
+
+.MAKE: install-am install-strip
+
+.PHONY: all all-am check check-am clean clean-generic cscopelist-am \
+ ctags-am distclean distclean-generic distdir dvi dvi-am html \
+ html-am info info-am install install-am install-data \
+ install-data-am install-dist_pluginsSCRIPTS install-dvi \
+ install-dvi-am install-exec install-exec-am install-html \
+ install-html-am install-info install-info-am install-man \
+ install-pdf install-pdf-am install-ps install-ps-am \
+ install-strip installcheck installcheck-am installdirs \
+ maintainer-clean maintainer-clean-generic mostlyclean \
+ mostlyclean-generic pdf pdf-am ps ps-am tags-am uninstall \
+ uninstall-am uninstall-dist_pluginsSCRIPTS
+
+.in:
+ if sed \
+ -e 's#[@]localstatedir_POST@#$(localstatedir)#g' \
+ -e 's#[@]sbindir_POST@#$(sbindir)#g' \
+ -e 's#[@]sysconfdir_POST@#$(sysconfdir)#g' \
+ -e 's#[@]pythondir_POST@#$(pythondir)#g' \
+ -e 's#[@]configdir_POST@#$(configdir)#g' \
+ -e 's#[@]libconfigdir_POST@#$(libconfigdir)#g' \
+ -e 's#[@]cachedir_POST@#$(cachedir)#g' \
+ $< > $@.tmp; then \
+ mv "$@.tmp" "$@"; \
+ else \
+ rm -f "$@.tmp"; \
+ false; \
+ fi
+
+# Tell versions [3.59,3.63) of GNU make to not export all variables.
+# Otherwise a system limit (for SysV at least) may be exceeded.
+.NOEXPORT:
diff --git a/collectors/tc.plugin/README.md b/collectors/tc.plugin/README.md
new file mode 100644
index 000000000..6670c491f
--- /dev/null
+++ b/collectors/tc.plugin/README.md
@@ -0,0 +1,183 @@
+## tc.plugin
+
+Live demo - **[see it in action here](https://registry.my-netdata.io/#menu_tc)** !
+
+![qos](https://cloud.githubusercontent.com/assets/2662304/14439411/b7f36254-0033-11e6-93f0-c739bb6a1c3a.gif)
+
+Netdata monitors `tc` QoS classes for all interfaces.
+
+If you also use [FireQOS](http://firehol.org/tutorial/fireqos-new-user/), netdata will also collect
+interface and class names.
+
+There is a [shell helper](tc-qos-helper.sh.in) for this (all parsing is done by the plugin
+in `C` code - this shell script is just a configuration for the command to run to get `tc` output).
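+
+The command the plugin runs is configurable: the `script to run to get tc values` option in the
+`[plugin:tc]` section of `netdata.conf` points to this helper by default. A minimal sketch of
+overriding it (the path shown is only an example - use the path where the helper is installed on
+your system):
+
+```
+[plugin:tc]
+    # example path - adjust to your installation
+    script to run to get tc values = /usr/libexec/netdata/plugins.d/tc-qos-helper.sh
+```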
+
+The source of the tc plugin is [here](plugin_tc.c). It is somewhat complex, because a state
+machine was needed to keep track of all the `tc` classes, including the pseudo classes tc
+dynamically creates.
+
+## Motivation
+
+One category of metrics missing in Linux monitoring is bandwidth consumption for each open
+socket (inbound and outbound traffic). So, you cannot tell how much bandwidth your web server,
+your database server, your backup, your ssh sessions, etc. are using.
+
+To solve this problem, the most *adventurous* Linux monitoring tools install kernel modules to
+capture all traffic, analyze it and provide reports per application. This is a lot of work, it is
+CPU intensive, and it carries a great degree of risk (the kernel modules involved might affect the
+stability of the whole system). Not to mention that such solutions are probably better suited
+for a core Linux router in your network.
+
+Others use NFACCT, the netfilter accounting module, which is already part of the Linux firewall.
+However, this would require configuring a firewall on every system on which you want to measure bandwidth.
+
+QoS monitoring attempts to solve this in a much cleaner way.
+
+## Introduction to QoS
+
+One of the features the Linux kernel has, but which is rarely used, is its ability to
+**apply QoS on traffic**. Even more interesting is that it can apply QoS to **both inbound and
+outbound traffic**.
+
+QoS is about two features:
+
+1. **Classify traffic**
+
+ Classification is the process of organizing traffic in groups, called **classes**.
+ Classification can evaluate every aspect of network packets, like source and destination ports,
+ source and destination IPs, netfilter marks, etc.
+
+ When you classify traffic, you just assign a label to it. For example: **I call `web server`
+ the traffic from and to my server's tcp/80 and tcp/443, while I call `web surfing` all other
+ tcp/80 and tcp/443 traffic**. You can use any combination you like; there is no limit.
+ A minimal sketch of how such a classification looks in raw `tc` is shown right after this list.
+
+2. **Apply traffic shaping rules to these classes**
+
+ Traffic shaping is used to control how the network interface bandwidth should be shared among the
+ classes. Of course, for monitoring alone we are not interested in this feature;
+ classification is enough for monitoring everything.
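+
+As an illustration only, here is a minimal sketch of such a classification done with raw `tc`
+(the device name and rates are hypothetical - FireQOS, presented below, generates commands like
+these for you):
+
+```sh
+# hypothetical example: one HTB qdisc with two classes on eth0
+tc qdisc add dev eth0 root handle 1: htb default 20
+tc class add dev eth0 parent 1: classid 1:10 htb rate 40mbit   # "web server"
+tc class add dev eth0 parent 1: classid 1:20 htb rate 10mbit   # everything else
+# classify traffic from our own tcp/80 and tcp/443 as "web server"
+tc filter add dev eth0 parent 1: protocol ip prio 1 u32 match ip sport 80 0xffff flowid 1:10
+tc filter add dev eth0 parent 1: protocol ip prio 1 u32 match ip sport 443 0xffff flowid 1:10
+```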
+
+The key reasons for applying QoS on all servers (even cloud ones) are:
+
+ - **ensure administrative tasks (like ssh, dns, etc) will always have a small but guaranteed
+ bandwidth.** QoS can guarantee that services like ssh, dns, ntp, etc will always have a small
+ supply of bandwidth. So, no matter what happens, you will be able to ssh to your server and
+ DNS will always work.
+
+ - **ensure other administrative tasks will not monopolize all the available bandwidth.**
+ Services like backups, file copies, database dumps, etc. can easily monopolize all the
+ available bandwidth. It is common, for example, for a nightly backup or a huge file transfer
+ to negatively influence the end-user experience. QoS can fix that.
+
+ - **ensure each end-user connection will get a fair cut of the available bandwidth.**
+ Several QoS queuing disciplines in Linux do this automatically, without any configuration from you.
+ The result is that new sockets are favored over older ones, so users get a snappier
+ experience while others are transferring large amounts of traffic.
+
+ - **protect the servers from DDoS attacks.**
+ When your system is under a DDoS attack, it receives a lot more traffic than it can handle and
+ your applications will probably crash. Setting a limit on the inbound traffic using QoS will
+ protect your servers (by throttling the requests) and, depending on the size of the attack, may
+ allow your legitimate users to access the server while the attack is taking place.
+
+
+Once **traffic classification** is applied, netdata can visualize the bandwidth consumption per
+class in real-time (no configuration is needed for netdata - it will figure it out).
+
+QoS is extremely light. You configure it once, and that is it. It will not bother you again,
+and it will not use any noticeable CPU resources, especially on application and database servers.
+
+## QoS in Linux? Have you lost your mind?
+
+Yes I know... but no, I have not!
+
+Of course, `tc` is probably **the most undocumented, complicated and unfriendly** command in Linux.
+
+For example, to match a simple port range in `tc`, e.g. all the high ports from 1025 to 65535
+inclusive, you have to match all of these:
+
+```
+1025/0xffff 1026/0xfffe 1028/0xfffc 1032/0xfff8 1040/0xfff0
+1056/0xffe0 1088/0xffc0 1152/0xff80 1280/0xff00 1536/0xfe00
+2048/0xf800 4096/0xf000 8192/0xe000 16384/0xc000 32768/0x8000
+```
+
+I know what you are thinking right now! **And I agree!**
+
+This is why I wrote **[FireQOS](https://firehol.org/tutorial/fireqos-new-user/)**, a tool to
+simplify QoS management in Linux.
+
+The **[FireHOL](https://firehol.org/)** package already distributes **[FireQOS](https://firehol.org/tutorial/fireqos-new-user/)**.
+Check the **[FireQOS tutorial](https://firehol.org/tutorial/fireqos-new-user/)**
+to learn how to write your own QoS configuration.
+
+With **[FireQOS](https://firehol.org/tutorial/fireqos-new-user/)**, it is **really simple for everyone
+to use QoS in Linux**. Just install the package `firehol`. It should already be available for your
+distribution. If not, check the **[FireHOL Installation Guide](https://firehol.org/installing/)**.
+After that, you will have the `fireqos` command.
+
+This is the file `/etc/firehol/fireqos.conf` we use at the netdata demo site:
+
+```sh
+ # configure the netdata ports
+ server_netdata_ports="tcp/19999"
+
+ interface eth0 world bidirectional ethernet balanced rate 50Mbit
+ class arp
+ match arp
+
+ class icmp
+ match icmp
+
+ class dns commit 1Mbit
+ server dns
+ client dns
+
+ class ntp
+ server ntp
+ client ntp
+
+ class ssh commit 2Mbit
+ server ssh
+ client ssh
+
+ class rsync commit 2Mbit max 10Mbit
+ server rsync
+ client rsync
+
+ class web_server commit 40Mbit
+ server http
+ server netdata
+
+ class client
+ client surfing
+
+ class nms commit 1Mbit
+ match input src 10.2.3.5
+```
+
+Nothing more is needed. You just run `fireqos start` to apply this configuration, restart netdata,
+and you have real-time visualization of the bandwidth consumption of your applications. FireQOS is
+not a daemon; it just converts the configuration to `tc` commands, runs them, and exits.
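+
+For example (assuming a systemd based system - use your init system's restart command otherwise):
+
+```sh
+fireqos start               # apply /etc/firehol/fireqos.conf as tc commands
+systemctl restart netdata   # restart netdata, as suggested above
+```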
+
+**IMPORTANT**: If you copy this configuration to apply it to your system, please adapt the
+speeds - experiment in non-production environments to learn the tool, before applying it on
+your servers.
+
+And this is what you are going to get:
+
+![image](https://cloud.githubusercontent.com/assets/2662304/14436322/c91d90a4-0024-11e6-9fb1-57cdef1580df.png)
+
+---
+
+## More examples
+
+This is QoS from a Linux router. Notice these features:
+
+1. It is real-time (per second updates)
+2. QoS really works in Linux - notice how the `background` traffic is squeezed when `surfing` needs it.
+
+![test2](https://cloud.githubusercontent.com/assets/2662304/14093004/68966020-f553-11e5-98fe-ffee2086fafd.gif)
+
diff --git a/collectors/tc.plugin/plugin_tc.c b/collectors/tc.plugin/plugin_tc.c
new file mode 100644
index 000000000..083cc2986
--- /dev/null
+++ b/collectors/tc.plugin/plugin_tc.c
@@ -0,0 +1,1168 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "plugin_tc.h"
+
+#define RRD_TYPE_TC "tc"
+#define PLUGIN_TC_NAME "tc.plugin"
+
+// ----------------------------------------------------------------------------
+// /sbin/tc processor
+// this requires the script plugins.d/tc-qos-helper.sh
+
+#define TC_LINE_MAX 1024
+
+struct tc_class {
+ avl avl;
+
+ char *id;
+ uint32_t hash;
+
+ char *name;
+
+ char *leafid;
+ uint32_t leaf_hash;
+
+ char *parentid;
+ uint32_t parent_hash;
+
+ char hasparent;
+ char isleaf;
+ char isqdisc;
+ char render;
+
+ unsigned long long bytes;
+ unsigned long long packets;
+ unsigned long long dropped;
+ unsigned long long overlimits;
+ unsigned long long requeues;
+ unsigned long long lended;
+ unsigned long long borrowed;
+ unsigned long long giants;
+ unsigned long long tokens;
+ unsigned long long ctokens;
+
+ RRDDIM *rd_bytes;
+ RRDDIM *rd_packets;
+ RRDDIM *rd_dropped;
+ RRDDIM *rd_tokens;
+ RRDDIM *rd_ctokens;
+
+ char name_updated;
+ char updated; // updated bytes
+ int unupdated; // the number of times, this has been found un-updated
+
+ struct tc_class *next;
+ struct tc_class *prev;
+};
+
+struct tc_device {
+ avl avl;
+
+ char *id;
+ uint32_t hash;
+
+ char *name;
+ char *family;
+
+ char name_updated;
+ char family_updated;
+
+ char enabled;
+ char enabled_bytes;
+ char enabled_packets;
+ char enabled_dropped;
+ char enabled_tokens;
+ char enabled_ctokens;
+ char enabled_all_classes_qdiscs;
+
+ RRDSET *st_bytes;
+ RRDSET *st_packets;
+ RRDSET *st_dropped;
+ RRDSET *st_tokens;
+ RRDSET *st_ctokens;
+
+ avl_tree classes_index;
+
+ struct tc_class *classes;
+ struct tc_class *last_class;
+
+ struct tc_device *next;
+ struct tc_device *prev;
+};
+
+
+struct tc_device *tc_device_root = NULL;
+
+// ----------------------------------------------------------------------------
+// tc_device index
+
+static int tc_device_compare(void* a, void* b) {
+ if(((struct tc_device *)a)->hash < ((struct tc_device *)b)->hash) return -1;
+ else if(((struct tc_device *)a)->hash > ((struct tc_device *)b)->hash) return 1;
+ else return strcmp(((struct tc_device *)a)->id, ((struct tc_device *)b)->id);
+}
+
+avl_tree tc_device_root_index = {
+ NULL,
+ tc_device_compare
+};
+
+#define tc_device_index_add(st) (struct tc_device *)avl_insert(&tc_device_root_index, (avl *)(st))
+#define tc_device_index_del(st) (struct tc_device *)avl_remove(&tc_device_root_index, (avl *)(st))
+
+static inline struct tc_device *tc_device_index_find(const char *id, uint32_t hash) {
+ struct tc_device tmp;
+ tmp.id = (char *)id;
+ tmp.hash = (hash)?hash:simple_hash(tmp.id);
+
+ return (struct tc_device *)avl_search(&(tc_device_root_index), (avl *)&tmp);
+}
+
+
+// ----------------------------------------------------------------------------
+// tc_class index
+
+static int tc_class_compare(void* a, void* b) {
+ if(((struct tc_class *)a)->hash < ((struct tc_class *)b)->hash) return -1;
+ else if(((struct tc_class *)a)->hash > ((struct tc_class *)b)->hash) return 1;
+ else return strcmp(((struct tc_class *)a)->id, ((struct tc_class *)b)->id);
+}
+
+#define tc_class_index_add(st, rd) (struct tc_class *)avl_insert(&((st)->classes_index), (avl *)(rd))
+#define tc_class_index_del(st, rd) (struct tc_class *)avl_remove(&((st)->classes_index), (avl *)(rd))
+
+static inline struct tc_class *tc_class_index_find(struct tc_device *st, const char *id, uint32_t hash) {
+ struct tc_class tmp;
+ tmp.id = (char *)id;
+ tmp.hash = (hash)?hash:simple_hash(tmp.id);
+
+ return (struct tc_class *)avl_search(&(st->classes_index), (avl *) &tmp);
+}
+
+// ----------------------------------------------------------------------------
+
+static inline void tc_class_free(struct tc_device *n, struct tc_class *c) {
+ if(c == n->classes) {
+ if(likely(c->next))
+ n->classes = c->next;
+ else
+ n->classes = c->prev;
+ }
+
+ if(c == n->last_class) {
+ if(unlikely(c->next))
+ n->last_class = c->next;
+ else
+ n->last_class = c->prev;
+ }
+
+ if(c->next) c->next->prev = c->prev;
+ if(c->prev) c->prev->next = c->next;
+
+ debug(D_TC_LOOP, "Removing from device '%s' class '%s', parentid '%s', leafid '%s', unused=%d", n->id, c->id, c->parentid?c->parentid:"", c->leafid?c->leafid:"", c->unupdated);
+
+ if(unlikely(tc_class_index_del(n, c) != c))
+ error("plugin_tc: INTERNAL ERROR: attempt remove class '%s' from device '%s': removed a different calls", c->id, n->id);
+
+ freez(c->id);
+ freez(c->name);
+ freez(c->leafid);
+ freez(c->parentid);
+ freez(c);
+}
+
+static inline void tc_device_classes_cleanup(struct tc_device *d) {
+ static int cleanup_every = 999;
+
+ if(unlikely(cleanup_every > 0)) {
+ cleanup_every = (int) config_get_number("plugin:tc", "cleanup unused classes every", 120);
+ if(cleanup_every < 0) cleanup_every = -cleanup_every;
+ }
+
+ d->name_updated = 0;
+ d->family_updated = 0;
+
+ struct tc_class *c = d->classes;
+ while(c) {
+ if(unlikely(cleanup_every && c->unupdated >= cleanup_every)) {
+ struct tc_class *nc = c->next;
+ tc_class_free(d, c);
+ c = nc;
+ }
+ else {
+ c->updated = 0;
+ c->name_updated = 0;
+
+ c = c->next;
+ }
+ }
+}
+
+static inline void tc_device_commit(struct tc_device *d) {
+ static int enable_new_interfaces = -1, enable_bytes = -1, enable_packets = -1, enable_dropped = -1, enable_tokens = -1, enable_ctokens = -1, enabled_all_classes_qdiscs = -1;
+
+ if(unlikely(enable_new_interfaces == -1)) {
+ enable_new_interfaces = config_get_boolean_ondemand("plugin:tc", "enable new interfaces detected at runtime", CONFIG_BOOLEAN_YES);
+ enable_bytes = config_get_boolean_ondemand("plugin:tc", "enable traffic charts for all interfaces", CONFIG_BOOLEAN_AUTO);
+ enable_packets = config_get_boolean_ondemand("plugin:tc", "enable packets charts for all interfaces", CONFIG_BOOLEAN_AUTO);
+ enable_dropped = config_get_boolean_ondemand("plugin:tc", "enable dropped charts for all interfaces", CONFIG_BOOLEAN_AUTO);
+ enable_tokens = config_get_boolean_ondemand("plugin:tc", "enable tokens charts for all interfaces", CONFIG_BOOLEAN_NO);
+ enable_ctokens = config_get_boolean_ondemand("plugin:tc", "enable ctokens charts for all interfaces", CONFIG_BOOLEAN_NO);
+ enabled_all_classes_qdiscs = config_get_boolean_ondemand("plugin:tc", "enable show all classes and qdiscs for all interfaces", CONFIG_BOOLEAN_NO);
+ }
+
+ if(unlikely(d->enabled == (char)-1)) {
+ char var_name[CONFIG_MAX_NAME + 1];
+ snprintfz(var_name, CONFIG_MAX_NAME, "qos for %s", d->id);
+
+ d->enabled = (char)config_get_boolean_ondemand("plugin:tc", var_name, enable_new_interfaces);
+
+ snprintfz(var_name, CONFIG_MAX_NAME, "traffic chart for %s", d->id);
+ d->enabled_bytes = (char)config_get_boolean_ondemand("plugin:tc", var_name, enable_bytes);
+
+ snprintfz(var_name, CONFIG_MAX_NAME, "packets chart for %s", d->id);
+ d->enabled_packets = (char)config_get_boolean_ondemand("plugin:tc", var_name, enable_packets);
+
+ snprintfz(var_name, CONFIG_MAX_NAME, "dropped packets chart for %s", d->id);
+ d->enabled_dropped = (char)config_get_boolean_ondemand("plugin:tc", var_name, enable_dropped);
+
+ snprintfz(var_name, CONFIG_MAX_NAME, "tokens chart for %s", d->id);
+ d->enabled_tokens = (char)config_get_boolean_ondemand("plugin:tc", var_name, enable_tokens);
+
+ snprintfz(var_name, CONFIG_MAX_NAME, "ctokens chart for %s", d->id);
+ d->enabled_ctokens = (char)config_get_boolean_ondemand("plugin:tc", var_name, enable_ctokens);
+
+ snprintfz(var_name, CONFIG_MAX_NAME, "show all classes for %s", d->id);
+ d->enabled_all_classes_qdiscs = (char)config_get_boolean_ondemand("plugin:tc", var_name, enabled_all_classes_qdiscs);
+ }
+
+ // we only need to add leaf classes
+ struct tc_class *c, *x /*, *root = NULL */;
+ unsigned long long bytes_sum = 0, packets_sum = 0, dropped_sum = 0, tokens_sum = 0, ctokens_sum = 0;
+ int active_nodes = 0, updated_classes = 0, updated_qdiscs = 0;
+
+ // prepare all classes
+ // we set reasonable defaults for the rest of the code below
+
+ for(c = d->classes ; c ; c = c->next) {
+ c->render = 0; // do not render this class
+
+ c->isleaf = 1; // this is a leaf class
+ c->hasparent = 0; // without a parent
+
+ if(unlikely(!c->updated))
+ c->unupdated++; // increase its unupdated counter
+ else {
+ c->unupdated = 0; // reset its unupdated counter
+
+ // count how many of each kind
+ if(c->isqdisc)
+ updated_qdiscs++;
+ else
+ updated_classes++;
+ }
+ }
+
+ if(unlikely(!d->enabled || (!updated_classes && !updated_qdiscs))) {
+ debug(D_TC_LOOP, "TC: Ignoring TC device '%s'. It is not enabled/updated.", d->name?d->name:d->id);
+ tc_device_classes_cleanup(d);
+ return;
+ }
+
+ if(unlikely(updated_classes && updated_qdiscs)) {
+ error("TC: device '%s' has active both classes (%d) and qdiscs (%d). Will render only qdiscs.", d->id, updated_classes, updated_qdiscs);
+
+ // set all classes to !updated
+ for(c = d->classes ; c ; c = c->next)
+ if(unlikely(!c->isqdisc && c->updated))
+ c->updated = 0;
+
+ updated_classes = 0;
+ }
+
+ // mark the classes as leafs and parents
+ //
+ // TC is hierarchical:
+ // - classes can have other classes in them
+ // - the same is true for qdiscs (i.e. qdiscs have classes, that have other qdiscs)
+ //
+ // we need to present a chart with leaf nodes only, so that the sum
+ // of all dimensions of the chart, will be the total utilization
+ // of the interface.
+ //
+ // here we try to find the ones we need to report
+ // by default all nodes are marked with: isleaf = 1 (see above)
+ //
+ // so, here we remove the isleaf flag from nodes in the middle
+ // and we add the hasparent flag to leaf nodes we found their parent
+ if(likely(!d->enabled_all_classes_qdiscs)) {
+ for(c = d->classes; c; c = c->next) {
+ if(unlikely(!c->updated)) continue;
+
+ //debug(D_TC_LOOP, "TC: In device '%s', %s '%s' has leafid: '%s' and parentid '%s'.",
+ // d->id,
+ // c->isqdisc?"qdisc":"class",
+ // c->id,
+ // c->leafid?c->leafid:"NULL",
+ // c->parentid?c->parentid:"NULL");
+
+ // find if c is leaf or not
+ for(x = d->classes; x; x = x->next) {
+ if(unlikely(!x->updated || c == x || !x->parentid)) continue;
+
+ // classes have both parentid and leafid
+ // qdiscs have only parentid
+ // the following works for both (it is an OR)
+
+ if((c->hash == x->parent_hash && strcmp(c->id, x->parentid) == 0) ||
+ (c->leafid && c->leaf_hash == x->parent_hash && strcmp(c->leafid, x->parentid) == 0)) {
+ // debug(D_TC_LOOP, "TC: In device '%s', %s '%s' (leafid: '%s') has as leaf %s '%s' (parentid: '%s').", d->name?d->name:d->id, c->isqdisc?"qdisc":"class", c->name?c->name:c->id, c->leafid?c->leafid:c->id, x->isqdisc?"qdisc":"class", x->name?x->name:x->id, x->parentid?x->parentid:x->id);
+ c->isleaf = 0;
+ x->hasparent = 1;
+ }
+ }
+ }
+ }
+
+ for(c = d->classes ; c ; c = c->next) {
+ if(unlikely(!c->updated)) continue;
+
+ // debug(D_TC_LOOP, "TC: device '%s', %s '%s' isleaf=%d, hasparent=%d", d->id, (c->isqdisc)?"qdisc":"class", c->id, c->isleaf, c->hasparent);
+
+ if(unlikely((c->isleaf && c->hasparent) || d->enabled_all_classes_qdiscs)) {
+ c->render = 1;
+ active_nodes++;
+ bytes_sum += c->bytes;
+ packets_sum += c->packets;
+ dropped_sum += c->dropped;
+ tokens_sum += c->tokens;
+ ctokens_sum += c->ctokens;
+ }
+
+ //if(unlikely(!c->hasparent)) {
+ // if(root) error("TC: multiple root class/qdisc for device '%s' (old: '%s', new: '%s')", d->id, root->id, c->id);
+ // root = c;
+ // debug(D_TC_LOOP, "TC: found root class/qdisc '%s'", root->id);
+ //}
+ }
+
+#ifdef NETDATA_INTERNAL_CHECKS
+ // dump all the list to see what we know
+
+ if(unlikely(debug_flags & D_TC_LOOP)) {
+ for(c = d->classes ; c ; c = c->next) {
+ if(c->render) debug(D_TC_LOOP, "TC: final nodes dump for '%s': class %s, OK", d->name, c->id);
+ else debug(D_TC_LOOP, "TC: final nodes dump for '%s': class %s, IGNORE (updated: %d, isleaf: %d, hasparent: %d, parent: %s)", d->name?d->name:d->id, c->id, c->updated, c->isleaf, c->hasparent, c->parentid?c->parentid:"(unset)");
+ }
+ }
+#endif
+
+ if(unlikely(!active_nodes)) {
+ debug(D_TC_LOOP, "TC: Ignoring TC device '%s'. No useful classes/qdiscs.", d->name?d->name:d->id);
+ tc_device_classes_cleanup(d);
+ return;
+ }
+
+ debug(D_TC_LOOP, "TC: evaluating TC device '%s'. enabled = %d/%d (bytes: %d/%d, packets: %d/%d, dropped: %d/%d, tokens: %d/%d, ctokens: %d/%d, all_classes_qdiscs: %d/%d), classes: (bytes = %llu, packets = %llu, dropped = %llu, tokens = %llu, ctokens = %llu).",
+ d->name?d->name:d->id,
+ d->enabled, enable_new_interfaces,
+ d->enabled_bytes, enable_bytes,
+ d->enabled_packets, enable_packets,
+ d->enabled_dropped, enable_dropped,
+ d->enabled_tokens, enable_tokens,
+ d->enabled_ctokens, enable_ctokens,
+ d->enabled_all_classes_qdiscs, enabled_all_classes_qdiscs,
+ bytes_sum,
+ packets_sum,
+ dropped_sum,
+ tokens_sum,
+ ctokens_sum
+ );
+
+ // --------------------------------------------------------------------
+ // bytes
+
+ if(d->enabled_bytes == CONFIG_BOOLEAN_YES || (d->enabled_bytes == CONFIG_BOOLEAN_AUTO && bytes_sum)) {
+ d->enabled_bytes = CONFIG_BOOLEAN_YES;
+
+ if(unlikely(!d->st_bytes))
+ d->st_bytes = rrdset_create_localhost(
+ RRD_TYPE_TC
+ , d->id
+ , d->name ? d->name : d->id
+ , d->family ? d->family : d->id
+ , RRD_TYPE_TC ".qos"
+ , "Class Usage"
+ , "kilobits/s"
+ , PLUGIN_TC_NAME
+ , NULL
+ , NETDATA_CHART_PRIO_TC_QOS
+ , localhost->rrd_update_every
+ , d->enabled_all_classes_qdiscs ? RRDSET_TYPE_LINE : RRDSET_TYPE_STACKED
+ );
+
+ else {
+ rrdset_next(d->st_bytes);
+ if(unlikely(d->name_updated)) rrdset_set_name(d->st_bytes, d->name);
+
+ // TODO
+ // update the family
+ }
+
+ for(c = d->classes ; c ; c = c->next) {
+ if(unlikely(!c->render)) continue;
+
+ if(unlikely(!c->rd_bytes))
+ c->rd_bytes = rrddim_add(d->st_bytes, c->id, c->name?c->name:c->id, 8, BITS_IN_A_KILOBIT, RRD_ALGORITHM_INCREMENTAL);
+ else if(unlikely(c->name_updated))
+ rrddim_set_name(d->st_bytes, c->rd_bytes, c->name);
+
+ rrddim_set_by_pointer(d->st_bytes, c->rd_bytes, c->bytes);
+ }
+ rrdset_done(d->st_bytes);
+ }
+
+ // --------------------------------------------------------------------
+ // packets
+
+ if(d->enabled_packets == CONFIG_BOOLEAN_YES || (d->enabled_packets == CONFIG_BOOLEAN_AUTO && packets_sum)) {
+ d->enabled_packets = CONFIG_BOOLEAN_YES;
+
+ if(unlikely(!d->st_packets)) {
+ char id[RRD_ID_LENGTH_MAX + 1];
+ char name[RRD_ID_LENGTH_MAX + 1];
+ snprintfz(id, RRD_ID_LENGTH_MAX, "%s_packets", d->id);
+ snprintfz(name, RRD_ID_LENGTH_MAX, "%s_packets", d->name?d->name:d->id);
+
+ d->st_packets = rrdset_create_localhost(
+ RRD_TYPE_TC
+ , id
+ , name
+ , d->family ? d->family : d->id
+ , RRD_TYPE_TC ".qos_packets"
+ , "Class Packets"
+ , "packets/s"
+ , PLUGIN_TC_NAME
+ , NULL
+ , NETDATA_CHART_PRIO_TC_QOS_PACKETS
+ , localhost->rrd_update_every
+ , d->enabled_all_classes_qdiscs ? RRDSET_TYPE_LINE : RRDSET_TYPE_STACKED
+ );
+ }
+ else {
+ rrdset_next(d->st_packets);
+
+ if(unlikely(d->name_updated)) {
+ char name[RRD_ID_LENGTH_MAX + 1];
+ snprintfz(name, RRD_ID_LENGTH_MAX, "%s_packets", d->name?d->name:d->id);
+ rrdset_set_name(d->st_packets, name);
+ }
+
+ // TODO
+ // update the family
+ }
+
+ for(c = d->classes ; c ; c = c->next) {
+ if(unlikely(!c->render)) continue;
+
+ if(unlikely(!c->rd_packets))
+ c->rd_packets = rrddim_add(d->st_packets, c->id, c->name?c->name:c->id, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ else if(unlikely(c->name_updated))
+ rrddim_set_name(d->st_packets, c->rd_packets, c->name);
+
+ rrddim_set_by_pointer(d->st_packets, c->rd_packets, c->packets);
+ }
+ rrdset_done(d->st_packets);
+ }
+
+ // --------------------------------------------------------------------
+ // dropped
+
+ if(d->enabled_dropped == CONFIG_BOOLEAN_YES || (d->enabled_dropped == CONFIG_BOOLEAN_AUTO && dropped_sum)) {
+ d->enabled_dropped = CONFIG_BOOLEAN_YES;
+
+ if(unlikely(!d->st_dropped)) {
+ char id[RRD_ID_LENGTH_MAX + 1];
+ char name[RRD_ID_LENGTH_MAX + 1];
+ snprintfz(id, RRD_ID_LENGTH_MAX, "%s_dropped", d->id);
+ snprintfz(name, RRD_ID_LENGTH_MAX, "%s_dropped", d->name?d->name:d->id);
+
+ d->st_dropped = rrdset_create_localhost(
+ RRD_TYPE_TC
+ , id
+ , name
+ , d->family ? d->family : d->id
+ , RRD_TYPE_TC ".qos_dropped"
+ , "Class Dropped Packets"
+ , "packets/s"
+ , PLUGIN_TC_NAME
+ , NULL
+ , NETDATA_CHART_PRIO_TC_QOS_DROPPED
+ , localhost->rrd_update_every
+ , d->enabled_all_classes_qdiscs ? RRDSET_TYPE_LINE : RRDSET_TYPE_STACKED
+ );
+ }
+ else {
+ rrdset_next(d->st_dropped);
+
+ if(unlikely(d->name_updated)) {
+ char name[RRD_ID_LENGTH_MAX + 1];
+ snprintfz(name, RRD_ID_LENGTH_MAX, "%s_dropped", d->name?d->name:d->id);
+ rrdset_set_name(d->st_dropped, name);
+ }
+
+ // TODO
+ // update the family
+ }
+
+ for(c = d->classes ; c ; c = c->next) {
+ if(unlikely(!c->render)) continue;
+
+ if(unlikely(!c->rd_dropped))
+ c->rd_dropped = rrddim_add(d->st_dropped, c->id, c->name?c->name:c->id, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ else if(unlikely(c->name_updated))
+ rrddim_set_name(d->st_dropped, c->rd_dropped, c->name);
+
+ rrddim_set_by_pointer(d->st_dropped, c->rd_dropped, c->dropped);
+ }
+ rrdset_done(d->st_dropped);
+ }
+
+ // --------------------------------------------------------------------
+ // tokens
+
+ if(d->enabled_tokens == CONFIG_BOOLEAN_YES || (d->enabled_tokens == CONFIG_BOOLEAN_AUTO && tokens_sum)) {
+ d->enabled_tokens = CONFIG_BOOLEAN_YES;
+
+ if(unlikely(!d->st_tokens)) {
+ char id[RRD_ID_LENGTH_MAX + 1];
+ char name[RRD_ID_LENGTH_MAX + 1];
+ snprintfz(id, RRD_ID_LENGTH_MAX, "%s_tokens", d->id);
+ snprintfz(name, RRD_ID_LENGTH_MAX, "%s_tokens", d->name?d->name:d->id);
+
+ d->st_tokens = rrdset_create_localhost(
+ RRD_TYPE_TC
+ , id
+ , name
+ , d->family ? d->family : d->id
+ , RRD_TYPE_TC ".qos_tokens"
+ , "Class Tokens"
+ , "tokens"
+ , PLUGIN_TC_NAME
+ , NULL
+ , NETDATA_CHART_PRIO_TC_QOS_TOCKENS
+ , localhost->rrd_update_every
+ , RRDSET_TYPE_LINE
+ );
+ }
+ else {
+ rrdset_next(d->st_tokens);
+
+ if(unlikely(d->name_updated)) {
+ char name[RRD_ID_LENGTH_MAX + 1];
+ snprintfz(name, RRD_ID_LENGTH_MAX, "%s_tokens", d->name?d->name:d->id);
+ rrdset_set_name(d->st_tokens, name);
+ }
+
+ // TODO
+ // update the family
+ }
+
+ for(c = d->classes ; c ; c = c->next) {
+ if(unlikely(!c->render)) continue;
+
+ if(unlikely(!c->rd_tokens)) {
+ c->rd_tokens = rrddim_add(d->st_tokens, c->id, c->name?c->name:c->id, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ }
+ else if(unlikely(c->name_updated))
+ rrddim_set_name(d->st_tokens, c->rd_tokens, c->name);
+
+ rrddim_set_by_pointer(d->st_tokens, c->rd_tokens, c->tokens);
+ }
+ rrdset_done(d->st_tokens);
+ }
+
+ // --------------------------------------------------------------------
+ // ctokens
+
+ if(d->enabled_ctokens == CONFIG_BOOLEAN_YES || (d->enabled_ctokens == CONFIG_BOOLEAN_AUTO && ctokens_sum)) {
+ d->enabled_ctokens = CONFIG_BOOLEAN_YES;
+
+ if(unlikely(!d->st_ctokens)) {
+ char id[RRD_ID_LENGTH_MAX + 1];
+ char name[RRD_ID_LENGTH_MAX + 1];
+ snprintfz(id, RRD_ID_LENGTH_MAX, "%s_ctokens", d->id);
+ snprintfz(name, RRD_ID_LENGTH_MAX, "%s_ctokens", d->name?d->name:d->id);
+
+ d->st_ctokens = rrdset_create_localhost(
+ RRD_TYPE_TC
+ , id
+ , name
+ , d->family ? d->family : d->id
+ , RRD_TYPE_TC ".qos_ctokens"
+ , "Class cTokens"
+ , "ctokens"
+ , PLUGIN_TC_NAME
+ , NULL
+ , NETDATA_CHART_PRIO_TC_QOS_CTOCKENS
+ , localhost->rrd_update_every
+ , RRDSET_TYPE_LINE
+ );
+ }
+ else {
+ debug(D_TC_LOOP, "TC: Updating _ctokens chart for device '%s'", d->name?d->name:d->id);
+ rrdset_next(d->st_ctokens);
+
+ if(unlikely(d->name_updated)) {
+ char name[RRD_ID_LENGTH_MAX + 1];
+ snprintfz(name, RRD_ID_LENGTH_MAX, "%s_ctokens", d->name?d->name:d->id);
+ rrdset_set_name(d->st_ctokens, name);
+ }
+
+ // TODO
+ // update the family
+ }
+
+ for(c = d->classes ; c ; c = c->next) {
+ if(unlikely(!c->render)) continue;
+
+ if(unlikely(!c->rd_ctokens))
+ c->rd_ctokens = rrddim_add(d->st_ctokens, c->id, c->name?c->name:c->id, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ else if(unlikely(c->name_updated))
+ rrddim_set_name(d->st_ctokens, c->rd_ctokens, c->name);
+
+ rrddim_set_by_pointer(d->st_ctokens, c->rd_ctokens, c->ctokens);
+ }
+ rrdset_done(d->st_ctokens);
+ }
+
+ tc_device_classes_cleanup(d);
+}
+
+static inline void tc_device_set_class_name(struct tc_device *d, char *id, char *name) {
+ if(unlikely(!name || !*name)) return;
+
+ struct tc_class *c = tc_class_index_find(d, id, 0);
+ if(likely(c)) {
+ if(likely(c->name)) {
+ if(!strcmp(c->name, name)) return;
+ freez(c->name);
+ c->name = NULL;
+ }
+
+ if(likely(name && *name && strcmp(c->id, name) != 0)) {
+ debug(D_TC_LOOP, "TC: Setting device '%s', class '%s' name to '%s'", d->id, id, name);
+ c->name = strdupz(name);
+ c->name_updated = 1;
+ }
+ }
+}
+
+static inline void tc_device_set_device_name(struct tc_device *d, char *name) {
+ if(unlikely(!name || !*name)) return;
+
+ if(d->name) {
+ if(!strcmp(d->name, name)) return;
+ freez(d->name);
+ d->name = NULL;
+ }
+
+ if(likely(name && *name && strcmp(d->id, name) != 0)) {
+ debug(D_TC_LOOP, "TC: Setting device '%s' name to '%s'", d->id, name);
+ d->name = strdupz(name);
+ d->name_updated = 1;
+ }
+}
+
+static inline void tc_device_set_device_family(struct tc_device *d, char *family) {
+ freez(d->family);
+ d->family = NULL;
+
+ if(likely(family && *family && strcmp(d->id, family) != 0)) {
+ debug(D_TC_LOOP, "TC: Setting device '%s' family to '%s'", d->id, family);
+ d->family = strdupz(family);
+ d->family_updated = 1;
+ }
+ // no need for null termination - it is already null
+}
+
+static inline struct tc_device *tc_device_create(char *id)
+{
+ struct tc_device *d = tc_device_index_find(id, 0);
+
+ if(!d) {
+ debug(D_TC_LOOP, "TC: Creating device '%s'", id);
+
+ d = callocz(1, sizeof(struct tc_device));
+
+ d->id = strdupz(id);
+ d->hash = simple_hash(d->id);
+ d->enabled = (char)-1;
+
+ avl_init(&d->classes_index, tc_class_compare);
+ if(unlikely(tc_device_index_add(d) != d))
+ error("plugin_tc: INTERNAL ERROR: removing device '%s' removed a different device.", d->id);
+
+ if(!tc_device_root) {
+ tc_device_root = d;
+ }
+ else {
+ d->next = tc_device_root;
+ tc_device_root->prev = d;
+ tc_device_root = d;
+ }
+ }
+
+ return(d);
+}
+
+static inline struct tc_class *tc_class_add(struct tc_device *n, char *id, char qdisc, char *parentid, char *leafid)
+{
+ struct tc_class *c = tc_class_index_find(n, id, 0);
+
+ if(!c) {
+ debug(D_TC_LOOP, "TC: Creating in device '%s', class id '%s', parentid '%s', leafid '%s'", n->id, id, parentid?parentid:"", leafid?leafid:"");
+
+ c = callocz(1, sizeof(struct tc_class));
+
+ if(unlikely(!n->classes))
+ n->classes = c;
+
+ else if(likely(n->last_class)) {
+ n->last_class->next = c;
+ c->prev = n->last_class;
+ }
+
+ n->last_class = c;
+
+ c->id = strdupz(id);
+ c->hash = simple_hash(c->id);
+
+ c->isqdisc = qdisc;
+ if(parentid && *parentid) {
+ c->parentid = strdupz(parentid);
+ c->parent_hash = simple_hash(c->parentid);
+ }
+
+ if(leafid && *leafid) {
+ c->leafid = strdupz(leafid);
+ c->leaf_hash = simple_hash(c->leafid);
+ }
+
+ if(unlikely(tc_class_index_add(n, c) != c))
+ error("plugin_tc: INTERNAL ERROR: attempt index class '%s' on device '%s': already exists", c->id, n->id);
+ }
+ return(c);
+}
+
+static inline void tc_device_free(struct tc_device *n)
+{
+ if(n->next) n->next->prev = n->prev;
+ if(n->prev) n->prev->next = n->next;
+ if(tc_device_root == n) {
+ if(n->next) tc_device_root = n->next;
+ else tc_device_root = n->prev;
+ }
+
+ if(unlikely(tc_device_index_del(n) != n))
+ error("plugin_tc: INTERNAL ERROR: removing device '%s' removed a different device.", n->id);
+
+ while(n->classes) tc_class_free(n, n->classes);
+
+ freez(n->id);
+ freez(n->name);
+ freez(n->family);
+ freez(n);
+}
+
+static inline void tc_device_free_all()
+{
+ while(tc_device_root)
+ tc_device_free(tc_device_root);
+}
+
+#define PLUGINSD_MAX_WORDS 20
+
+static inline int tc_space(char c) {
+ switch(c) {
+ case ' ':
+ case '\t':
+ case '\r':
+ case '\n':
+ return 1;
+
+ default:
+ return 0;
+ }
+}
+
+static inline void tc_split_words(char *str, char **words, int max_words) {
+ char *s = str;
+ int i = 0;
+
+ // skip all white space
+ while(tc_space(*s)) s++;
+
+ // store the first word
+ words[i++] = s;
+
+ // while we have something
+ while(*s) {
+ // if it is a space
+ if(unlikely(tc_space(*s))) {
+
+ // terminate the word
+ *s++ = '\0';
+
+ // skip all white space
+ while(tc_space(*s)) s++;
+
+ // if we reached the end, stop
+ if(!*s) break;
+
+ // store the next word
+ if(i < max_words) words[i++] = s;
+ else break;
+ }
+ else s++;
+ }
+
+ // terminate the words
+ while(i < max_words) words[i++] = NULL;
+}
+
+static pid_t tc_child_pid = 0;
+
+static void tc_main_cleanup(void *ptr) {
+ struct netdata_static_thread *static_thread = (struct netdata_static_thread *)ptr;
+ static_thread->enabled = NETDATA_MAIN_THREAD_EXITING;
+
+ info("cleaning up...");
+
+ if(tc_child_pid) {
+ info("TC: killing with SIGTERM tc-qos-helper process %d", tc_child_pid);
+ if(killpid(tc_child_pid, SIGTERM) != -1) {
+ siginfo_t info;
+
+ info("TC: waiting for tc plugin child process pid %d to exit...", tc_child_pid);
+ waitid(P_PID, (id_t) tc_child_pid, &info, WEXITED);
+ // info("TC: finished tc plugin child process pid %d.", tc_child_pid);
+ }
+
+ tc_child_pid = 0;
+ }
+
+ static_thread->enabled = NETDATA_MAIN_THREAD_EXITED;
+}
+
+void *tc_main(void *ptr) {
+ netdata_thread_cleanup_push(tc_main_cleanup, ptr);
+
+ struct rusage thread;
+
+ char command[FILENAME_MAX + 1];
+ char *words[PLUGINSD_MAX_WORDS] = { NULL };
+
+ uint32_t BEGIN_HASH = simple_hash("BEGIN");
+ uint32_t END_HASH = simple_hash("END");
+ uint32_t QDISC_HASH = simple_hash("qdisc");
+ uint32_t CLASS_HASH = simple_hash("class");
+ uint32_t SENT_HASH = simple_hash("Sent");
+ uint32_t LENDED_HASH = simple_hash("lended:");
+ uint32_t TOKENS_HASH = simple_hash("tokens:");
+ uint32_t SETDEVICENAME_HASH = simple_hash("SETDEVICENAME");
+ uint32_t SETDEVICEGROUP_HASH = simple_hash("SETDEVICEGROUP");
+ uint32_t SETCLASSNAME_HASH = simple_hash("SETCLASSNAME");
+ uint32_t WORKTIME_HASH = simple_hash("WORKTIME");
+#ifdef DETACH_PLUGINS_FROM_NETDATA
+ uint32_t MYPID_HASH = simple_hash("MYPID");
+#endif
+ uint32_t first_hash;
+
+ snprintfz(command, TC_LINE_MAX, "%s/tc-qos-helper.sh", netdata_configured_plugins_dir);
+ char *tc_script = config_get("plugin:tc", "script to run to get tc values", command);
+
+ while(!netdata_exit) {
+ FILE *fp;
+ struct tc_device *device = NULL;
+ struct tc_class *class = NULL;
+
+ snprintfz(command, TC_LINE_MAX, "exec %s %d", tc_script, localhost->rrd_update_every);
+ debug(D_TC_LOOP, "executing '%s'", command);
+
+ fp = mypopen(command, (pid_t *)&tc_child_pid);
+ if(unlikely(!fp)) {
+ error("TC: Cannot popen(\"%s\", \"r\").", command);
+ goto cleanup;
+ }
+
+ char buffer[TC_LINE_MAX+1] = "";
+ while(fgets(buffer, TC_LINE_MAX, fp) != NULL) {
+ if(unlikely(netdata_exit)) break;
+
+ buffer[TC_LINE_MAX] = '\0';
+ // debug(D_TC_LOOP, "TC: read '%s'", buffer);
+
+ tc_split_words(buffer, words, PLUGINSD_MAX_WORDS);
+
+ if(unlikely(!words[0] || !*words[0])) {
+ // debug(D_TC_LOOP, "empty line");
+ continue;
+ }
+ // else debug(D_TC_LOOP, "First word is '%s'", words[0]);
+
+ first_hash = simple_hash(words[0]);
+
+ if(unlikely(device && ((first_hash == CLASS_HASH && strcmp(words[0], "class") == 0) || (first_hash == QDISC_HASH && strcmp(words[0], "qdisc") == 0)))) {
+ // debug(D_TC_LOOP, "CLASS line on class id='%s', parent='%s', parentid='%s', leaf='%s', leafid='%s'", words[2], words[3], words[4], words[5], words[6]);
+
+ char *type = words[1]; // the class/qdisc type: htb, fq_codel, etc
+ char *id = words[2]; // the class/qdisc major:minor
+ char *parent = words[3]; // the word 'parent' or 'root'
+ char *parentid = words[4]; // parentid
+ char *leaf = words[5]; // the word 'leaf'
+ char *leafid = words[6]; // leafid
+
+ int parent_is_root = 0;
+ int parent_is_parent = 0;
+ if(likely(parent)) {
+ parent_is_parent = !strcmp(parent, "parent");
+
+ if(!parent_is_parent)
+ parent_is_root = !strcmp(parent, "root");
+ }
+
+ if(likely(type && id && (parent_is_root || parent_is_parent))) {
+ char qdisc = 0;
+
+ if(first_hash == QDISC_HASH) {
+ qdisc = 1;
+
+ if(!strcmp(type, "ingress")) {
+ // we don't want to get the ingress qdisc
+ // there should be an IFB interface for this
+
+ class = NULL;
+ continue;
+ }
+
+ if(parent_is_parent && parentid) {
+ // eliminate the minor number from parentid
+ // why: parentid is the id of the parent class
+ // but major: is also the id of the parent qdisc
+
+ char *s = parentid;
+ while(*s && *s != ':') s++;
+ if(*s == ':') s[1] = '\0';
+ }
+ }
+
+ if(parent_is_root) {
+ parentid = NULL;
+ leafid = NULL;
+ }
+ else if(!leaf || strcmp(leaf, "leaf") != 0)
+ leafid = NULL;
+
+ char leafbuf[20 + 1] = "";
+ if(leafid && leafid[strlen(leafid) - 1] == ':') {
+ strncpyz(leafbuf, leafid, 20 - 1);
+ strcat(leafbuf, "1");
+ leafid = leafbuf;
+ }
+
+ class = tc_class_add(device, id, qdisc, parentid, leafid);
+ }
+ else {
+ // clear the last class
+ class = NULL;
+ }
+ }
+ else if(unlikely(first_hash == END_HASH && strcmp(words[0], "END") == 0)) {
+ // debug(D_TC_LOOP, "END line");
+
+ if(likely(device)) {
+ netdata_thread_disable_cancelability();
+ tc_device_commit(device);
+ // tc_device_free(device);
+ netdata_thread_enable_cancelability();
+ }
+
+ device = NULL;
+ class = NULL;
+ }
+ else if(unlikely(first_hash == BEGIN_HASH && strcmp(words[0], "BEGIN") == 0)) {
+ // debug(D_TC_LOOP, "BEGIN line on device '%s'", words[1]);
+
+ if(likely(words[1] && *words[1])) {
+ device = tc_device_create(words[1]);
+ }
+ else {
+ // tc_device_free(device);
+ device = NULL;
+ }
+
+ class = NULL;
+ }
+ else if(unlikely(device && class && first_hash == SENT_HASH && strcmp(words[0], "Sent") == 0)) {
+ // debug(D_TC_LOOP, "SENT line '%s'", words[1]);
+ if(likely(words[1] && *words[1])) {
+ class->bytes = str2ull(words[1]);
+ class->updated = 1;
+ }
+ else {
+ class->updated = 0;
+ }
+
+ if(likely(words[3] && *words[3]))
+ class->packets = str2ull(words[3]);
+
+ if(likely(words[6] && *words[6]))
+ class->dropped = str2ull(words[6]);
+
+ if(likely(words[8] && *words[8]))
+ class->overlimits = str2ull(words[8]);
+
+ if(likely(words[10] && *words[10]))
+ class->requeues = str2ull(words[10]);
+ }
+ else if(unlikely(device && class && class->updated && first_hash == LENDED_HASH && strcmp(words[0], "lended:") == 0)) {
+ // debug(D_TC_LOOP, "LENDED line '%s'", words[1]);
+ if(likely(words[1] && *words[1]))
+ class->lended = str2ull(words[1]);
+
+ if(likely(words[3] && *words[3]))
+ class->borrowed = str2ull(words[3]);
+
+ if(likely(words[5] && *words[5]))
+ class->giants = str2ull(words[5]);
+ }
+ else if(unlikely(device && class && class->updated && first_hash == TOKENS_HASH && strcmp(words[0], "tokens:") == 0)) {
+ // debug(D_TC_LOOP, "TOKENS line '%s'", words[1]);
+ if(likely(words[1] && *words[1]))
+ class->tokens = str2ull(words[1]);
+
+ if(likely(words[3] && *words[3]))
+ class->ctokens = str2ull(words[3]);
+ }
+ else if(unlikely(device && first_hash == SETDEVICENAME_HASH && strcmp(words[0], "SETDEVICENAME") == 0)) {
+ // debug(D_TC_LOOP, "SETDEVICENAME line '%s'", words[1]);
+ if(likely(words[1] && *words[1]))
+ tc_device_set_device_name(device, words[1]);
+ }
+ else if(unlikely(device && first_hash == SETDEVICEGROUP_HASH && strcmp(words[0], "SETDEVICEGROUP") == 0)) {
+ // debug(D_TC_LOOP, "SETDEVICEGROUP line '%s'", words[1]);
+ if(likely(words[1] && *words[1]))
+ tc_device_set_device_family(device, words[1]);
+ }
+ else if(unlikely(device && first_hash == SETCLASSNAME_HASH && strcmp(words[0], "SETCLASSNAME") == 0)) {
+ // debug(D_TC_LOOP, "SETCLASSNAME line '%s' '%s'", words[1], words[2]);
+ char *id = words[1];
+ char *path = words[2];
+ if(likely(id && *id && path && *path))
+ tc_device_set_class_name(device, id, path);
+ }
+ else if(unlikely(first_hash == WORKTIME_HASH && strcmp(words[0], "WORKTIME") == 0)) {
+ // debug(D_TC_LOOP, "WORKTIME line '%s' '%s'", words[1], words[2]);
+ getrusage(RUSAGE_THREAD, &thread);
+
+ static RRDSET *stcpu = NULL;
+ static RRDDIM *rd_user = NULL, *rd_system = NULL;
+
+ if(unlikely(!stcpu)) {
+ stcpu = rrdset_create_localhost(
+ "netdata"
+ , "plugin_tc_cpu"
+ , NULL
+ , "tc.helper"
+ , NULL
+ , "NetData TC CPU usage"
+ , "milliseconds/s"
+ , PLUGIN_TC_NAME
+ , NULL
+ , NETDATA_CHART_PRIO_NETDATA_TC_CPU
+ , localhost->rrd_update_every
+ , RRDSET_TYPE_STACKED
+ );
+ rd_user = rrddim_add(stcpu, "user", NULL, 1, 1000, RRD_ALGORITHM_INCREMENTAL);
+ rd_system = rrddim_add(stcpu, "system", NULL, 1, 1000, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else rrdset_next(stcpu);
+
+ rrddim_set_by_pointer(stcpu, rd_user , thread.ru_utime.tv_sec * 1000000ULL + thread.ru_utime.tv_usec);
+ rrddim_set_by_pointer(stcpu, rd_system, thread.ru_stime.tv_sec * 1000000ULL + thread.ru_stime.tv_usec);
+ rrdset_done(stcpu);
+
+ static RRDSET *sttime = NULL;
+ static RRDDIM *rd_run_time = NULL;
+
+ if(unlikely(!sttime)) {
+ sttime = rrdset_create_localhost(
+ "netdata"
+ , "plugin_tc_time"
+ , NULL
+ , "tc.helper"
+ , NULL
+ , "NetData TC script execution"
+ , "milliseconds/run"
+ , PLUGIN_TC_NAME
+ , NULL
+ , NETDATA_CHART_PRIO_NETDATA_TC_TIME
+ , localhost->rrd_update_every
+ , RRDSET_TYPE_AREA
+ );
+ rd_run_time = rrddim_add(sttime, "run_time", "run time", 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ }
+ else rrdset_next(sttime);
+
+ rrddim_set_by_pointer(sttime, rd_run_time, str2ll(words[1], NULL));
+ rrdset_done(sttime);
+
+ }
+#ifdef DETACH_PLUGINS_FROM_NETDATA
+ else if(unlikely(first_hash == MYPID_HASH && (strcmp(words[0], "MYPID") == 0))) {
+ // debug(D_TC_LOOP, "MYPID line '%s'", words[1]);
+ char *id = words[1];
+ pid_t pid = atol(id);
+
+ if(likely(pid)) tc_child_pid = pid;
+
+ debug(D_TC_LOOP, "TC: Child PID is %d.", tc_child_pid);
+ }
+#endif
+ //else {
+ // debug(D_TC_LOOP, "IGNORED line");
+ //}
+ }
+
+ // fgets() failed or loop broke
+ int code = mypclose(fp, (pid_t)tc_child_pid);
+ tc_child_pid = 0;
+
+ if(unlikely(device)) {
+ // tc_device_free(device);
+ device = NULL;
+ class = NULL;
+ }
+
+ if(unlikely(netdata_exit)) {
+ tc_device_free_all();
+ goto cleanup;
+ }
+
+ if(code == 1 || code == 127) {
+ // 1 = DISABLE
+ // 127 = cannot even run it
+ error("TC: tc-qos-helper.sh exited with code %d. Disabling it.", code);
+
+ tc_device_free_all();
+ goto cleanup;
+ }
+
+ sleep((unsigned int) localhost->rrd_update_every);
+ }
+
+cleanup: ; // added semi-colon to prevent older gcc error: label at end of compound statement
+ netdata_thread_cleanup_pop(1);
+ return NULL;
+}
diff --git a/collectors/tc.plugin/plugin_tc.h b/collectors/tc.plugin/plugin_tc.h
new file mode 100644
index 000000000..c64658415
--- /dev/null
+++ b/collectors/tc.plugin/plugin_tc.h
@@ -0,0 +1,31 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_PLUGIN_TC_H
+#define NETDATA_PLUGIN_TC_H 1
+
+#include "../../daemon/common.h"
+
+#if (TARGET_OS == OS_LINUX)
+
+#define NETDATA_PLUGIN_HOOK_LINUX_TC \
+ { \
+ .name = "PLUGIN[tc]", \
+ .config_section = CONFIG_SECTION_PLUGINS, \
+ .config_name = "tc", \
+ .enabled = 1, \
+ .thread = NULL, \
+ .init_routine = NULL, \
+ .start_routine = tc_main \
+ },
+
+extern void *tc_main(void *ptr);
+
+#else // (TARGET_OS == OS_LINUX)
+
+#define NETDATA_PLUGIN_HOOK_LINUX_TC
+
+#endif // (TARGET_OS == OS_LINUX)
+
+
+#endif /* NETDATA_PLUGIN_TC_H */
+
diff --git a/collectors/tc.plugin/tc-qos-helper.sh b/collectors/tc.plugin/tc-qos-helper.sh
new file mode 100644
index 000000000..b49d1f509
--- /dev/null
+++ b/collectors/tc.plugin/tc-qos-helper.sh
@@ -0,0 +1,315 @@
+#!/usr/bin/env bash
+
+# netdata
+# real-time performance and health monitoring, done right!
+# (C) 2017 Costa Tsaousis <costa@tsaousis.gr>
+# SPDX-License-Identifier: GPL-3.0-or-later
+#
+# This script is a helper that allows netdata to collect tc data.
+# tc output parsing has been implemented in C, inside netdata.
+# This script allows setting names for dimensions.
+
+export PATH="${PATH}:/sbin:/usr/sbin:/usr/local/sbin"
+export LC_ALL=C
+
+
+# -----------------------------------------------------------------------------
+# logging functions
+
+PROGRAM_FILE="$0"
+PROGRAM_NAME="$(basename $0)"
+PROGRAM_NAME="${PROGRAM_NAME/.plugin}"
+
+logdate() {
+ date "+%Y-%m-%d %H:%M:%S"
+}
+
+log() {
+ local status="${1}"
+ shift
+
+ echo >&2 "$(logdate): ${PROGRAM_NAME}: ${status}: ${*}"
+
+}
+
+warning() {
+ log WARNING "${@}"
+}
+
+error() {
+ log ERROR "${@}"
+}
+
+info() {
+ log INFO "${@}"
+}
+
+fatal() {
+ log FATAL "${@}"
+ exit 1
+}
+
+debug=0
+debug() {
+ [ $debug -eq 1 ] && log DEBUG "${@}"
+}
+
+# -----------------------------------------------------------------------------
+# find /var/run/fireqos
+
+# the default
+fireqos_run_dir="/var/run/fireqos"
+
+function realdir {
+ local r="$1"
+ local t=$(readlink "$r")
+
+ while [ "$t" ]
+ do
+ r=$(cd $(dirname "$r") && cd $(dirname "$t") && pwd -P)/$(basename "$t")
+ t=$(readlink "$r")
+ done
+
+ dirname "$r"
+}
+
+if [ ! -d "${fireqos_run_dir}" ]
+ then
+
+ # the fireqos executable - we will use it to find its config
+ fireqos="$(which fireqos 2>/dev/null || command -v fireqos 2>/dev/null)"
+
+ if [ ! -z "${fireqos}" ]
+ then
+
+ fireqos_exec_dir="$(realdir ${fireqos})"
+
+ if [ ! -z "${fireqos_exec_dir}" -a "${fireqos_exec_dir}" != "." -a -f "${fireqos_exec_dir}/install.config" ]
+ then
+
+ LOCALSTATEDIR=
+ source "${fireqos_exec_dir}/install.config"
+
+ if [ -d "${LOCALSTATEDIR}/run/fireqos" ]
+ then
+ fireqos_run_dir="${LOCALSTATEDIR}/run/fireqos"
+ else
+ warning "FireQoS is installed as '${fireqos}', its installation config at '${fireqos_exec_dir}/install.config' specifies local state data at '${LOCALSTATEDIR}/run/fireqos', but this directory is not found or is not readable (check the permissions of its parents)."
+ fi
+ else
+ warning "Although FireQoS is installed on this system as '${fireqos}', I cannot find/read its installation configuration at '${fireqos_exec_dir}/install.config'."
+ fi
+ else
+ warning "FireQoS is not installed on this system. Use FireQoS to apply traffic QoS and expose the class names to netdata. Check https://github.com/netdata/netdata/wiki/You-should-install-QoS-on-all-your-servers"
+ fi
+fi
+
+# -----------------------------------------------------------------------------
+
+[ -z "${NETDATA_PLUGINS_DIR}" ] && NETDATA_PLUGINS_DIR="$(dirname "${0}")"
+[ -z "${NETDATA_USER_CONFIG_DIR}" ] && NETDATA_USER_CONFIG_DIR="/usr/local/etc/netdata"
+[ -z "${NETDATA_STOCK_CONFIG_DIR}" ] && NETDATA_STOCK_CONFIG_DIR="/usr/local/lib/netdata/conf.d"
+
+plugins_dir="${NETDATA_PLUGINS_DIR}"
+tc="$(which tc 2>/dev/null || command -v tc 2>/dev/null)"
+
+
+# -----------------------------------------------------------------------------
+# user configuration
+
+# time in seconds to refresh QoS class/qdisc names
+qos_get_class_names_every=120
+
+# time in seconds to exit - netdata will restart the script
+qos_exit_every=3600
+
+# what to use? classes or qdiscs?
+tc_show="qdisc" # can also be "class"
+
+
+# -----------------------------------------------------------------------------
+# check if we have a valid number for interval
+
+t=${1}
+update_every=$((t))
+[ $((update_every)) -lt 1 ] && update_every=${NETDATA_UPDATE_EVERY}
+[ $((update_every)) -lt 1 ] && update_every=1
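+# an empty or missing first argument evaluates to 0 above,
+# so update_every falls back to NETDATA_UPDATE_EVERY and then to 1 second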
+
+
+# -----------------------------------------------------------------------------
+# allow the user to override our defaults
+
+for CONFIG in "${NETDATA_STOCK_CONFIG_DIR}/tc-qos-helper.conf" "${NETDATA_USER_CONFIG_DIR}/tc-qos-helper.conf"
+do
+ if [ -f "${CONFIG}" ]
+ then
+ info "Loading config file '${CONFIG}'..."
+ source "${CONFIG}"
+ [ $? -ne 0 ] && error "Failed to load config file '${CONFIG}'."
+ else
+ warning "Cannot find file '${CONFIG}'."
+ fi
+done
+
+case "${tc_show}" in
+ qdisc|class)
+ ;;
+
+ *)
+ error "tc_show variable can be either 'qdisc' or 'class' but is set to '${tc_show}'. Assuming it is 'qdisc'."
+ tc_show="qdisc"
+ ;;
+esac
+
+
+# -----------------------------------------------------------------------------
+# default sleep function
+
+LOOPSLEEPMS_LASTWORK=0
+loopsleepms() {
+ sleep $1
+}
+
+# if found and included, this file overwrites loopsleepms()
+# with a high resolution timer function for precise looping.
+. "${plugins_dir}/loopsleepms.sh.inc"
+
+
+# -----------------------------------------------------------------------------
+# final checks we can run
+
+if [ -z "${tc}" -o ! -x "${tc}" ]
+ then
+ fatal "cannot find command 'tc' in this system."
+fi
+
+tc_devices=
+fix_names=
+
+# -----------------------------------------------------------------------------
+
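+# emit one SETCLASSNAME line; $2 is the human-readable name,
+# $3 the classid used in "class" mode, and $4 (presumably the
+# leaf qdisc handle) the id used in "qdisc" mode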
+setclassname() {
+ if [ "${tc_show}" = "qdisc" ]
+ then
+ echo "SETCLASSNAME $4 $2"
+ else
+ echo "SETCLASSNAME $3 $2"
+ fi
+}
+
+show_tc_cls() {
+ [ "${tc_show}" = "qdisc" ] && return 1
+
+ local x="${1}"
+
+ if [ -f /etc/iproute2/tc_cls ]
+ then
+ local classid name rest
+ while read classid name rest
+ do
+ [ -z "${classid}" -o -z "${name}" -o "${classid}" = "#" -o "${name}" = "#" -o "${classid:0:1}" = "#" -o "${name:0:1}" = "#" ] && continue
+ setclassname "" "${name}" "${classid}"
+ done </etc/iproute2/tc_cls
+ return 0
+ fi
+ return 1
+}
+
+show_fireqos_names() {
+ local x="${1}" name n interface_dev interface_classes interface_classes_monitor
+
+ if [ -f "${fireqos_run_dir}/ifaces/${x}" ]
+ then
+ name="$(<"${fireqos_run_dir}/ifaces/${x}")"
+ echo "SETDEVICENAME ${name}"
+
+ interface_dev=
+ interface_classes=
+ interface_classes_monitor=
+ source "${fireqos_run_dir}/${name}.conf"
+ for n in ${interface_classes_monitor}
+ do
+ setclassname ${n//|/ }
+ done
+ [ ! -z "${interface_dev}" ] && echo "SETDEVICEGROUP ${interface_dev}"
+
+ return 0
+ fi
+
+ return 1
+}
+
+show_tc() {
+ local x="${1}"
+
+ echo "BEGIN ${x}"
+
+ # netdata can parse the output of tc
+ ${tc} -s ${tc_show} show dev ${x}
+
+ # check FireQOS names for classes
+ if [ ! -z "${fix_names}" ]
+ then
+ show_fireqos_names "${x}" || show_tc_cls "${x}"
+ fi
+
+ echo "END ${x}"
+}
+
+find_tc_devices() {
+ local count=0 devs= dev rest l
+
+ # find all the devices in the system
+ # without forking
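+ # note: the first two lines of /proc/net/dev are headers,
+ # which the count check below skips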
+ while IFS=":| " read dev rest
+ do
+ count=$((count + 1))
+ [ ${count} -le 2 ] && continue
+ devs="${devs} ${dev}"
+ done </proc/net/dev
+
+ # from all the devices find the ones
+ # that have QoS defined
+ # unfortunately, one fork per device cannot be avoided
+ tc_devices=
+ for dev in ${devs}
+ do
+ l="$(${tc} class show dev ${dev} 2>/dev/null)"
+ [ ! -z "${l}" ] && tc_devices="${tc_devices} ${dev}"
+ done
+}
+
+# update devices and class names
+# once every 2 minutes
+names_every=$((qos_get_class_names_every / update_every))
+
+# exit this script every hour
+# it will be restarted automatically
+exit_after=$((qos_exit_every / update_every))
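+# note: names_every and exit_after are loop-iteration counts,
+# i.e. the configured seconds divided by the per-iteration sleep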
+
+c=0
+gc=0
+while true
+do
+ fix_names=
+ c=$((c + 1))
+ gc=$((gc + 1))
+
+ if [ ${c} -le 1 -o ${c} -ge ${names_every} ]
+ then
+ c=1
+ fix_names="YES"
+ find_tc_devices
+ fi
+
+ for d in ${tc_devices}
+ do
+ show_tc ${d}
+ done
+
+ echo "WORKTIME ${LOOPSLEEPMS_LASTWORK}"
+
+ loopsleepms ${update_every}
+
+ [ ${gc} -gt ${exit_after} ] && exit 0
+done
diff --git a/collectors/tc.plugin/tc-qos-helper.sh.in b/collectors/tc.plugin/tc-qos-helper.sh.in
new file mode 100755
index 000000000..6f6b0a591
--- /dev/null
+++ b/collectors/tc.plugin/tc-qos-helper.sh.in
@@ -0,0 +1,315 @@
+#!/usr/bin/env bash
+
+# netdata
+# real-time performance and health monitoring, done right!
+# (C) 2017 Costa Tsaousis <costa@tsaousis.gr>
+# SPDX-License-Identifier: GPL-3.0-or-later
+#
+# This script is a helper that allows netdata to collect tc data.
+# tc output parsing is implemented in C, inside netdata;
+# this script only assigns human-readable names to the dimensions.
+
+export PATH="${PATH}:/sbin:/usr/sbin:/usr/local/sbin"
+export LC_ALL=C
+
+
+# -----------------------------------------------------------------------------
+# logging functions
+
+PROGRAM_FILE="$0"
+PROGRAM_NAME="$(basename $0)"
+PROGRAM_NAME="${PROGRAM_NAME/.plugin}"
+
+logdate() {
+ date "+%Y-%m-%d %H:%M:%S"
+}
+
+log() {
+ local status="${1}"
+ shift
+
+ echo >&2 "$(logdate): ${PROGRAM_NAME}: ${status}: ${*}"
+
+}
+
+warning() {
+ log WARNING "${@}"
+}
+
+error() {
+ log ERROR "${@}"
+}
+
+info() {
+ log INFO "${@}"
+}
+
+fatal() {
+ log FATAL "${@}"
+ exit 1
+}
+
+debug=0
+debug() {
+ [ $debug -eq 1 ] && log DEBUG "${@}"
+}
+
+# -----------------------------------------------------------------------------
+# find /var/run/fireqos
+
+# the default
+fireqos_run_dir="/var/run/fireqos"
+
+function realdir {
+ local r="$1"
+ local t=$(readlink "$r")
+
+ while [ "$t" ]
+ do
+ r=$(cd $(dirname "$r") && cd $(dirname "$t") && pwd -P)/$(basename "$t")
+ t=$(readlink "$r")
+ done
+
+ dirname "$r"
+}
+
+if [ ! -d "${fireqos_run_dir}" ]
+ then
+
+ # the fireqos executable - we will use it to find its config
+ fireqos="$(which fireqos 2>/dev/null || command -v fireqos 2>/dev/null)"
+
+ if [ ! -z "${fireqos}" ]
+ then
+
+ fireqos_exec_dir="$(realdir ${fireqos})"
+
+ if [ ! -z "${fireqos_exec_dir}" -a "${fireqos_exec_dir}" != "." -a -f "${fireqos_exec_dir}/install.config" ]
+ then
+
+ LOCALSTATEDIR=
+ source "${fireqos_exec_dir}/install.config"
+
+ if [ -d "${LOCALSTATEDIR}/run/fireqos" ]
+ then
+ fireqos_run_dir="${LOCALSTATEDIR}/run/fireqos"
+ else
+ warning "FireQoS is installed as '${fireqos}', its installation config at '${fireqos_exec_dir}/install.config' specifies local state data at '${LOCALSTATEDIR}/run/fireqos', but this directory is not found or is not readable (check the permissions of its parents)."
+ fi
+ else
+ warning "Although FireQoS is installed on this system as '${fireqos}', I cannot find/read its installation configuration at '${fireqos_exec_dir}/install.config'."
+ fi
+ else
+ warning "FireQoS is not installed on this system. Use FireQoS to apply traffic QoS and expose the class names to netdata. Check https://github.com/netdata/netdata/wiki/You-should-install-QoS-on-all-your-servers"
+ fi
+fi
+
+# -----------------------------------------------------------------------------
+
+[ -z "${NETDATA_PLUGINS_DIR}" ] && NETDATA_PLUGINS_DIR="$(dirname "${0}")"
+[ -z "${NETDATA_USER_CONFIG_DIR}" ] && NETDATA_USER_CONFIG_DIR="@configdir_POST@"
+[ -z "${NETDATA_STOCK_CONFIG_DIR}" ] && NETDATA_STOCK_CONFIG_DIR="@libconfigdir_POST@"
+
+plugins_dir="${NETDATA_PLUGINS_DIR}"
+tc="$(which tc 2>/dev/null || command -v tc 2>/dev/null)"
+
+
+# -----------------------------------------------------------------------------
+# user configuration
+
+# time in seconds to refresh QoS class/qdisc names
+qos_get_class_names_every=120
+
+# time in seconds to exit - netdata will restart the script
+qos_exit_every=3600
+
+# what to use? classes or qdiscs?
+tc_show="qdisc" # can also be "class"
+
+
+# -----------------------------------------------------------------------------
+# check if we have a valid number for interval
+
+t=${1}
+update_every=$((t))
+[ $((update_every)) -lt 1 ] && update_every=${NETDATA_UPDATE_EVERY}
+[ $((update_every)) -lt 1 ] && update_every=1
+
+
+# -----------------------------------------------------------------------------
+# allow the user to override our defaults
+
+for CONFIG in "${NETDATA_STOCK_CONFIG_DIR}/tc-qos-helper.conf" "${NETDATA_USER_CONFIG_DIR}/tc-qos-helper.conf"
+do
+ if [ -f "${CONFIG}" ]
+ then
+ info "Loading config file '${CONFIG}'..."
+ source "${CONFIG}"
+ [ $? -ne 0 ] && error "Failed to load config file '${CONFIG}'."
+ else
+ warning "Cannot find file '${CONFIG}'."
+ fi
+done
+
+case "${tc_show}" in
+ qdisc|class)
+ ;;
+
+ *)
+ error "tc_show variable can be either 'qdisc' or 'class' but is set to '${tc_show}'. Assuming it is 'qdisc'."
+ tc_show="qdisc"
+ ;;
+esac
+
+
+# -----------------------------------------------------------------------------
+# default sleep function
+
+LOOPSLEEPMS_LASTWORK=0
+loopsleepms() {
+ sleep $1
+}
+
+# if found and included, this file overwrites loopsleepms()
+# with a high resolution timer function for precise looping.
+. "${plugins_dir}/loopsleepms.sh.inc"
+
+
+# -----------------------------------------------------------------------------
+# final checks we can run
+
+if [ -z "${tc}" -o ! -x "${tc}" ]
+ then
+ fatal "cannot find command 'tc' in this system."
+fi
+
+tc_devices=
+fix_names=
+
+# -----------------------------------------------------------------------------
+
+setclassname() {
+ if [ "${tc_show}" = "qdisc" ]
+ then
+ echo "SETCLASSNAME $4 $2"
+ else
+ echo "SETCLASSNAME $3 $2"
+ fi
+}
+
+show_tc_cls() {
+ [ "${tc_show}" = "qdisc" ] && return 1
+
+ local x="${1}"
+
+ if [ -f /etc/iproute2/tc_cls ]
+ then
+ local classid name rest
+ while read classid name rest
+ do
+ [ -z "${classid}" -o -z "${name}" -o "${classid}" = "#" -o "${name}" = "#" -o "${classid:0:1}" = "#" -o "${name:0:1}" = "#" ] && continue
+ setclassname "" "${name}" "${classid}"
+ done </etc/iproute2/tc_cls
+ return 0
+ fi
+ return 1
+}
+
+show_fireqos_names() {
+ local x="${1}" name n interface_dev interface_classes interface_classes_monitor
+
+ if [ -f "${fireqos_run_dir}/ifaces/${x}" ]
+ then
+ name="$(<"${fireqos_run_dir}/ifaces/${x}")"
+ echo "SETDEVICENAME ${name}"
+
+ interface_dev=
+ interface_classes=
+ interface_classes_monitor=
+ source "${fireqos_run_dir}/${name}.conf"
+ for n in ${interface_classes_monitor}
+ do
+ setclassname ${n//|/ }
+ done
+ [ ! -z "${interface_dev}" ] && echo "SETDEVICEGROUP ${interface_dev}"
+
+ return 0
+ fi
+
+ return 1
+}
+
+show_tc() {
+ local x="${1}"
+
+ echo "BEGIN ${x}"
+
+ # netdata can parse the output of tc
+ ${tc} -s ${tc_show} show dev ${x}
+
+ # check FireQOS names for classes
+ if [ ! -z "${fix_names}" ]
+ then
+ show_fireqos_names "${x}" || show_tc_cls "${x}"
+ fi
+
+ echo "END ${x}"
+}
+
+find_tc_devices() {
+ local count=0 devs= dev rest l
+
+ # find all the devices in the system
+ # without forking
+ while IFS=":| " read dev rest
+ do
+ count=$((count + 1))
+ [ ${count} -le 2 ] && continue
+ devs="${devs} ${dev}"
+ done </proc/net/dev
+
+ # from all the devices find the ones
+ # that have QoS defined
+ # unfortunately, one fork per device cannot be avoided
+ tc_devices=
+ for dev in ${devs}
+ do
+ l="$(${tc} class show dev ${dev} 2>/dev/null)"
+ [ ! -z "${l}" ] && tc_devices="${tc_devices} ${dev}"
+ done
+}
+
+# update devices and class names
+# once every 2 minutes
+names_every=$((qos_get_class_names_every / update_every))
+
+# exit this script every hour
+# it will be restarted automatically
+exit_after=$((qos_exit_every / update_every))
+
+c=0
+gc=0
+while true
+do
+ fix_names=
+ c=$((c + 1))
+ gc=$((gc + 1))
+
+ if [ ${c} -le 1 -o ${c} -ge ${names_every} ]
+ then
+ c=1
+ fix_names="YES"
+ find_tc_devices
+ fi
+
+ for d in ${tc_devices}
+ do
+ show_tc ${d}
+ done
+
+ echo "WORKTIME ${LOOPSLEEPMS_LASTWORK}"
+
+ loopsleepms ${update_every}
+
+ [ ${gc} -gt ${exit_after} ] && exit 0
+done