author    Daniel Baumann <daniel.baumann@progress-linux.org>  2019-02-08 07:30:37 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org>  2019-02-08 07:30:37 +0000
commit    8a7b72f7cd1ccd547a03eb4243294e741d661d3f (patch)
tree      7bc7be4a8e9e298daa1349348400aa2a653866f2 /collectors
parent    New upstream version 1.11.1+dfsg (diff)
Adding upstream version 1.12.0. (upstream/1.12.0)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'collectors')
-rw-r--r--  collectors/Makefile.am | 1
-rw-r--r--  collectors/Makefile.in | 663
-rw-r--r--  collectors/README.md | 5
-rw-r--r--  collectors/all.h | 17
-rw-r--r--  collectors/apps.plugin/Makefile.in | 521
-rw-r--r--  collectors/apps.plugin/README.md | 15
-rw-r--r--  collectors/apps.plugin/apps_groups.conf | 5
-rw-r--r--  collectors/apps.plugin/apps_plugin.c | 133
-rw-r--r--  collectors/cgroups.plugin/Makefile.in | 563
-rw-r--r--  collectors/cgroups.plugin/README.md | 15
-rw-r--r--  collectors/cgroups.plugin/cgroup-name.sh | 196
-rwxr-xr-x  collectors/cgroups.plugin/cgroup-name.sh.in | 258
-rw-r--r--  collectors/cgroups.plugin/cgroup-network.c | 7
-rw-r--r--  collectors/cgroups.plugin/sys_fs_cgroup.c | 44
-rw-r--r--  collectors/charts.d.plugin/.keep | 0
-rw-r--r--  collectors/charts.d.plugin/Makefile.am | 3
-rw-r--r--  collectors/charts.d.plugin/Makefile.in | 953
-rw-r--r--  collectors/charts.d.plugin/README.md | 2
-rw-r--r--  collectors/charts.d.plugin/ap/README.md | 2
-rw-r--r--  collectors/charts.d.plugin/ap/ap.chart.sh | 91
-rw-r--r--  collectors/charts.d.plugin/apache/README.md | 8
-rw-r--r--  collectors/charts.d.plugin/apache/apache.chart.sh | 79
-rw-r--r--  collectors/charts.d.plugin/apcupsd/README.md | 7
-rw-r--r--  collectors/charts.d.plugin/apcupsd/apcupsd.chart.sh | 189
-rwxr-xr-x  collectors/charts.d.plugin/charts.d.dryrun-helper.sh | 58
-rw-r--r--  collectors/charts.d.plugin/charts.d.plugin | 743
-rwxr-xr-x  collectors/charts.d.plugin/charts.d.plugin.in | 924
-rw-r--r--  collectors/charts.d.plugin/cpu_apps/README.md | 6
-rw-r--r--  collectors/charts.d.plugin/cpu_apps/cpu_apps.chart.sh | 20
-rw-r--r--  collectors/charts.d.plugin/cpufreq/README.md | 6
-rw-r--r--  collectors/charts.d.plugin/cpufreq/cpufreq.chart.sh | 18
-rw-r--r--  collectors/charts.d.plugin/example/README.md | 4
-rw-r--r--  collectors/charts.d.plugin/example/example.chart.sh | 15
-rw-r--r--  collectors/charts.d.plugin/exim/README.md | 6
-rw-r--r--  collectors/charts.d.plugin/exim/exim.chart.sh | 26
-rw-r--r--  collectors/charts.d.plugin/hddtemp/README.md | 8
-rw-r--r--  collectors/charts.d.plugin/hddtemp/hddtemp.chart.sh | 24
-rw-r--r--  collectors/charts.d.plugin/libreswan/README.md | 2
-rw-r--r--  collectors/charts.d.plugin/libreswan/libreswan.chart.sh | 32
-rw-r--r--  collectors/charts.d.plugin/load_average/README.md | 4
-rw-r--r--  collectors/charts.d.plugin/load_average/load_average.chart.sh | 14
-rw-r--r--  collectors/charts.d.plugin/loopsleepms.sh.inc | 282
-rw-r--r--  collectors/charts.d.plugin/mem_apps/README.md | 6
-rw-r--r--  collectors/charts.d.plugin/mem_apps/mem_apps.chart.sh | 13
-rw-r--r--  collectors/charts.d.plugin/mysql/README.md | 8
-rw-r--r--  collectors/charts.d.plugin/mysql/mysql.chart.sh | 71
-rw-r--r--  collectors/charts.d.plugin/nginx/README.md | 6
-rw-r--r--  collectors/charts.d.plugin/nginx/nginx.chart.sh | 37
-rw-r--r--  collectors/charts.d.plugin/nut/README.md | 2
-rw-r--r--  collectors/charts.d.plugin/nut/nut.chart.sh | 35
-rw-r--r--  collectors/charts.d.plugin/opensips/README.md | 7
-rw-r--r--  collectors/charts.d.plugin/opensips/opensips.chart.sh | 10
-rw-r--r--  collectors/charts.d.plugin/phpfpm/README.md | 6
-rw-r--r--  collectors/charts.d.plugin/phpfpm/phpfpm.chart.sh | 49
-rw-r--r--  collectors/charts.d.plugin/postfix/README.md | 8
-rw-r--r--  collectors/charts.d.plugin/postfix/postfix.chart.sh | 14
-rw-r--r--  collectors/charts.d.plugin/sensors/README.md | 7
-rw-r--r--  collectors/charts.d.plugin/sensors/sensors.chart.sh | 195
-rw-r--r--  collectors/charts.d.plugin/squid/README.md | 9
-rw-r--r--  collectors/charts.d.plugin/squid/squid.chart.sh | 20
-rw-r--r--  collectors/charts.d.plugin/tomcat/README.md | 6
-rw-r--r--  collectors/charts.d.plugin/tomcat/tomcat.chart.sh | 26
-rw-r--r--  collectors/checks.plugin/Makefile.in | 464
-rw-r--r--  collectors/checks.plugin/README.md | 4
-rw-r--r--  collectors/cups.plugin/Makefile.am | 9
-rw-r--r--  collectors/cups.plugin/README.md | 49
-rw-r--r--  collectors/cups.plugin/cups_plugin.c | 449
-rw-r--r--  collectors/diskspace.plugin/Makefile.in | 464
-rw-r--r--  collectors/diskspace.plugin/README.md | 29
-rw-r--r--  collectors/diskspace.plugin/plugin_diskspace.c | 6
-rw-r--r--  collectors/fping.plugin/Makefile.in | 591
-rw-r--r--  collectors/fping.plugin/README.md | 2
-rw-r--r--  collectors/fping.plugin/fping.plugin | 200
-rw-r--r--  collectors/freebsd.plugin/Makefile.in | 464
-rw-r--r--  collectors/freebsd.plugin/README.md | 4
-rw-r--r--  collectors/freebsd.plugin/freebsd_devstat.c | 50
-rw-r--r--  collectors/freebsd.plugin/freebsd_getifaddrs.c | 52
-rw-r--r--  collectors/freebsd.plugin/freebsd_getmntinfo.c | 12
-rw-r--r--  collectors/freebsd.plugin/freebsd_ipfw.c | 8
-rw-r--r--  collectors/freebsd.plugin/freebsd_kstat_zfs.c | 112
-rw-r--r--  collectors/freebsd.plugin/freebsd_sysctl.c | 60
-rw-r--r--  collectors/freeipmi.plugin/Makefile.in | 464
-rw-r--r--  collectors/freeipmi.plugin/README.md | 14
-rw-r--r--  collectors/freeipmi.plugin/freeipmi_plugin.c | 44
-rw-r--r--  collectors/idlejitter.plugin/Makefile.in | 464
-rw-r--r--  collectors/idlejitter.plugin/README.md | 6
-rw-r--r--  collectors/macos.plugin/Makefile.in | 464
-rw-r--r--  collectors/macos.plugin/README.md | 4
-rw-r--r--  collectors/macos.plugin/macos_fw.c | 14
-rw-r--r--  collectors/macos.plugin/macos_mach_smi.c | 12
-rw-r--r--  collectors/macos.plugin/macos_sysctl.c | 4
-rw-r--r--  collectors/nfacct.plugin/Makefile.in | 464
-rw-r--r--  collectors/nfacct.plugin/README.md | 2
-rw-r--r--  collectors/node.d.plugin/.keep | 0
-rw-r--r--  collectors/node.d.plugin/Makefile.am | 3
-rw-r--r--  collectors/node.d.plugin/Makefile.in | 805
-rw-r--r--  collectors/node.d.plugin/README.md | 2
-rw-r--r--  collectors/node.d.plugin/fronius/README.md | 2
-rw-r--r--  collectors/node.d.plugin/named/README.md | 2
-rw-r--r--  collectors/node.d.plugin/node.d.plugin | 303
-rw-r--r--  collectors/node.d.plugin/sma_webbox/README.md | 4
-rw-r--r--  collectors/node.d.plugin/sma_webbox/sma_webbox.node.js | 13
-rw-r--r--  collectors/node.d.plugin/snmp/README.md | 26
-rw-r--r--  collectors/node.d.plugin/snmp/snmp.node.js | 4
-rw-r--r--  collectors/node.d.plugin/stiebeleltron/README.md | 2
-rw-r--r--  collectors/plugins.d/Makefile.in | 647
-rw-r--r--  collectors/plugins.d/README.md | 11
-rw-r--r--  collectors/proc.plugin/Makefile.in | 464
-rw-r--r-- [-rwxr-xr-x]  collectors/proc.plugin/README.md | 112
-rw-r--r--  collectors/proc.plugin/plugin_proc.c | 4
-rw-r--r--  collectors/proc.plugin/plugin_proc.h | 2
-rw-r--r--  collectors/proc.plugin/proc_diskstats.c | 20
-rw-r--r--  collectors/proc.plugin/proc_mdstat.c | 641
-rw-r--r--  collectors/proc.plugin/proc_meminfo.c | 22
-rw-r--r--  collectors/proc.plugin/proc_net_rpc_nfsd.c | 2
-rw-r--r--  collectors/proc.plugin/proc_net_sockstat.c | 6
-rw-r--r--  collectors/proc.plugin/proc_net_stat_synproxy.c | 2
-rw-r--r--  collectors/proc.plugin/proc_spl_kstat_zfs.c | 43
-rw-r--r-- [-rwxr-xr-x]  collectors/proc.plugin/proc_stat.c | 347
-rw-r--r--  collectors/proc.plugin/proc_vmstat.c | 6
-rw-r--r--  collectors/proc.plugin/sys_class_power_supply.c | 383
-rw-r--r--  collectors/proc.plugin/sys_fs_btrfs.c | 8
-rw-r--r--  collectors/proc.plugin/sys_kernel_mm_ksm.c | 4
-rw-r--r--  collectors/proc.plugin/zfs_common.c | 104
-rw-r--r--  collectors/proc.plugin/zfs_common.h | 4
-rw-r--r--  collectors/python.d.plugin/.keep | 0
-rw-r--r--  collectors/python.d.plugin/Makefile.am | 3
-rw-r--r--  collectors/python.d.plugin/Makefile.in | 2025
-rw-r--r--  collectors/python.d.plugin/README.md | 55
-rw-r--r--  collectors/python.d.plugin/adaptec_raid/README.md | 2
-rw-r--r--  collectors/python.d.plugin/adaptec_raid/adaptec_raid.conf | 10
-rw-r--r--  collectors/python.d.plugin/apache/README.md | 4
-rw-r--r--  collectors/python.d.plugin/apache/apache.chart.py | 114
-rw-r--r--  collectors/python.d.plugin/apache/apache.conf | 10
-rw-r--r--  collectors/python.d.plugin/beanstalk/README.md | 2
-rw-r--r--  collectors/python.d.plugin/beanstalk/beanstalk.chart.py | 17
-rw-r--r--  collectors/python.d.plugin/beanstalk/beanstalk.conf | 10
-rw-r--r--  collectors/python.d.plugin/bind_rndc/README.md | 2
-rw-r--r--  collectors/python.d.plugin/bind_rndc/bind_rndc.chart.py | 30
-rw-r--r--  collectors/python.d.plugin/bind_rndc/bind_rndc.conf | 10
-rw-r--r--  collectors/python.d.plugin/boinc/README.md | 2
-rw-r--r--  collectors/python.d.plugin/boinc/boinc.chart.py | 14
-rw-r--r--  collectors/python.d.plugin/boinc/boinc.conf | 10
-rw-r--r--  collectors/python.d.plugin/ceph/README.md | 2
-rw-r--r--  collectors/python.d.plugin/ceph/ceph.chart.py | 19
-rw-r--r--  collectors/python.d.plugin/ceph/ceph.conf | 10
-rw-r--r--  collectors/python.d.plugin/chrony/README.md | 2
-rw-r--r--  collectors/python.d.plugin/chrony/chrony.chart.py | 16
-rw-r--r--  collectors/python.d.plugin/chrony/chrony.conf | 10
-rw-r--r--  collectors/python.d.plugin/couchdb/README.md | 2
-rw-r--r--  collectors/python.d.plugin/couchdb/couchdb.chart.py | 47
-rw-r--r--  collectors/python.d.plugin/couchdb/couchdb.conf | 10
-rw-r--r--  collectors/python.d.plugin/cpufreq/README.md | 7
-rw-r--r--  collectors/python.d.plugin/cpufreq/cpufreq.conf | 8
-rw-r--r--  collectors/python.d.plugin/cpuidle/README.md | 2
-rw-r--r--  collectors/python.d.plugin/cpuidle/cpuidle.conf | 8
-rw-r--r--  collectors/python.d.plugin/dns_query_time/README.md | 2
-rw-r--r--  collectors/python.d.plugin/dns_query_time/dns_query_time.chart.py | 21
-rw-r--r--  collectors/python.d.plugin/dns_query_time/dns_query_time.conf | 10
-rw-r--r--  collectors/python.d.plugin/dnsdist/README.md | 2
-rw-r--r--  collectors/python.d.plugin/dnsdist/dnsdist.chart.py | 4
-rw-r--r--  collectors/python.d.plugin/dnsdist/dnsdist.conf | 10
-rw-r--r--  collectors/python.d.plugin/dockerd/README.md | 4
-rw-r--r--  collectors/python.d.plugin/dockerd/dockerd.chart.py | 26
-rw-r--r--  collectors/python.d.plugin/dockerd/dockerd.conf | 10
-rw-r--r--  collectors/python.d.plugin/dovecot/README.md | 6
-rw-r--r--  collectors/python.d.plugin/dovecot/dovecot.chart.py | 25
-rw-r--r--  collectors/python.d.plugin/dovecot/dovecot.conf | 14
-rw-r--r--  collectors/python.d.plugin/elasticsearch/README.md | 2
-rw-r--r--  collectors/python.d.plugin/elasticsearch/elasticsearch.chart.py | 83
-rw-r--r--  collectors/python.d.plugin/elasticsearch/elasticsearch.conf | 10
-rw-r--r--  collectors/python.d.plugin/example/README.md | 6
-rw-r--r--  collectors/python.d.plugin/example/example.chart.py | 9
-rw-r--r--  collectors/python.d.plugin/example/example.conf | 10
-rw-r--r--  collectors/python.d.plugin/exim/README.md | 2
-rw-r--r--  collectors/python.d.plugin/exim/exim.chart.py | 13
-rw-r--r--  collectors/python.d.plugin/exim/exim.conf | 10
-rw-r--r--  collectors/python.d.plugin/fail2ban/README.md | 2
-rw-r--r--  collectors/python.d.plugin/fail2ban/fail2ban.chart.py | 24
-rw-r--r--  collectors/python.d.plugin/fail2ban/fail2ban.conf | 10
-rw-r--r--  collectors/python.d.plugin/freeradius/README.md | 2
-rw-r--r--  collectors/python.d.plugin/freeradius/freeradius.chart.py | 108
-rw-r--r--  collectors/python.d.plugin/freeradius/freeradius.conf | 10
-rw-r--r--  collectors/python.d.plugin/go_expvar/README.md | 3
-rw-r--r--  collectors/python.d.plugin/go_expvar/go_expvar.chart.py | 69
-rw-r--r--  collectors/python.d.plugin/go_expvar/go_expvar.conf | 10
-rw-r--r--  collectors/python.d.plugin/haproxy/README.md | 2
-rw-r--r--  collectors/python.d.plugin/haproxy/haproxy.chart.py | 33
-rw-r--r--  collectors/python.d.plugin/haproxy/haproxy.conf | 10
-rw-r--r--  collectors/python.d.plugin/hddtemp/README.md | 2
-rw-r--r--  collectors/python.d.plugin/hddtemp/hddtemp.chart.py | 9
-rw-r--r--  collectors/python.d.plugin/hddtemp/hddtemp.conf | 10
-rw-r--r--  collectors/python.d.plugin/httpcheck/README.md | 2
-rw-r--r--  collectors/python.d.plugin/httpcheck/httpcheck.chart.py | 13
-rw-r--r--  collectors/python.d.plugin/httpcheck/httpcheck.conf | 6
-rw-r--r--  collectors/python.d.plugin/icecast/README.md | 2
-rw-r--r--  collectors/python.d.plugin/icecast/icecast.chart.py | 8
-rw-r--r--  collectors/python.d.plugin/icecast/icecast.conf | 10
-rw-r--r--  collectors/python.d.plugin/ipfs/README.md | 2
-rw-r--r--  collectors/python.d.plugin/ipfs/ipfs.chart.py | 36
-rw-r--r--  collectors/python.d.plugin/ipfs/ipfs.conf | 10
-rw-r--r--  collectors/python.d.plugin/isc_dhcpd/README.md | 2
-rw-r--r--  collectors/python.d.plugin/isc_dhcpd/isc_dhcpd.chart.py | 30
-rw-r--r--  collectors/python.d.plugin/isc_dhcpd/isc_dhcpd.conf | 10
-rw-r--r--  collectors/python.d.plugin/linux_power_supply/README.md | 9
-rw-r--r--  collectors/python.d.plugin/linux_power_supply/linux_power_supply.conf | 10
-rw-r--r--  collectors/python.d.plugin/litespeed/README.md | 2
-rw-r--r--  collectors/python.d.plugin/litespeed/litespeed.chart.py | 14
-rw-r--r--  collectors/python.d.plugin/litespeed/litespeed.conf | 10
-rw-r--r--  collectors/python.d.plugin/logind/README.md | 2
-rw-r--r--  collectors/python.d.plugin/logind/logind.chart.py | 10
-rw-r--r--  collectors/python.d.plugin/logind/logind.conf | 10
-rw-r--r--  collectors/python.d.plugin/mdstat/README.md | 7
-rw-r--r--  collectors/python.d.plugin/mdstat/mdstat.conf | 8
-rw-r--r--  collectors/python.d.plugin/megacli/README.md | 2
-rw-r--r--  collectors/python.d.plugin/megacli/megacli.chart.py | 4
-rw-r--r--  collectors/python.d.plugin/megacli/megacli.conf | 10
-rw-r--r--  collectors/python.d.plugin/memcached/README.md | 2
-rw-r--r--  collectors/python.d.plugin/memcached/memcached.chart.py | 48
-rw-r--r--  collectors/python.d.plugin/memcached/memcached.conf | 10
-rw-r--r--  collectors/python.d.plugin/mongodb/README.md | 29
-rw-r--r--  collectors/python.d.plugin/mongodb/mongodb.chart.py | 22
-rw-r--r--  collectors/python.d.plugin/mongodb/mongodb.conf | 10
-rw-r--r--  collectors/python.d.plugin/monit/README.md | 2
-rw-r--r--  collectors/python.d.plugin/monit/monit.chart.py | 26
-rw-r--r--  collectors/python.d.plugin/monit/monit.conf | 10
-rw-r--r--  collectors/python.d.plugin/mysql/README.md | 4
-rw-r--r--  collectors/python.d.plugin/mysql/mysql.chart.py | 121
-rw-r--r--  collectors/python.d.plugin/mysql/mysql.conf | 11
-rw-r--r--  collectors/python.d.plugin/nginx/README.md | 3
-rw-r--r--  collectors/python.d.plugin/nginx/nginx.chart.py | 30
-rw-r--r--  collectors/python.d.plugin/nginx/nginx.conf | 10
-rw-r--r--  collectors/python.d.plugin/nginx_plus/README.md | 2
-rw-r--r--  collectors/python.d.plugin/nginx_plus/nginx_plus.chart.py | 18
-rw-r--r--  collectors/python.d.plugin/nginx_plus/nginx_plus.conf | 10
-rw-r--r--  collectors/python.d.plugin/nsd/README.md | 2
-rw-r--r--  collectors/python.d.plugin/nsd/nsd.chart.py | 30
-rw-r--r--  collectors/python.d.plugin/nsd/nsd.conf | 10
-rw-r--r--  collectors/python.d.plugin/ntpd/README.md | 2
-rw-r--r--  collectors/python.d.plugin/ntpd/ntpd.chart.py | 28
-rw-r--r--  collectors/python.d.plugin/ntpd/ntpd.conf | 10
-rw-r--r--  collectors/python.d.plugin/nvidia_smi/README.md | 3
-rw-r--r--  collectors/python.d.plugin/nvidia_smi/nvidia_smi.chart.py | 29
-rw-r--r--  collectors/python.d.plugin/nvidia_smi/nvidia_smi.conf | 10
-rw-r--r--  collectors/python.d.plugin/openldap/README.md | 2
-rw-r--r--  collectors/python.d.plugin/openldap/openldap.chart.py | 6
-rw-r--r--  collectors/python.d.plugin/openldap/openldap.conf | 10
-rw-r--r--  collectors/python.d.plugin/ovpn_status_log/README.md | 2
-rw-r--r--  collectors/python.d.plugin/ovpn_status_log/ovpn_status_log.chart.py | 24
-rw-r--r--  collectors/python.d.plugin/ovpn_status_log/ovpn_status_log.conf | 10
-rw-r--r--  collectors/python.d.plugin/phpfpm/README.md | 3
-rw-r--r--  collectors/python.d.plugin/phpfpm/phpfpm.chart.py | 41
-rw-r--r--  collectors/python.d.plugin/phpfpm/phpfpm.conf | 10
-rw-r--r--  collectors/python.d.plugin/portcheck/README.md | 2
-rw-r--r--  collectors/python.d.plugin/portcheck/portcheck.chart.py | 7
-rw-r--r--  collectors/python.d.plugin/portcheck/portcheck.conf | 6
-rw-r--r--  collectors/python.d.plugin/postfix/README.md | 2
-rw-r--r--  collectors/python.d.plugin/postfix/postfix.chart.py | 15
-rw-r--r--  collectors/python.d.plugin/postfix/postfix.conf | 10
-rw-r--r--  collectors/python.d.plugin/postgres/README.md | 2
-rw-r--r--  collectors/python.d.plugin/postgres/postgres.chart.py | 699
-rw-r--r--  collectors/python.d.plugin/postgres/postgres.conf | 22
-rw-r--r--  collectors/python.d.plugin/powerdns/README.md | 2
-rw-r--r--  collectors/python.d.plugin/powerdns/powerdns.chart.py | 11
-rw-r--r--  collectors/python.d.plugin/powerdns/powerdns.conf | 10
-rw-r--r--  collectors/python.d.plugin/proxysql/README.md | 2
-rw-r--r--  collectors/python.d.plugin/proxysql/proxysql.chart.py | 13
-rw-r--r--  collectors/python.d.plugin/proxysql/proxysql.conf | 10
-rw-r--r--  collectors/python.d.plugin/puppet/README.md | 7
-rw-r--r--  collectors/python.d.plugin/puppet/puppet.chart.py | 28
-rw-r--r--  collectors/python.d.plugin/puppet/puppet.conf | 12
-rw-r--r--  collectors/python.d.plugin/python.d.conf | 6
-rw-r--r--  collectors/python.d.plugin/python.d.plugin | 427
-rw-r--r-- [-rwxr-xr-x]  collectors/python.d.plugin/python.d.plugin.in | 4
-rw-r--r--  collectors/python.d.plugin/python_modules/bases/FrameworkServices/MySQLService.py | 30
-rw-r--r--  collectors/python.d.plugin/python_modules/bases/FrameworkServices/SimpleService.py | 109
-rw-r--r--  collectors/python.d.plugin/python_modules/bases/FrameworkServices/SocketService.py | 10
-rw-r--r--  collectors/python.d.plugin/python_modules/bases/FrameworkServices/UrlService.py | 19
-rw-r--r--  collectors/python.d.plugin/python_modules/bases/charts.py | 2
-rw-r--r--  collectors/python.d.plugin/python_modules/bases/loggers.py | 2
-rw-r--r--  collectors/python.d.plugin/rabbitmq/README.md | 2
-rw-r--r--  collectors/python.d.plugin/rabbitmq/rabbitmq.chart.py | 122
-rw-r--r--  collectors/python.d.plugin/rabbitmq/rabbitmq.conf | 10
-rw-r--r--  collectors/python.d.plugin/redis/README.md | 2
-rw-r--r--  collectors/python.d.plugin/redis/redis.chart.py | 13
-rw-r--r--  collectors/python.d.plugin/redis/redis.conf | 10
-rw-r--r--  collectors/python.d.plugin/rethinkdbs/README.md | 2
-rw-r--r--  collectors/python.d.plugin/rethinkdbs/rethinkdbs.chart.py | 2
-rw-r--r--  collectors/python.d.plugin/rethinkdbs/rethinkdbs.conf | 10
-rw-r--r--  collectors/python.d.plugin/retroshare/README.md | 2
-rw-r--r--  collectors/python.d.plugin/retroshare/retroshare.chart.py | 21
-rw-r--r--  collectors/python.d.plugin/retroshare/retroshare.conf | 10
-rw-r--r--  collectors/python.d.plugin/samba/README.md | 2
-rw-r--r--  collectors/python.d.plugin/samba/samba.chart.py | 7
-rw-r--r--  collectors/python.d.plugin/samba/samba.conf | 10
-rw-r--r--  collectors/python.d.plugin/sensors/README.md | 2
-rw-r--r--  collectors/python.d.plugin/sensors/sensors.chart.py | 4
-rw-r--r--  collectors/python.d.plugin/sensors/sensors.conf | 8
-rw-r--r--  collectors/python.d.plugin/smartd_log/README.md | 2
-rw-r--r--  collectors/python.d.plugin/smartd_log/smartd_log.chart.py | 22
-rw-r--r--  collectors/python.d.plugin/smartd_log/smartd_log.conf | 10
-rw-r--r--  collectors/python.d.plugin/spigotmc/README.md | 2
-rw-r--r--  collectors/python.d.plugin/spigotmc/spigotmc.chart.py | 5
-rw-r--r--  collectors/python.d.plugin/spigotmc/spigotmc.conf | 10
-rw-r--r--  collectors/python.d.plugin/springboot/README.md | 2
-rw-r--r--  collectors/python.d.plugin/springboot/springboot.chart.py | 15
-rw-r--r--  collectors/python.d.plugin/springboot/springboot.conf | 10
-rw-r--r--  collectors/python.d.plugin/squid/README.md | 2
-rw-r--r--  collectors/python.d.plugin/squid/squid.chart.py | 13
-rw-r--r--  collectors/python.d.plugin/squid/squid.conf | 10
-rw-r--r--  collectors/python.d.plugin/tomcat/README.md | 2
-rw-r--r--  collectors/python.d.plugin/tomcat/tomcat.chart.py | 63
-rw-r--r--  collectors/python.d.plugin/tomcat/tomcat.conf | 10
-rw-r--r--  collectors/python.d.plugin/tor/README.md | 2
-rw-r--r--  collectors/python.d.plugin/tor/tor.chart.py | 4
-rw-r--r--  collectors/python.d.plugin/tor/tor.conf | 10
-rw-r--r--  collectors/python.d.plugin/traefik/README.md | 3
-rw-r--r--  collectors/python.d.plugin/traefik/traefik.chart.py | 29
-rw-r--r--  collectors/python.d.plugin/traefik/traefik.conf | 10
-rw-r--r--  collectors/python.d.plugin/unbound/README.md | 2
-rw-r--r--  collectors/python.d.plugin/unbound/unbound.chart.py | 6
-rw-r--r--  collectors/python.d.plugin/unbound/unbound.conf | 10
-rw-r--r--  collectors/python.d.plugin/uwsgi/README.md | 2
-rw-r--r--  collectors/python.d.plugin/uwsgi/uwsgi.chart.py | 22
-rw-r--r--  collectors/python.d.plugin/uwsgi/uwsgi.conf | 10
-rw-r--r--  collectors/python.d.plugin/varnish/README.md | 10
-rw-r--r--  collectors/python.d.plugin/varnish/varnish.chart.py | 34
-rw-r--r--  collectors/python.d.plugin/varnish/varnish.conf | 14
-rw-r--r--  collectors/python.d.plugin/w1sensor/README.md | 2
-rw-r--r--  collectors/python.d.plugin/w1sensor/w1sensor.chart.py | 4
-rw-r--r--  collectors/python.d.plugin/w1sensor/w1sensor.conf | 10
-rw-r--r--  collectors/python.d.plugin/web_log/README.md | 4
-rw-r--r--  collectors/python.d.plugin/web_log/web_log.chart.py | 6
-rw-r--r--  collectors/python.d.plugin/web_log/web_log.conf | 10
-rw-r--r--  collectors/statsd.plugin/.keep | 0
-rw-r--r--  collectors/statsd.plugin/Makefile.am | 3
-rw-r--r--  collectors/statsd.plugin/Makefile.in | 556
-rw-r--r--  collectors/statsd.plugin/README.md | 16
-rw-r--r--  collectors/statsd.plugin/statsd.c | 2
-rw-r--r--  collectors/tc.plugin/Makefile.in | 562
-rw-r--r--  collectors/tc.plugin/README.md | 65
-rw-r--r--  collectors/tc.plugin/tc-qos-helper.sh | 315
-rwxr-xr-x  collectors/tc.plugin/tc-qos-helper.sh.in | 309
343 files changed, 6089 insertions, 17191 deletions
diff --git a/collectors/Makefile.am b/collectors/Makefile.am
index 4ecd1f17..bb4d5c61 100644
--- a/collectors/Makefile.am
+++ b/collectors/Makefile.am
@@ -8,6 +8,7 @@ SUBDIRS = \
cgroups.plugin \
charts.d.plugin \
checks.plugin \
+ cups.plugin \
diskspace.plugin \
fping.plugin \
freebsd.plugin \
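
Note that only `Makefile.am` needs the new `cups.plugin` entry; the `Makefile.in` files deleted below are automake output and are regenerated rather than edited by hand. A sketch of the usual regeneration step, assuming a checkout with the GNU autotools installed (the exact flags are illustrative, not taken from this repository):

    # Rebuild aclocal.m4, configure and every Makefile.in after
    # editing collectors/Makefile.am, then reinstantiate the Makefiles
    # and build the collectors tree.
    autoreconf -ivf
    ./configure
    make -C collectors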
diff --git a/collectors/Makefile.in b/collectors/Makefile.in
deleted file mode 100644
index 357f69d7..00000000
--- a/collectors/Makefile.in
+++ /dev/null
@@ -1,663 +0,0 @@
-# Makefile.in generated by automake 1.14.1 from Makefile.am.
-# @configure_input@
-
-# Copyright (C) 1994-2013 Free Software Foundation, Inc.
-
-# This Makefile.in is free software; the Free Software Foundation
-# gives unlimited permission to copy and/or distribute it,
-# with or without modifications, as long as this notice is preserved.
-
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
-# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
-# PARTICULAR PURPOSE.
-
-@SET_MAKE@
-
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-VPATH = @srcdir@
-am__is_gnu_make = test -n '$(MAKEFILE_LIST)' && test -n '$(MAKELEVEL)'
-am__make_running_with_option = \
- case $${target_option-} in \
- ?) ;; \
- *) echo "am__make_running_with_option: internal error: invalid" \
- "target option '$${target_option-}' specified" >&2; \
- exit 1;; \
- esac; \
- has_opt=no; \
- sane_makeflags=$$MAKEFLAGS; \
- if $(am__is_gnu_make); then \
- sane_makeflags=$$MFLAGS; \
- else \
- case $$MAKEFLAGS in \
- *\\[\ \ ]*) \
- bs=\\; \
- sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \
- | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \
- esac; \
- fi; \
- skip_next=no; \
- strip_trailopt () \
- { \
- flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \
- }; \
- for flg in $$sane_makeflags; do \
- test $$skip_next = yes && { skip_next=no; continue; }; \
- case $$flg in \
- *=*|--*) continue;; \
- -*I) strip_trailopt 'I'; skip_next=yes;; \
- -*I?*) strip_trailopt 'I';; \
- -*O) strip_trailopt 'O'; skip_next=yes;; \
- -*O?*) strip_trailopt 'O';; \
- -*l) strip_trailopt 'l'; skip_next=yes;; \
- -*l?*) strip_trailopt 'l';; \
- -[dEDm]) skip_next=yes;; \
- -[JT]) skip_next=yes;; \
- esac; \
- case $$flg in \
- *$$target_option*) has_opt=yes; break;; \
- esac; \
- done; \
- test $$has_opt = yes
-am__make_dryrun = (target_option=n; $(am__make_running_with_option))
-am__make_keepgoing = (target_option=k; $(am__make_running_with_option))
-pkgdatadir = $(datadir)/@PACKAGE@
-pkgincludedir = $(includedir)/@PACKAGE@
-pkglibdir = $(libdir)/@PACKAGE@
-pkglibexecdir = $(libexecdir)/@PACKAGE@
-am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd
-install_sh_DATA = $(install_sh) -c -m 644
-install_sh_PROGRAM = $(install_sh) -c
-install_sh_SCRIPT = $(install_sh) -c
-INSTALL_HEADER = $(INSTALL_DATA)
-transform = $(program_transform_name)
-NORMAL_INSTALL = :
-PRE_INSTALL = :
-POST_INSTALL = :
-NORMAL_UNINSTALL = :
-PRE_UNINSTALL = :
-POST_UNINSTALL = :
-build_triplet = @build@
-host_triplet = @host@
-subdir = collectors
-DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/Makefile.am \
- $(dist_noinst_DATA)
-ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
-am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \
- $(top_srcdir)/build/m4/ax_c__generic.m4 \
- $(top_srcdir)/build/m4/ax_c_lto.m4 \
- $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \
- $(top_srcdir)/build/m4/ax_c_mallopt.m4 \
- $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \
- $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \
- $(top_srcdir)/build/m4/ax_pthread.m4 \
- $(top_srcdir)/build/m4/jemalloc.m4 \
- $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac
-am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
- $(ACLOCAL_M4)
-mkinstalldirs = $(install_sh) -d
-CONFIG_HEADER = $(top_builddir)/config.h
-CONFIG_CLEAN_FILES =
-CONFIG_CLEAN_VPATH_FILES =
-AM_V_P = $(am__v_P_@AM_V@)
-am__v_P_ = $(am__v_P_@AM_DEFAULT_V@)
-am__v_P_0 = false
-am__v_P_1 = :
-AM_V_GEN = $(am__v_GEN_@AM_V@)
-am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@)
-am__v_GEN_0 = @echo " GEN " $@;
-am__v_GEN_1 =
-AM_V_at = $(am__v_at_@AM_V@)
-am__v_at_ = $(am__v_at_@AM_DEFAULT_V@)
-am__v_at_0 = @
-am__v_at_1 =
-SOURCES =
-DIST_SOURCES =
-RECURSIVE_TARGETS = all-recursive check-recursive cscopelist-recursive \
- ctags-recursive dvi-recursive html-recursive info-recursive \
- install-data-recursive install-dvi-recursive \
- install-exec-recursive install-html-recursive \
- install-info-recursive install-pdf-recursive \
- install-ps-recursive install-recursive installcheck-recursive \
- installdirs-recursive pdf-recursive ps-recursive \
- tags-recursive uninstall-recursive
-am__can_run_installinfo = \
- case $$AM_UPDATE_INFO_DIR in \
- n|no|NO) false;; \
- *) (install-info --version) >/dev/null 2>&1;; \
- esac
-DATA = $(dist_noinst_DATA)
-RECURSIVE_CLEAN_TARGETS = mostlyclean-recursive clean-recursive \
- distclean-recursive maintainer-clean-recursive
-am__recursive_targets = \
- $(RECURSIVE_TARGETS) \
- $(RECURSIVE_CLEAN_TARGETS) \
- $(am__extra_recursive_targets)
-AM_RECURSIVE_TARGETS = $(am__recursive_targets:-recursive=) TAGS CTAGS \
- distdir
-am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP)
-# Read a list of newline-separated strings from the standard input,
-# and print each of them once, without duplicates. Input order is
-# *not* preserved.
-am__uniquify_input = $(AWK) '\
- BEGIN { nonempty = 0; } \
- { items[$$0] = 1; nonempty = 1; } \
- END { if (nonempty) { for (i in items) print i; }; } \
-'
-# Make sure the list of sources is unique. This is necessary because,
-# e.g., the same source file might be shared among _SOURCES variables
-# for different programs/libraries.
-am__define_uniq_tagged_files = \
- list='$(am__tagged_files)'; \
- unique=`for i in $$list; do \
- if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \
- done | $(am__uniquify_input)`
-ETAGS = etags
-CTAGS = ctags
-DIST_SUBDIRS = $(SUBDIRS)
-DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
-am__relativize = \
- dir0=`pwd`; \
- sed_first='s,^\([^/]*\)/.*$$,\1,'; \
- sed_rest='s,^[^/]*/*,,'; \
- sed_last='s,^.*/\([^/]*\)$$,\1,'; \
- sed_butlast='s,/*[^/]*$$,,'; \
- while test -n "$$dir1"; do \
- first=`echo "$$dir1" | sed -e "$$sed_first"`; \
- if test "$$first" != "."; then \
- if test "$$first" = ".."; then \
- dir2=`echo "$$dir0" | sed -e "$$sed_last"`/"$$dir2"; \
- dir0=`echo "$$dir0" | sed -e "$$sed_butlast"`; \
- else \
- first2=`echo "$$dir2" | sed -e "$$sed_first"`; \
- if test "$$first2" = "$$first"; then \
- dir2=`echo "$$dir2" | sed -e "$$sed_rest"`; \
- else \
- dir2="../$$dir2"; \
- fi; \
- dir0="$$dir0"/"$$first"; \
- fi; \
- fi; \
- dir1=`echo "$$dir1" | sed -e "$$sed_rest"`; \
- done; \
- reldir="$$dir2"
-ACLOCAL = @ACLOCAL@
-AMTAR = @AMTAR@
-AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@
-AUTOCONF = @AUTOCONF@
-AUTOHEADER = @AUTOHEADER@
-AUTOMAKE = @AUTOMAKE@
-AWK = @AWK@
-CC = @CC@
-CCDEPMODE = @CCDEPMODE@
-CFLAGS = @CFLAGS@
-CPP = @CPP@
-CPPFLAGS = @CPPFLAGS@
-CYGPATH_W = @CYGPATH_W@
-DEFS = @DEFS@
-DEPDIR = @DEPDIR@
-ECHO_C = @ECHO_C@
-ECHO_N = @ECHO_N@
-ECHO_T = @ECHO_T@
-EGREP = @EGREP@
-EXEEXT = @EXEEXT@
-GREP = @GREP@
-INSTALL = @INSTALL@
-INSTALL_DATA = @INSTALL_DATA@
-INSTALL_PROGRAM = @INSTALL_PROGRAM@
-INSTALL_SCRIPT = @INSTALL_SCRIPT@
-INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
-IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@
-IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@
-LDFLAGS = @LDFLAGS@
-LIBCAP_CFLAGS = @LIBCAP_CFLAGS@
-LIBCAP_LIBS = @LIBCAP_LIBS@
-LIBMNL_CFLAGS = @LIBMNL_CFLAGS@
-LIBMNL_LIBS = @LIBMNL_LIBS@
-LIBOBJS = @LIBOBJS@
-LIBS = @LIBS@
-LTLIBOBJS = @LTLIBOBJS@
-MAINT = @MAINT@
-MAKEINFO = @MAKEINFO@
-MATH_CFLAGS = @MATH_CFLAGS@
-MATH_LIBS = @MATH_LIBS@
-MKDIR_P = @MKDIR_P@
-NFACCT_CFLAGS = @NFACCT_CFLAGS@
-NFACCT_LIBS = @NFACCT_LIBS@
-OBJEXT = @OBJEXT@
-OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@
-OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@
-OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@
-OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@
-OPTIONAL_MATH_CLFAGS = @OPTIONAL_MATH_CLFAGS@
-OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@
-OPTIONAL_NFACCT_CLFAGS = @OPTIONAL_NFACCT_CLFAGS@
-OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@
-OPTIONAL_UUID_CLFAGS = @OPTIONAL_UUID_CLFAGS@
-OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@
-OPTIONAL_ZLIB_CLFAGS = @OPTIONAL_ZLIB_CLFAGS@
-OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@
-PACKAGE = @PACKAGE@
-PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@
-PACKAGE_NAME = @PACKAGE_NAME@
-PACKAGE_RPM_RELEASE = @PACKAGE_RPM_RELEASE@
-PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@
-PACKAGE_STRING = @PACKAGE_STRING@
-PACKAGE_TARNAME = @PACKAGE_TARNAME@
-PACKAGE_URL = @PACKAGE_URL@
-PACKAGE_VERSION = @PACKAGE_VERSION@
-PATH_SEPARATOR = @PATH_SEPARATOR@
-PKG_CONFIG = @PKG_CONFIG@
-PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@
-PKG_CONFIG_PATH = @PKG_CONFIG_PATH@
-PTHREAD_CC = @PTHREAD_CC@
-PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
-PTHREAD_LIBS = @PTHREAD_LIBS@
-SET_MAKE = @SET_MAKE@
-SHELL = @SHELL@
-SSE_CANDIDATE = @SSE_CANDIDATE@
-STRIP = @STRIP@
-UUID_CFLAGS = @UUID_CFLAGS@
-UUID_LIBS = @UUID_LIBS@
-VERSION = @VERSION@
-ZLIB_CFLAGS = @ZLIB_CFLAGS@
-ZLIB_LIBS = @ZLIB_LIBS@
-abs_builddir = @abs_builddir@
-abs_srcdir = @abs_srcdir@
-abs_top_builddir = @abs_top_builddir@
-abs_top_srcdir = @abs_top_srcdir@
-ac_ct_CC = @ac_ct_CC@
-am__include = @am__include@
-am__leading_dot = @am__leading_dot@
-am__quote = @am__quote@
-am__tar = @am__tar@
-am__untar = @am__untar@
-ax_pthread_config = @ax_pthread_config@
-bindir = @bindir@
-build = @build@
-build_alias = @build_alias@
-build_cpu = @build_cpu@
-build_os = @build_os@
-build_target = @build_target@
-build_vendor = @build_vendor@
-builddir = @builddir@
-cachedir = @cachedir@
-chartsdir = @chartsdir@
-configdir = @configdir@
-datadir = @datadir@
-datarootdir = @datarootdir@
-docdir = @docdir@
-dvidir = @dvidir@
-exec_prefix = @exec_prefix@
-has_jemalloc = @has_jemalloc@
-has_tcmalloc = @has_tcmalloc@
-host = @host@
-host_alias = @host_alias@
-host_cpu = @host_cpu@
-host_os = @host_os@
-host_vendor = @host_vendor@
-htmldir = @htmldir@
-includedir = @includedir@
-infodir = @infodir@
-install_sh = @install_sh@
-libconfigdir = @libconfigdir@
-libdir = @libdir@
-libexecdir = @libexecdir@
-localedir = @localedir@
-localstatedir = @localstatedir@
-logdir = @logdir@
-mandir = @mandir@
-mkdir_p = @mkdir_p@
-nodedir = @nodedir@
-oldincludedir = @oldincludedir@
-pdfdir = @pdfdir@
-pluginsdir = @pluginsdir@
-prefix = @prefix@
-program_transform_name = @program_transform_name@
-psdir = @psdir@
-pythondir = @pythondir@
-registrydir = @registrydir@
-sbindir = @sbindir@
-sharedstatedir = @sharedstatedir@
-srcdir = @srcdir@
-sysconfdir = @sysconfdir@
-target_alias = @target_alias@
-top_build_prefix = @top_build_prefix@
-top_builddir = @top_builddir@
-top_srcdir = @top_srcdir@
-varlibdir = @varlibdir@
-webdir = @webdir@
-MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
-SUBDIRS = \
- plugins.d \
- apps.plugin \
- cgroups.plugin \
- charts.d.plugin \
- checks.plugin \
- diskspace.plugin \
- fping.plugin \
- freebsd.plugin \
- freeipmi.plugin \
- idlejitter.plugin \
- macos.plugin \
- nfacct.plugin \
- node.d.plugin \
- proc.plugin \
- python.d.plugin \
- statsd.plugin \
- tc.plugin \
- $(NULL)
-
-dist_noinst_DATA = \
- README.md \
- $(NULL)
-
-all: all-recursive
-
-.SUFFIXES:
-$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps)
- @for dep in $?; do \
- case '$(am__configure_deps)' in \
- *$$dep*) \
- ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \
- && { if test -f $@; then exit 0; else break; fi; }; \
- exit 1;; \
- esac; \
- done; \
- echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu collectors/Makefile'; \
- $(am__cd) $(top_srcdir) && \
- $(AUTOMAKE) --gnu collectors/Makefile
-.PRECIOUS: Makefile
-Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
- @case '$?' in \
- *config.status*) \
- cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \
- *) \
- echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \
- cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \
- esac;
-
-$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES)
- cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
-
-$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps)
- cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
-$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps)
- cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
-$(am__aclocal_m4_deps):
-
-# This directory's subdirectories are mostly independent; you can cd
-# into them and run 'make' without going through this Makefile.
-# To change the values of 'make' variables: instead of editing Makefiles,
-# (1) if the variable is set in 'config.status', edit 'config.status'
-# (which will cause the Makefiles to be regenerated when you run 'make');
-# (2) otherwise, pass the desired values on the 'make' command line.
-$(am__recursive_targets):
- @fail=; \
- if $(am__make_keepgoing); then \
- failcom='fail=yes'; \
- else \
- failcom='exit 1'; \
- fi; \
- dot_seen=no; \
- target=`echo $@ | sed s/-recursive//`; \
- case "$@" in \
- distclean-* | maintainer-clean-*) list='$(DIST_SUBDIRS)' ;; \
- *) list='$(SUBDIRS)' ;; \
- esac; \
- for subdir in $$list; do \
- echo "Making $$target in $$subdir"; \
- if test "$$subdir" = "."; then \
- dot_seen=yes; \
- local_target="$$target-am"; \
- else \
- local_target="$$target"; \
- fi; \
- ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \
- || eval $$failcom; \
- done; \
- if test "$$dot_seen" = "no"; then \
- $(MAKE) $(AM_MAKEFLAGS) "$$target-am" || exit 1; \
- fi; test -z "$$fail"
-
-ID: $(am__tagged_files)
- $(am__define_uniq_tagged_files); mkid -fID $$unique
-tags: tags-recursive
-TAGS: tags
-
-tags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files)
- set x; \
- here=`pwd`; \
- if ($(ETAGS) --etags-include --version) >/dev/null 2>&1; then \
- include_option=--etags-include; \
- empty_fix=.; \
- else \
- include_option=--include; \
- empty_fix=; \
- fi; \
- list='$(SUBDIRS)'; for subdir in $$list; do \
- if test "$$subdir" = .; then :; else \
- test ! -f $$subdir/TAGS || \
- set "$$@" "$$include_option=$$here/$$subdir/TAGS"; \
- fi; \
- done; \
- $(am__define_uniq_tagged_files); \
- shift; \
- if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \
- test -n "$$unique" || unique=$$empty_fix; \
- if test $$# -gt 0; then \
- $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \
- "$$@" $$unique; \
- else \
- $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \
- $$unique; \
- fi; \
- fi
-ctags: ctags-recursive
-
-CTAGS: ctags
-ctags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files)
- $(am__define_uniq_tagged_files); \
- test -z "$(CTAGS_ARGS)$$unique" \
- || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \
- $$unique
-
-GTAGS:
- here=`$(am__cd) $(top_builddir) && pwd` \
- && $(am__cd) $(top_srcdir) \
- && gtags -i $(GTAGS_ARGS) "$$here"
-cscopelist: cscopelist-recursive
-
-cscopelist-am: $(am__tagged_files)
- list='$(am__tagged_files)'; \
- case "$(srcdir)" in \
- [\\/]* | ?:[\\/]*) sdir="$(srcdir)" ;; \
- *) sdir=$(subdir)/$(srcdir) ;; \
- esac; \
- for i in $$list; do \
- if test -f "$$i"; then \
- echo "$(subdir)/$$i"; \
- else \
- echo "$$sdir/$$i"; \
- fi; \
- done >> $(top_builddir)/cscope.files
-
-distclean-tags:
- -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags
-
-distdir: $(DISTFILES)
- @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
- topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
- list='$(DISTFILES)'; \
- dist_files=`for file in $$list; do echo $$file; done | \
- sed -e "s|^$$srcdirstrip/||;t" \
- -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \
- case $$dist_files in \
- */*) $(MKDIR_P) `echo "$$dist_files" | \
- sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \
- sort -u` ;; \
- esac; \
- for file in $$dist_files; do \
- if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \
- if test -d $$d/$$file; then \
- dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \
- if test -d "$(distdir)/$$file"; then \
- find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
- fi; \
- if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \
- cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \
- find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
- fi; \
- cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \
- else \
- test -f "$(distdir)/$$file" \
- || cp -p $$d/$$file "$(distdir)/$$file" \
- || exit 1; \
- fi; \
- done
- @list='$(DIST_SUBDIRS)'; for subdir in $$list; do \
- if test "$$subdir" = .; then :; else \
- $(am__make_dryrun) \
- || test -d "$(distdir)/$$subdir" \
- || $(MKDIR_P) "$(distdir)/$$subdir" \
- || exit 1; \
- dir1=$$subdir; dir2="$(distdir)/$$subdir"; \
- $(am__relativize); \
- new_distdir=$$reldir; \
- dir1=$$subdir; dir2="$(top_distdir)"; \
- $(am__relativize); \
- new_top_distdir=$$reldir; \
- echo " (cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) top_distdir="$$new_top_distdir" distdir="$$new_distdir" \\"; \
- echo " am__remove_distdir=: am__skip_length_check=: am__skip_mode_fix=: distdir)"; \
- ($(am__cd) $$subdir && \
- $(MAKE) $(AM_MAKEFLAGS) \
- top_distdir="$$new_top_distdir" \
- distdir="$$new_distdir" \
- am__remove_distdir=: \
- am__skip_length_check=: \
- am__skip_mode_fix=: \
- distdir) \
- || exit 1; \
- fi; \
- done
-check-am: all-am
-check: check-recursive
-all-am: Makefile $(DATA)
-installdirs: installdirs-recursive
-installdirs-am:
-install: install-recursive
-install-exec: install-exec-recursive
-install-data: install-data-recursive
-uninstall: uninstall-recursive
-
-install-am: all-am
- @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
-
-installcheck: installcheck-recursive
-install-strip:
- if test -z '$(STRIP)'; then \
- $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
- install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
- install; \
- else \
- $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
- install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
- "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \
- fi
-mostlyclean-generic:
-
-clean-generic:
-
-distclean-generic:
- -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
- -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES)
-
-maintainer-clean-generic:
- @echo "This command is intended for maintainers to use"
- @echo "it deletes files that may require special tools to rebuild."
- -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES)
-clean: clean-recursive
-
-clean-am: clean-generic mostlyclean-am
-
-distclean: distclean-recursive
- -rm -f Makefile
-distclean-am: clean-am distclean-generic distclean-tags
-
-dvi: dvi-recursive
-
-dvi-am:
-
-html: html-recursive
-
-html-am:
-
-info: info-recursive
-
-info-am:
-
-install-data-am:
-
-install-dvi: install-dvi-recursive
-
-install-dvi-am:
-
-install-exec-am:
-
-install-html: install-html-recursive
-
-install-html-am:
-
-install-info: install-info-recursive
-
-install-info-am:
-
-install-man:
-
-install-pdf: install-pdf-recursive
-
-install-pdf-am:
-
-install-ps: install-ps-recursive
-
-install-ps-am:
-
-installcheck-am:
-
-maintainer-clean: maintainer-clean-recursive
- -rm -f Makefile
-maintainer-clean-am: distclean-am maintainer-clean-generic
-
-mostlyclean: mostlyclean-recursive
-
-mostlyclean-am: mostlyclean-generic
-
-pdf: pdf-recursive
-
-pdf-am:
-
-ps: ps-recursive
-
-ps-am:
-
-uninstall-am:
-
-.MAKE: $(am__recursive_targets) install-am install-strip
-
-.PHONY: $(am__recursive_targets) CTAGS GTAGS TAGS all all-am check \
- check-am clean clean-generic cscopelist-am ctags ctags-am \
- distclean distclean-generic distclean-tags distdir dvi dvi-am \
- html html-am info info-am install install-am install-data \
- install-data-am install-dvi install-dvi-am install-exec \
- install-exec-am install-html install-html-am install-info \
- install-info-am install-man install-pdf install-pdf-am \
- install-ps install-ps-am install-strip installcheck \
- installcheck-am installdirs installdirs-am maintainer-clean \
- maintainer-clean-generic mostlyclean mostlyclean-generic pdf \
- pdf-am ps ps-am tags tags-am uninstall uninstall-am
-
-
-# Tell versions [3.59,3.63) of GNU make to not export all variables.
-# Otherwise a system limit (for SysV at least) may be exceeded.
-.NOEXPORT:
diff --git a/collectors/README.md b/collectors/README.md
index 83c92d9d..d0393dae 100644
--- a/collectors/README.md
+++ b/collectors/README.md
@@ -1,4 +1,4 @@
-# Data Collection Plugins
+# Data collection plugins
netdata supports **internal** and **external** data collection plugins:
@@ -27,6 +27,7 @@ plugin|lang|O/S|runs as|modular|description
[cgroups.plugin](cgroups.plugin/)|`C`|linux|internal|-|collects resource usage of **Containers**, libvirt **VMs** and **systemd services**, on Linux systems
[charts.d.plugin](charts.d.plugin/)|`BASH` v4+|any|external|yes|a **plugin orchestrator** for data collection modules written in `BASH` v4+.
[checks.plugin](checks.plugin/)|`C`|any|internal|-|a debugging plugin (by default it is disabled)
+[cups.plugin](cups.plugin/)|`C`|any|external|-|monitors **CUPS**
[diskspace.plugin](diskspace.plugin/)|`C`|linux|internal|-|collects disk space usage metrics on Linux mount points
[fping.plugin](fping.plugin/)|`C`|any|external|-|measures network latency, jitter and packet loss between the monitored node and any number of remote network end points.
[freebsd.plugin](freebsd.plugin/)|`C`|freebsd|internal|yes|collects resource usage and performance data on FreeBSD systems
@@ -116,3 +117,5 @@ The best way to find your way through this, is to examine what other similar plu
**External plugins** use the API and are managed by [plugins.d](plugins.d/).
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
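
The closing line of this hunk points at the plugins.d API. For orientation, below is a minimal sketch of that text protocol as an external plugin would speak it on its standard output; the chart name `example.random` and the one-second loop are hypothetical, not one of the collectors in this tree:

    #!/usr/bin/env bash
    # Declare a chart and its dimension once, at startup:
    # CHART type.id name title units family context charttype priority update_every
    echo "CHART example.random '' 'A random number' 'value' example example line 90000 1"
    echo "DIMENSION random '' absolute 1 1"

    # Then stream one BEGIN/SET/END block per update; netdata's
    # plugins.d reads these lines from the plugin's stdout.
    while true; do
        echo "BEGIN example.random"
        echo "SET random = $RANDOM"
        echo "END"
        sleep 1
    done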
diff --git a/collectors/all.h b/collectors/all.h
index aa19bd5b..7817d89b 100644
--- a/collectors/all.h
+++ b/collectors/all.h
@@ -69,6 +69,7 @@
#define NETDATA_CHART_PRIO_CPU_PER_CORE 1000 // +1 per core
#define NETDATA_CHART_PRIO_CPU_TEMPERATURE 1050 // freebsd only
#define NETDATA_CHART_PRIO_CPUFREQ_SCALING_CUR_FREQ 5003 // freebsd only
+#define NETDATA_CHART_PRIO_CPUIDLE 6000
#define NETDATA_CHART_PRIO_CORE_THROTTLING 5001
#define NETDATA_CHART_PRIO_PACKAGE_THROTTLING 5002
@@ -297,6 +298,22 @@
#define NETDATA_CHART_PRIO_SYNPROXY_CONN_OPEN 8753
#define NETDATA_CHART_PRIO_SYNPROXY_ENTRIES 8754
+// MDSTAT
+
+#define NETDATA_CHART_PRIO_MDSTAT_HEALTH 9000
+#define NETDATA_CHART_PRIO_MDSTAT_NONREDUNDANT 9001
+#define NETDATA_CHART_PRIO_MDSTAT_DISKS 9002 // 5 charts per raid
+#define NETDATA_CHART_PRIO_MDSTAT_MISMATCH 9003
+#define NETDATA_CHART_PRIO_MDSTAT_OPERATION 9004
+#define NETDATA_CHART_PRIO_MDSTAT_FINISH 9005
+#define NETDATA_CHART_PRIO_MDSTAT_SPEED 9006
+
+// Linux Power Supply
+#define NETDATA_CHART_PRIO_POWER_SUPPLY_CAPACITY 9500 // 4 charts per power supply
+#define NETDATA_CHART_PRIO_POWER_SUPPLY_CHARGE 9501
+#define NETDATA_CHART_PRIO_POWER_SUPPLY_ENERGY 9502
+#define NETDATA_CHART_PRIO_POWER_SUPPLY_VOLTAGE 9503
+
// CGROUPS
#define NETDATA_CHART_PRIO_CGROUPS_SYSTEMD 19000 // many charts
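
The new `NETDATA_CHART_PRIO_*` constants only control where charts sort on the dashboard: lower values render higher. Internal collectors pass them to the chart-creation call, and an external plugin sets the same number in the priority field of its `CHART` line. A hedged illustration (the chart, family and context names are made up for the example):

    # Priority is the 9th field of a CHART line. With the values added
    # above, an mdstat health chart (9000) would sort ahead of a
    # power-supply capacity chart (9500) on the dashboard.
    echo "CHART mdstat.health '' 'Degraded arrays' 'arrays' health md.health line 9000 1"
    echo "CHART powersupply.capacity '' 'Battery capacity' 'percent' capacity powersupply.capacity line 9500 1"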
diff --git a/collectors/apps.plugin/Makefile.in b/collectors/apps.plugin/Makefile.in
deleted file mode 100644
index 38120c04..00000000
--- a/collectors/apps.plugin/Makefile.in
+++ /dev/null
@@ -1,521 +0,0 @@
-# Makefile.in generated by automake 1.14.1 from Makefile.am.
-# @configure_input@
-
-# Copyright (C) 1994-2013 Free Software Foundation, Inc.
-
-# This Makefile.in is free software; the Free Software Foundation
-# gives unlimited permission to copy and/or distribute it,
-# with or without modifications, as long as this notice is preserved.
-
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
-# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
-# PARTICULAR PURPOSE.
-
-@SET_MAKE@
-
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-VPATH = @srcdir@
-am__is_gnu_make = test -n '$(MAKEFILE_LIST)' && test -n '$(MAKELEVEL)'
-am__make_running_with_option = \
- case $${target_option-} in \
- ?) ;; \
- *) echo "am__make_running_with_option: internal error: invalid" \
- "target option '$${target_option-}' specified" >&2; \
- exit 1;; \
- esac; \
- has_opt=no; \
- sane_makeflags=$$MAKEFLAGS; \
- if $(am__is_gnu_make); then \
- sane_makeflags=$$MFLAGS; \
- else \
- case $$MAKEFLAGS in \
- *\\[\ \ ]*) \
- bs=\\; \
- sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \
- | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \
- esac; \
- fi; \
- skip_next=no; \
- strip_trailopt () \
- { \
- flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \
- }; \
- for flg in $$sane_makeflags; do \
- test $$skip_next = yes && { skip_next=no; continue; }; \
- case $$flg in \
- *=*|--*) continue;; \
- -*I) strip_trailopt 'I'; skip_next=yes;; \
- -*I?*) strip_trailopt 'I';; \
- -*O) strip_trailopt 'O'; skip_next=yes;; \
- -*O?*) strip_trailopt 'O';; \
- -*l) strip_trailopt 'l'; skip_next=yes;; \
- -*l?*) strip_trailopt 'l';; \
- -[dEDm]) skip_next=yes;; \
- -[JT]) skip_next=yes;; \
- esac; \
- case $$flg in \
- *$$target_option*) has_opt=yes; break;; \
- esac; \
- done; \
- test $$has_opt = yes
-am__make_dryrun = (target_option=n; $(am__make_running_with_option))
-am__make_keepgoing = (target_option=k; $(am__make_running_with_option))
-pkgdatadir = $(datadir)/@PACKAGE@
-pkgincludedir = $(includedir)/@PACKAGE@
-pkglibdir = $(libdir)/@PACKAGE@
-pkglibexecdir = $(libexecdir)/@PACKAGE@
-am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd
-install_sh_DATA = $(install_sh) -c -m 644
-install_sh_PROGRAM = $(install_sh) -c
-install_sh_SCRIPT = $(install_sh) -c
-INSTALL_HEADER = $(INSTALL_DATA)
-transform = $(program_transform_name)
-NORMAL_INSTALL = :
-PRE_INSTALL = :
-POST_INSTALL = :
-NORMAL_UNINSTALL = :
-PRE_UNINSTALL = :
-POST_UNINSTALL = :
-build_triplet = @build@
-host_triplet = @host@
-subdir = collectors/apps.plugin
-DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/Makefile.am \
- $(dist_libconfig_DATA) $(dist_noinst_DATA)
-ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
-am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \
- $(top_srcdir)/build/m4/ax_c__generic.m4 \
- $(top_srcdir)/build/m4/ax_c_lto.m4 \
- $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \
- $(top_srcdir)/build/m4/ax_c_mallopt.m4 \
- $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \
- $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \
- $(top_srcdir)/build/m4/ax_pthread.m4 \
- $(top_srcdir)/build/m4/jemalloc.m4 \
- $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac
-am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
- $(ACLOCAL_M4)
-mkinstalldirs = $(install_sh) -d
-CONFIG_HEADER = $(top_builddir)/config.h
-CONFIG_CLEAN_FILES =
-CONFIG_CLEAN_VPATH_FILES =
-AM_V_P = $(am__v_P_@AM_V@)
-am__v_P_ = $(am__v_P_@AM_DEFAULT_V@)
-am__v_P_0 = false
-am__v_P_1 = :
-AM_V_GEN = $(am__v_GEN_@AM_V@)
-am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@)
-am__v_GEN_0 = @echo " GEN " $@;
-am__v_GEN_1 =
-AM_V_at = $(am__v_at_@AM_V@)
-am__v_at_ = $(am__v_at_@AM_DEFAULT_V@)
-am__v_at_0 = @
-am__v_at_1 =
-SOURCES =
-DIST_SOURCES =
-am__can_run_installinfo = \
- case $$AM_UPDATE_INFO_DIR in \
- n|no|NO) false;; \
- *) (install-info --version) >/dev/null 2>&1;; \
- esac
-am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`;
-am__vpath_adj = case $$p in \
- $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \
- *) f=$$p;; \
- esac;
-am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`;
-am__install_max = 40
-am__nobase_strip_setup = \
- srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'`
-am__nobase_strip = \
- for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||"
-am__nobase_list = $(am__nobase_strip_setup); \
- for p in $$list; do echo "$$p $$p"; done | \
- sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \
- $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \
- if (++n[$$2] == $(am__install_max)) \
- { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \
- END { for (dir in files) print dir, files[dir] }'
-am__base_list = \
- sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \
- sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g'
-am__uninstall_files_from_dir = { \
- test -z "$$files" \
- || { test ! -d "$$dir" && test ! -f "$$dir" && test ! -r "$$dir"; } \
- || { echo " ( cd '$$dir' && rm -f" $$files ")"; \
- $(am__cd) "$$dir" && rm -f $$files; }; \
- }
-am__installdirs = "$(DESTDIR)$(libconfigdir)"
-DATA = $(dist_libconfig_DATA) $(dist_noinst_DATA)
-am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP)
-DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
-ACLOCAL = @ACLOCAL@
-AMTAR = @AMTAR@
-AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@
-AUTOCONF = @AUTOCONF@
-AUTOHEADER = @AUTOHEADER@
-AUTOMAKE = @AUTOMAKE@
-AWK = @AWK@
-CC = @CC@
-CCDEPMODE = @CCDEPMODE@
-CFLAGS = @CFLAGS@
-CPP = @CPP@
-CPPFLAGS = @CPPFLAGS@
-CYGPATH_W = @CYGPATH_W@
-DEFS = @DEFS@
-DEPDIR = @DEPDIR@
-ECHO_C = @ECHO_C@
-ECHO_N = @ECHO_N@
-ECHO_T = @ECHO_T@
-EGREP = @EGREP@
-EXEEXT = @EXEEXT@
-GREP = @GREP@
-INSTALL = @INSTALL@
-INSTALL_DATA = @INSTALL_DATA@
-INSTALL_PROGRAM = @INSTALL_PROGRAM@
-INSTALL_SCRIPT = @INSTALL_SCRIPT@
-INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
-IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@
-IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@
-LDFLAGS = @LDFLAGS@
-LIBCAP_CFLAGS = @LIBCAP_CFLAGS@
-LIBCAP_LIBS = @LIBCAP_LIBS@
-LIBMNL_CFLAGS = @LIBMNL_CFLAGS@
-LIBMNL_LIBS = @LIBMNL_LIBS@
-LIBOBJS = @LIBOBJS@
-LIBS = @LIBS@
-LTLIBOBJS = @LTLIBOBJS@
-MAINT = @MAINT@
-MAKEINFO = @MAKEINFO@
-MATH_CFLAGS = @MATH_CFLAGS@
-MATH_LIBS = @MATH_LIBS@
-MKDIR_P = @MKDIR_P@
-NFACCT_CFLAGS = @NFACCT_CFLAGS@
-NFACCT_LIBS = @NFACCT_LIBS@
-OBJEXT = @OBJEXT@
-OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@
-OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@
-OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@
-OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@
-OPTIONAL_MATH_CLFAGS = @OPTIONAL_MATH_CLFAGS@
-OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@
-OPTIONAL_NFACCT_CLFAGS = @OPTIONAL_NFACCT_CLFAGS@
-OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@
-OPTIONAL_UUID_CLFAGS = @OPTIONAL_UUID_CLFAGS@
-OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@
-OPTIONAL_ZLIB_CLFAGS = @OPTIONAL_ZLIB_CLFAGS@
-OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@
-PACKAGE = @PACKAGE@
-PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@
-PACKAGE_NAME = @PACKAGE_NAME@
-PACKAGE_RPM_RELEASE = @PACKAGE_RPM_RELEASE@
-PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@
-PACKAGE_STRING = @PACKAGE_STRING@
-PACKAGE_TARNAME = @PACKAGE_TARNAME@
-PACKAGE_URL = @PACKAGE_URL@
-PACKAGE_VERSION = @PACKAGE_VERSION@
-PATH_SEPARATOR = @PATH_SEPARATOR@
-PKG_CONFIG = @PKG_CONFIG@
-PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@
-PKG_CONFIG_PATH = @PKG_CONFIG_PATH@
-PTHREAD_CC = @PTHREAD_CC@
-PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
-PTHREAD_LIBS = @PTHREAD_LIBS@
-SET_MAKE = @SET_MAKE@
-SHELL = @SHELL@
-SSE_CANDIDATE = @SSE_CANDIDATE@
-STRIP = @STRIP@
-UUID_CFLAGS = @UUID_CFLAGS@
-UUID_LIBS = @UUID_LIBS@
-VERSION = @VERSION@
-ZLIB_CFLAGS = @ZLIB_CFLAGS@
-ZLIB_LIBS = @ZLIB_LIBS@
-abs_builddir = @abs_builddir@
-abs_srcdir = @abs_srcdir@
-abs_top_builddir = @abs_top_builddir@
-abs_top_srcdir = @abs_top_srcdir@
-ac_ct_CC = @ac_ct_CC@
-am__include = @am__include@
-am__leading_dot = @am__leading_dot@
-am__quote = @am__quote@
-am__tar = @am__tar@
-am__untar = @am__untar@
-ax_pthread_config = @ax_pthread_config@
-bindir = @bindir@
-build = @build@
-build_alias = @build_alias@
-build_cpu = @build_cpu@
-build_os = @build_os@
-build_target = @build_target@
-build_vendor = @build_vendor@
-builddir = @builddir@
-cachedir = @cachedir@
-chartsdir = @chartsdir@
-configdir = @configdir@
-datadir = @datadir@
-datarootdir = @datarootdir@
-docdir = @docdir@
-dvidir = @dvidir@
-exec_prefix = @exec_prefix@
-has_jemalloc = @has_jemalloc@
-has_tcmalloc = @has_tcmalloc@
-host = @host@
-host_alias = @host_alias@
-host_cpu = @host_cpu@
-host_os = @host_os@
-host_vendor = @host_vendor@
-htmldir = @htmldir@
-includedir = @includedir@
-infodir = @infodir@
-install_sh = @install_sh@
-libconfigdir = @libconfigdir@
-libdir = @libdir@
-libexecdir = @libexecdir@
-localedir = @localedir@
-localstatedir = @localstatedir@
-logdir = @logdir@
-mandir = @mandir@
-mkdir_p = @mkdir_p@
-nodedir = @nodedir@
-oldincludedir = @oldincludedir@
-pdfdir = @pdfdir@
-pluginsdir = @pluginsdir@
-prefix = @prefix@
-program_transform_name = @program_transform_name@
-psdir = @psdir@
-pythondir = @pythondir@
-registrydir = @registrydir@
-sbindir = @sbindir@
-sharedstatedir = @sharedstatedir@
-srcdir = @srcdir@
-sysconfdir = @sysconfdir@
-target_alias = @target_alias@
-top_build_prefix = @top_build_prefix@
-top_builddir = @top_builddir@
-top_srcdir = @top_srcdir@
-varlibdir = @varlibdir@
-webdir = @webdir@
-AUTOMAKE_OPTIONS = subdir-objects
-MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
-dist_noinst_DATA = \
- README.md \
- $(NULL)
-
-dist_libconfig_DATA = \
- apps_groups.conf \
- $(NULL)
-
-all: all-am
-
-.SUFFIXES:
-$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps)
- @for dep in $?; do \
- case '$(am__configure_deps)' in \
- *$$dep*) \
- ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \
- && { if test -f $@; then exit 0; else break; fi; }; \
- exit 1;; \
- esac; \
- done; \
- echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu collectors/apps.plugin/Makefile'; \
- $(am__cd) $(top_srcdir) && \
- $(AUTOMAKE) --gnu collectors/apps.plugin/Makefile
-.PRECIOUS: Makefile
-Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
- @case '$?' in \
- *config.status*) \
- cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \
- *) \
- echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \
- cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \
- esac;
-
-$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES)
- cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
-
-$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps)
- cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
-$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps)
- cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
-$(am__aclocal_m4_deps):
-install-dist_libconfigDATA: $(dist_libconfig_DATA)
- @$(NORMAL_INSTALL)
- @list='$(dist_libconfig_DATA)'; test -n "$(libconfigdir)" || list=; \
- if test -n "$$list"; then \
- echo " $(MKDIR_P) '$(DESTDIR)$(libconfigdir)'"; \
- $(MKDIR_P) "$(DESTDIR)$(libconfigdir)" || exit 1; \
- fi; \
- for p in $$list; do \
- if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
- echo "$$d$$p"; \
- done | $(am__base_list) | \
- while read files; do \
- echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(libconfigdir)'"; \
- $(INSTALL_DATA) $$files "$(DESTDIR)$(libconfigdir)" || exit $$?; \
- done
-
-uninstall-dist_libconfigDATA:
- @$(NORMAL_UNINSTALL)
- @list='$(dist_libconfig_DATA)'; test -n "$(libconfigdir)" || list=; \
- files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
- dir='$(DESTDIR)$(libconfigdir)'; $(am__uninstall_files_from_dir)
-tags TAGS:
-
-ctags CTAGS:
-
-cscope cscopelist:
-
-
-distdir: $(DISTFILES)
- @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
- topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
- list='$(DISTFILES)'; \
- dist_files=`for file in $$list; do echo $$file; done | \
- sed -e "s|^$$srcdirstrip/||;t" \
- -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \
- case $$dist_files in \
- */*) $(MKDIR_P) `echo "$$dist_files" | \
- sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \
- sort -u` ;; \
- esac; \
- for file in $$dist_files; do \
- if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \
- if test -d $$d/$$file; then \
- dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \
- if test -d "$(distdir)/$$file"; then \
- find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
- fi; \
- if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \
- cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \
- find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
- fi; \
- cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \
- else \
- test -f "$(distdir)/$$file" \
- || cp -p $$d/$$file "$(distdir)/$$file" \
- || exit 1; \
- fi; \
- done
-check-am: all-am
-check: check-am
-all-am: Makefile $(DATA)
-installdirs:
- for dir in "$(DESTDIR)$(libconfigdir)"; do \
- test -z "$$dir" || $(MKDIR_P) "$$dir"; \
- done
-install: install-am
-install-exec: install-exec-am
-install-data: install-data-am
-uninstall: uninstall-am
-
-install-am: all-am
- @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
-
-installcheck: installcheck-am
-install-strip:
- if test -z '$(STRIP)'; then \
- $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
- install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
- install; \
- else \
- $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
- install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
- "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \
- fi
-mostlyclean-generic:
-
-clean-generic:
-
-distclean-generic:
- -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
- -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES)
-
-maintainer-clean-generic:
- @echo "This command is intended for maintainers to use"
- @echo "it deletes files that may require special tools to rebuild."
- -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES)
-clean: clean-am
-
-clean-am: clean-generic mostlyclean-am
-
-distclean: distclean-am
- -rm -f Makefile
-distclean-am: clean-am distclean-generic
-
-dvi: dvi-am
-
-dvi-am:
-
-html: html-am
-
-html-am:
-
-info: info-am
-
-info-am:
-
-install-data-am: install-dist_libconfigDATA
-
-install-dvi: install-dvi-am
-
-install-dvi-am:
-
-install-exec-am:
-
-install-html: install-html-am
-
-install-html-am:
-
-install-info: install-info-am
-
-install-info-am:
-
-install-man:
-
-install-pdf: install-pdf-am
-
-install-pdf-am:
-
-install-ps: install-ps-am
-
-install-ps-am:
-
-installcheck-am:
-
-maintainer-clean: maintainer-clean-am
- -rm -f Makefile
-maintainer-clean-am: distclean-am maintainer-clean-generic
-
-mostlyclean: mostlyclean-am
-
-mostlyclean-am: mostlyclean-generic
-
-pdf: pdf-am
-
-pdf-am:
-
-ps: ps-am
-
-ps-am:
-
-uninstall-am: uninstall-dist_libconfigDATA
-
-.MAKE: install-am install-strip
-
-.PHONY: all all-am check check-am clean clean-generic cscopelist-am \
- ctags-am distclean distclean-generic distdir dvi dvi-am html \
- html-am info info-am install install-am install-data \
- install-data-am install-dist_libconfigDATA install-dvi \
- install-dvi-am install-exec install-exec-am install-html \
- install-html-am install-info install-info-am install-man \
- install-pdf install-pdf-am install-ps install-ps-am \
- install-strip installcheck installcheck-am installdirs \
- maintainer-clean maintainer-clean-generic mostlyclean \
- mostlyclean-generic pdf pdf-am ps ps-am tags-am uninstall \
- uninstall-am uninstall-dist_libconfigDATA
-
-
-# Tell versions [3.59,3.63) of GNU make to not export all variables.
-# Otherwise a system limit (for SysV at least) may be exceeded.
-.NOEXPORT:
diff --git a/collectors/apps.plugin/README.md b/collectors/apps.plugin/README.md
index d1ca8114..ee5c6971 100644
--- a/collectors/apps.plugin/README.md
+++ b/collectors/apps.plugin/README.md
@@ -77,7 +77,7 @@ To do this, edit `/etc/netdata/netdata.conf` and find this section:
```
[plugin:apps]
# update every = 1
- # command options =
+ # command options =
```
Uncomment the line `update every` and set it to a higher number. If you just set it to ` 2 `,
@@ -100,7 +100,8 @@ For the **Applications** section, only groups configured in this file are report
All other processes will be reported as `other`.
For each process given, its whole process tree will be grouped, not just the process matched.
-The plugin will include both parents and children.
+The plugin will include both parents and children. If including the parents in the group is
+undesirable, append the line `other: *` to `apps_groups.conf`.
The process names are the ones returned by:
@@ -254,7 +255,7 @@ Exactly like `top`, `htop` is providing an incomplete breakdown of the system CP
```
CPU[||||||||||||||||||||||||100.0%] Tasks: 27, 11 thr; 2 running
- Mem[||||||||||||||||||||85.4M/993M] Load average: 1.16 0.88 0.90
+ Mem[||||||||||||||||||||85.4M/993M] Load average: 1.16 0.88 0.90
Swp[ 0K/0K] Uptime: 3 days, 21:37:03
PID USER PRI NI VIRT RES SHR S CPU% MEM% TIME+ Command
@@ -305,10 +306,10 @@ MEM [ 23.7%] user: 30.9% total: 993M total: 0 1 min: 1.18
SWAP [ 0.0%] system: 67.8% used: 236M used: 0 5 min: 1.08
idle: 0.0% free: 757M free: 0 15 min: 1.00
-NETWORK Rx/s Tx/s TASKS 75 (90 thr), 1 run, 74 slp, 0 oth
+NETWORK Rx/s Tx/s TASKS 75 (90 thr), 1 run, 74 slp, 0 oth
eth0 168b 2Kb
-eth1 0b 0b CPU% MEM% PID USER NI S Command
-lo 0b 0b 13.5 0.4 12789 root 0 S -bash
+eth1 0b 0b CPU% MEM% PID USER NI S Command
+lo 0b 0b 13.5 0.4 12789 root 0 S -bash
1.6 2.2 7025 root 0 R /usr/bin/python /u
DISK I/O R/s W/s 1.0 0.0 9 root 0 S rcuos/0
vda1 0 4K 0.3 0.2 7024 netdata 0 S /usr/libexec/netda
@@ -370,3 +371,5 @@ It is even trickier, because walking through the entire process tree takes some
if you sum the CPU utilization of all processes, you might have more CPU time than the reported
total cpu time of the system. netdata solves this, by adapting the per process cpu utilization to
the total of the system. [Netdata adds charts that document this normalization](https://london.my-netdata.io/default.html#menu_netdata_submenu_apps_plugin).
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fapps.plugin%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
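The `other: *` trick mentioned in the README hunk above is easiest to see in a minimal `apps_groups.conf` sketch (the group and process names here are hypothetical):

```
# hypothetical apps_groups.conf fragment
# entries are checked in order and the first match wins, so processes
# not caught by an explicit group fall into 'other' instead of being
# pulled into a parent's group through the process tree
myservice: myservice-daemon myservice-worker*
other: *
```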
diff --git a/collectors/apps.plugin/apps_groups.conf b/collectors/apps.plugin/apps_groups.conf
index c0d22fac..91206410 100644
--- a/collectors/apps.plugin/apps_groups.conf
+++ b/collectors/apps.plugin/apps_groups.conf
@@ -79,6 +79,7 @@ node.d.plugin: *node.d.plugin*
python.d.plugin: *python.d.plugin*
tc-qos-helper: *tc-qos-helper.sh*
fping: fping
+go.d.plugin: *go.d.plugin*
# -----------------------------------------------------------------------------
# authentication/authorization related servers
@@ -256,7 +257,8 @@ airflow: *airflow*
# X
X: X Xorg xinit lightdm xdm pulseaudio gkrellm xfwm4 xfdesktop xfce* Thunar
-X: xfsettingsd xfconfd gnome-* gdm gconf* dconf* xfconf* *gvfs gvfs* kdm slim
+X: xfsettingsd xfconfd gnome-* gdm gconf* dconf* xfconf* *gvfs gvfs* slim
+X: kdeinit* kdm plasmashell
X: evolution-* firefox chromium opera vivaldi-bin epiphany WebKit*
X: '*systemd --user*' chrome *chrome-sandbox* *google-chrome* *chromium* *firefox*
@@ -284,3 +286,4 @@ java: java
ipfs: ipfs
node: node
+factorio: factorio
diff --git a/collectors/apps.plugin/apps_plugin.c b/collectors/apps.plugin/apps_plugin.c
index f592e9fc..9f392679 100644
--- a/collectors/apps.plugin/apps_plugin.c
+++ b/collectors/apps.plugin/apps_plugin.c
@@ -15,6 +15,12 @@ void netdata_cleanup_and_exit(int ret) {
exit(ret);
}
+void send_statistics( const char *action, const char *action_result, const char *action_data) {
+ (void) action;
+ (void) action_result;
+ (void) action_data;
+ return;
+}
// callbacks required by popen()
void signals_block(void) {};
void signals_unblock(void) {};
@@ -99,6 +105,9 @@ static inline void debug_log_dummy(void) {}
// etc.
#define RATES_DETAIL 10000ULL
+// ----------------------------------------------------------------------------
+// factor for calculating correct CPU time values depending on units of raw data
+static unsigned int time_factor = 0;
// ----------------------------------------------------------------------------
// to avoid reallocating too frequently, we can increase the number of spare
@@ -107,7 +116,6 @@ static inline void debug_log_dummy(void) {}
// having a lot of spares, increases the CPU utilization of the plugin.
#define MAX_SPARE_FDS 1
-
// ----------------------------------------------------------------------------
// command line options
@@ -166,12 +174,10 @@ static size_t
// metric.
// the total system time, as reported by /proc/stat
-#if (ALL_PIDS_ARE_READ_INSTANTLY == 0)
static kernel_uint_t
global_utime = 0,
global_stime = 0,
global_gtime = 0;
-#endif
// the normalization ratios, as calculated by normalize_utilization()
double utime_fix_ratio = 1.0,
@@ -1038,8 +1044,8 @@ static inline int read_proc_pid_status(struct pid_stat *p, void *ptr) {
p->uid = proc_info->ki_uid;
p->gid = proc_info->ki_groups[0];
- p->status_vmsize = proc_info->ki_size / 1024; // in kB
- p->status_vmrss = proc_info->ki_rssize * pagesize / 1024; // in kB
+ p->status_vmsize = proc_info->ki_size / 1024; // in KiB
+ p->status_vmrss = proc_info->ki_rssize * pagesize / 1024; // in KiB
// TODO: what about shared and swap memory on FreeBSD?
return 1;
#else
@@ -1327,8 +1333,8 @@ cleanup:
#endif
}
-#if (ALL_PIDS_ARE_READ_INSTANTLY == 0)
-static inline int read_proc_stat() {
+#ifndef __FreeBSD__
+static inline int read_global_time() {
static char filename[FILENAME_MAX + 1] = "";
static procfile *ff = NULL;
static kernel_uint_t utime_raw = 0, stime_raw = 0, gtime_raw = 0, gntime_raw = 0, ntime_raw = 0;
@@ -1386,10 +1392,50 @@ cleanup:
return 0;
}
#else
-static inline int read_proc_stat() {
+static inline int read_global_time() {
+ static kernel_uint_t utime_raw = 0, stime_raw = 0, gtime_raw = 0, ntime_raw = 0;
+ static usec_t collected_usec = 0, last_collected_usec = 0;
+ long cp_time[CPUSTATES];
+
+ if (unlikely(CPUSTATES != 5)) {
+ goto cleanup;
+ } else {
+ static int mib[2] = {0, 0};
+
+ if (unlikely(GETSYSCTL_SIMPLE("kern.cp_time", mib, cp_time))) {
+ goto cleanup;
+ }
+ }
+
+ last_collected_usec = collected_usec;
+ collected_usec = now_monotonic_usec();
+
+ calls_counter++;
+
+ // temporary - global_ntime is added to global_utime below
+ kernel_uint_t global_ntime = 0;
+
+ incremental_rate(global_utime, utime_raw, cp_time[0] * 100LLU / system_hz, collected_usec, last_collected_usec);
+ incremental_rate(global_ntime, ntime_raw, cp_time[1] * 100LLU / system_hz, collected_usec, last_collected_usec);
+ incremental_rate(global_stime, stime_raw, cp_time[2] * 100LLU / system_hz, collected_usec, last_collected_usec);
+
+ global_utime += global_ntime;
+
+ if(unlikely(global_iterations_counter == 1)) {
+ global_utime = 0;
+ global_stime = 0;
+ global_gtime = 0;
+ }
+
+ return 1;
+
+cleanup:
+ global_utime = 0;
+ global_stime = 0;
+ global_gtime = 0;
return 0;
}
-#endif
+#endif /* !__FreeBSD__ */
// ----------------------------------------------------------------------------
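For readers unfamiliar with the FreeBSD branch added above: `kern.cp_time` returns the `CPUSTATES` cumulative counters (user, nice, system, interrupt, idle). A minimal standalone sketch using the standard `sysctlbyname(3)` interface (not the plugin's `GETSYSCTL_SIMPLE` helper):

```c
// standalone sketch: read FreeBSD's cumulative CPU state counters.
// cp_time[0..4] = user, nice, system, interrupt, idle (clock ticks).
#include <sys/types.h>
#include <sys/sysctl.h>
#include <stdio.h>

int main(void) {
    long cp_time[5];                  /* CPUSTATES entries */
    size_t len = sizeof(cp_time);
    if (sysctlbyname("kern.cp_time", cp_time, &len, NULL, 0) != 0) {
        perror("sysctlbyname");
        return 1;
    }
    printf("user=%ld nice=%ld sys=%ld intr=%ld idle=%ld\n",
           cp_time[0], cp_time[1], cp_time[2], cp_time[3], cp_time[4]);
    return 0;
}
```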
@@ -2289,7 +2335,7 @@ static int collect_data_for_all_processes(void) {
size_t new_procbase_size;
- int mib[3] = { CTL_KERN, KERN_PROC, KERN_PROC_PROC };
+ int mib[3] = { CTL_KERN, KERN_PROC, KERN_PROC_ALL };
if (unlikely(sysctl(mib, 3, NULL, &new_procbase_size, NULL, 0))) {
error("sysctl error: Can't get processes data size");
return 0;
@@ -2396,7 +2442,7 @@ static int collect_data_for_all_processes(void) {
return 0;
// we need the global system time to normalize the cpu consumption of the exited children
- read_proc_stat();
+ read_global_time();
// build the process tree
link_all_processes_to_their_parents();
@@ -2884,7 +2930,6 @@ void send_resource_usage_to_netdata(usec_t dt) {
, update_every
);
-#if (ALL_PIDS_ARE_READ_INSTANTLY == 0)
fprintf(stdout,
"CHART netdata.apps_fix '' 'Apps Plugin Normalization Ratios' 'percentage' apps.plugin netdata.apps_fix line 140002 %1$d\n"
"DIMENSION utime '' absolute 1 %2$llu\n"
@@ -2907,7 +2952,6 @@ void send_resource_usage_to_netdata(usec_t dt) {
, update_every
, RATES_DETAIL
);
-#endif
}
@@ -2942,7 +2986,6 @@ void send_resource_usage_to_netdata(usec_t dt) {
, targets_assignment_counter
);
-#if (ALL_PIDS_ARE_READ_INSTANTLY == 0)
fprintf(stdout,
"BEGIN netdata.apps_fix %llu\n"
"SET utime = %u\n"
@@ -2975,10 +3018,8 @@ void send_resource_usage_to_netdata(usec_t dt) {
, (unsigned int)(cminflt_fix_ratio * 100 * RATES_DETAIL)
, (unsigned int)(cmajflt_fix_ratio * 100 * RATES_DETAIL)
);
-#endif
}
-#if (ALL_PIDS_ARE_READ_INSTANTLY == 0)
static void normalize_utilization(struct target *root) {
struct target *w;
@@ -2986,7 +3027,7 @@ static void normalize_utilization(struct target *root) {
// here we try to eliminate them by disabling processing of children, either for specific dimensions
// or entirely. Either way, we disable it just for a single iteration.
- kernel_uint_t max_time = processors * system_hz * RATES_DETAIL;
+ kernel_uint_t max_time = processors * time_factor * RATES_DETAIL;
kernel_uint_t utime = 0, cutime = 0, stime = 0, cstime = 0, gtime = 0, cgtime = 0, minflt = 0, cminflt = 0, majflt = 0, cmajflt = 0;
if(global_utime > max_time) global_utime = max_time;
@@ -3009,7 +3050,7 @@ static void normalize_utilization(struct target *root) {
cmajflt += w->cmajflt;
}
- if((global_utime || global_stime || global_gtime) && (utime || stime || gtime)) {
+ if(global_utime || global_stime || global_gtime) {
if(global_utime + global_stime + global_gtime > utime + cutime + stime + cstime + gtime + cgtime) {
// everything we collected fits
utime_fix_ratio =
@@ -3019,7 +3060,7 @@ static void normalize_utilization(struct target *root) {
cstime_fix_ratio =
cgtime_fix_ratio = 1.0; //(double)(global_utime + global_stime) / (double)(utime + cutime + stime + cstime);
}
- else if(global_utime + global_stime > utime + stime) {
+ else if((global_utime + global_stime > utime + stime) && (cutime || cstime)) {
// children's resources are too high
// lower only the children resources
utime_fix_ratio =
@@ -3029,7 +3070,7 @@ static void normalize_utilization(struct target *root) {
cstime_fix_ratio =
cgtime_fix_ratio = (double)((global_utime + global_stime) - (utime + stime)) / (double)(cutime + cstime);
}
- else {
+ else if(utime || stime) {
// even running processes are unrealistic
// zero the children resources
// lower the running processes resources
@@ -3040,6 +3081,14 @@ static void normalize_utilization(struct target *root) {
cstime_fix_ratio =
cgtime_fix_ratio = 0.0;
}
+ else {
+ utime_fix_ratio =
+ stime_fix_ratio =
+ gtime_fix_ratio =
+ cutime_fix_ratio =
+ cstime_fix_ratio =
+ cgtime_fix_ratio = 0.0;
+ }
}
else {
utime_fix_ratio =
@@ -3121,11 +3170,6 @@ static void normalize_utilization(struct target *root) {
, (kernel_uint_t)(cgtime * cgtime_fix_ratio)
);
}
-#else // ALL_PIDS_ARE_READ_INSTANTLY == 1
-static void normalize_utilization(struct target *root) {
- (void)root;
-}
-#endif // ALL_PIDS_ARE_READ_INSTANTLY
static void send_collected_data_to_netdata(struct target *root, const char *type, usec_t dt) {
struct target *w;
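To make the ratio logic above concrete, a small worked example with invented numbers: if the system reports `global_utime + global_stime = 800` units while the summed running-process time is `utime + stime = 600` and the children's time is `cutime + cstime = 400`, the running processes fit, so only the children are scaled: the `c*_fix_ratio` values become `(800 - 600) / 400 = 0.5` while the running-process ratios stay at `1.0`.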
@@ -3196,7 +3240,7 @@ static void send_collected_data_to_netdata(struct target *root, const char *type
}
send_END();
#endif
-
+
send_BEGIN(type, "minor_faults", dt);
for (w = root; w ; w = w->next) {
if(unlikely(w->exposed))
@@ -3290,19 +3334,19 @@ static void send_charts_updates_to_netdata(struct target *root, const char *type
// we have something new to show
// update the charts
- fprintf(stdout, "CHART %s.cpu '' '%s CPU Time (%d%% = %d core%s)' 'cpu time %%' cpu %s.cpu stacked 20001 %d\n", type, title, (processors * 100), processors, (processors>1)?"s":"", type, update_every);
+ fprintf(stdout, "CHART %s.cpu '' '%s CPU Time (%d%% = %d core%s)' 'percentage' cpu %s.cpu stacked 20001 %d\n", type, title, (processors * 100), processors, (processors>1)?"s":"", type, update_every);
for (w = root; w ; w = w->next) {
if(unlikely(w->exposed))
- fprintf(stdout, "DIMENSION %s '' absolute 1 %llu %s\n", w->name, system_hz * RATES_DETAIL / 100, w->hidden ? "hidden" : "");
+ fprintf(stdout, "DIMENSION %s '' absolute 1 %llu %s\n", w->name, time_factor * RATES_DETAIL / 100, w->hidden ? "hidden" : "");
}
- fprintf(stdout, "CHART %s.mem '' '%s Real Memory (w/o shared)' 'MB' mem %s.mem stacked 20003 %d\n", type, title, type, update_every);
+ fprintf(stdout, "CHART %s.mem '' '%s Real Memory (w/o shared)' 'MiB' mem %s.mem stacked 20003 %d\n", type, title, type, update_every);
for (w = root; w ; w = w->next) {
if(unlikely(w->exposed))
fprintf(stdout, "DIMENSION %s '' absolute %ld %ld\n", w->name, 1L, 1024L);
}
- fprintf(stdout, "CHART %s.vmem '' '%s Virtual Memory Size' 'MB' mem %s.vmem stacked 20005 %d\n", type, title, type, update_every);
+ fprintf(stdout, "CHART %s.vmem '' '%s Virtual Memory Size' 'MiB' mem %s.vmem stacked 20005 %d\n", type, title, type, update_every);
for (w = root; w ; w = w->next) {
if(unlikely(w->exposed))
fprintf(stdout, "DIMENSION %s '' absolute %ld %ld\n", w->name, 1L, 1024L);
@@ -3320,28 +3364,28 @@ static void send_charts_updates_to_netdata(struct target *root, const char *type
fprintf(stdout, "DIMENSION %s '' absolute 1 1\n", w->name);
}
- fprintf(stdout, "CHART %s.cpu_user '' '%s CPU User Time (%d%% = %d core%s)' 'cpu time %%' cpu %s.cpu_user stacked 20020 %d\n", type, title, (processors * 100), processors, (processors>1)?"s":"", type, update_every);
+ fprintf(stdout, "CHART %s.cpu_user '' '%s CPU User Time (%d%% = %d core%s)' 'percentage' cpu %s.cpu_user stacked 20020 %d\n", type, title, (processors * 100), processors, (processors>1)?"s":"", type, update_every);
for (w = root; w ; w = w->next) {
if(unlikely(w->exposed))
- fprintf(stdout, "DIMENSION %s '' absolute 1 %llu\n", w->name, system_hz * RATES_DETAIL / 100LLU);
+ fprintf(stdout, "DIMENSION %s '' absolute 1 %llu\n", w->name, time_factor * RATES_DETAIL / 100LLU);
}
- fprintf(stdout, "CHART %s.cpu_system '' '%s CPU System Time (%d%% = %d core%s)' 'cpu time %%' cpu %s.cpu_system stacked 20021 %d\n", type, title, (processors * 100), processors, (processors>1)?"s":"", type, update_every);
+ fprintf(stdout, "CHART %s.cpu_system '' '%s CPU System Time (%d%% = %d core%s)' 'percentage' cpu %s.cpu_system stacked 20021 %d\n", type, title, (processors * 100), processors, (processors>1)?"s":"", type, update_every);
for (w = root; w ; w = w->next) {
if(unlikely(w->exposed))
- fprintf(stdout, "DIMENSION %s '' absolute 1 %llu\n", w->name, system_hz * RATES_DETAIL / 100LLU);
+ fprintf(stdout, "DIMENSION %s '' absolute 1 %llu\n", w->name, time_factor * RATES_DETAIL / 100LLU);
}
if(show_guest_time) {
- fprintf(stdout, "CHART %s.cpu_guest '' '%s CPU Guest Time (%d%% = %d core%s)' 'cpu time %%' cpu %s.cpu_system stacked 20022 %d\n", type, title, (processors * 100), processors, (processors > 1) ? "s" : "", type, update_every);
+ fprintf(stdout, "CHART %s.cpu_guest '' '%s CPU Guest Time (%d%% = %d core%s)' 'percentage' cpu %s.cpu_system stacked 20022 %d\n", type, title, (processors * 100), processors, (processors > 1) ? "s" : "", type, update_every);
for (w = root; w; w = w->next) {
if(unlikely(w->exposed))
- fprintf(stdout, "DIMENSION %s '' absolute 1 %llu\n", w->name, system_hz * RATES_DETAIL / 100LLU);
+ fprintf(stdout, "DIMENSION %s '' absolute 1 %llu\n", w->name, time_factor * RATES_DETAIL / 100LLU);
}
}
#ifndef __FreeBSD__
- fprintf(stdout, "CHART %s.swap '' '%s Swap Memory' 'MB' swap %s.swap stacked 20011 %d\n", type, title, type, update_every);
+ fprintf(stdout, "CHART %s.swap '' '%s Swap Memory' 'MiB' swap %s.swap stacked 20011 %d\n", type, title, type, update_every);
for (w = root; w ; w = w->next) {
if(unlikely(w->exposed))
fprintf(stdout, "DIMENSION %s '' absolute %ld %ld\n", w->name, 1L, 1024L);
@@ -3373,25 +3417,25 @@ static void send_charts_updates_to_netdata(struct target *root, const char *type
fprintf(stdout, "DIMENSION %s '' absolute 1 %llu\n", w->name, RATES_DETAIL);
}
#else
- fprintf(stdout, "CHART %s.preads '' '%s Disk Reads' 'kilobytes/s' disk %s.preads stacked 20002 %d\n", type, title, type, update_every);
+ fprintf(stdout, "CHART %s.preads '' '%s Disk Reads' 'KiB/s' disk %s.preads stacked 20002 %d\n", type, title, type, update_every);
for (w = root; w ; w = w->next) {
if(unlikely(w->exposed))
fprintf(stdout, "DIMENSION %s '' absolute 1 %llu\n", w->name, 1024LLU * RATES_DETAIL);
}
- fprintf(stdout, "CHART %s.pwrites '' '%s Disk Writes' 'kilobytes/s' disk %s.pwrites stacked 20002 %d\n", type, title, type, update_every);
+ fprintf(stdout, "CHART %s.pwrites '' '%s Disk Writes' 'KiB/s' disk %s.pwrites stacked 20002 %d\n", type, title, type, update_every);
for (w = root; w ; w = w->next) {
if(unlikely(w->exposed))
fprintf(stdout, "DIMENSION %s '' absolute 1 %llu\n", w->name, 1024LLU * RATES_DETAIL);
}
- fprintf(stdout, "CHART %s.lreads '' '%s Disk Logical Reads' 'kilobytes/s' disk %s.lreads stacked 20042 %d\n", type, title, type, update_every);
+ fprintf(stdout, "CHART %s.lreads '' '%s Disk Logical Reads' 'KiB/s' disk %s.lreads stacked 20042 %d\n", type, title, type, update_every);
for (w = root; w ; w = w->next) {
if(unlikely(w->exposed))
fprintf(stdout, "DIMENSION %s '' absolute 1 %llu\n", w->name, 1024LLU * RATES_DETAIL);
}
- fprintf(stdout, "CHART %s.lwrites '' '%s I/O Logical Writes' 'kilobytes/s' disk %s.lwrites stacked 20042 %d\n", type, title, type, update_every);
+ fprintf(stdout, "CHART %s.lwrites '' '%s I/O Logical Writes' 'KiB/s' disk %s.lwrites stacked 20042 %d\n", type, title, type, update_every);
for (w = root; w ; w = w->next) {
if(unlikely(w->exposed))
fprintf(stdout, "DIMENSION %s '' absolute 1 %llu\n", w->name, 1024LLU * RATES_DETAIL);
@@ -3710,7 +3754,14 @@ int main(int argc, char **argv) {
procfile_adaptive_initial_allocation = 1;
time_t started_t = now_monotonic_sec();
+
get_system_HZ();
+#ifdef __FreeBSD__
+ time_factor = 1000000ULL / RATES_DETAIL; // FreeBSD uses usecs
+#else
+ time_factor = system_hz; // Linux uses clock ticks
+#endif
+
get_system_pid_max();
get_system_cpus();
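The `time_factor` plumbing above normalizes raw CPU counters to a common unit before charting. A simplified sketch of the idea (the function name is hypothetical; this mirrors the Linux case, where raw values are clock ticks and `time_factor = system_hz`):

```c
// simplified sketch: convert a raw CPU-time delta into percent of one core.
// raw_delta is measured in time_factor units per second of CPU use
// (clock ticks on Linux); interval_sec is the wall-clock interval length.
static unsigned long long cpu_percent(unsigned long long raw_delta,
                                      unsigned long long time_factor,
                                      unsigned long long interval_sec) {
    if (!time_factor || !interval_sec)
        return 0;                      // avoid division by zero
    return raw_delta * 100ULL / (time_factor * interval_sec);
}
```

With `system_hz = 100` and a 1-second interval, a process that consumed 50 ticks reports 50% of one core.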
diff --git a/collectors/cgroups.plugin/Makefile.in b/collectors/cgroups.plugin/Makefile.in
deleted file mode 100644
index 49c3c983..00000000
--- a/collectors/cgroups.plugin/Makefile.in
+++ /dev/null
@@ -1,563 +0,0 @@
-# Makefile.in generated by automake 1.14.1 from Makefile.am.
-# @configure_input@
-
-# Copyright (C) 1994-2013 Free Software Foundation, Inc.
-
-# This Makefile.in is free software; the Free Software Foundation
-# gives unlimited permission to copy and/or distribute it,
-# with or without modifications, as long as this notice is preserved.
-
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
-# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
-# PARTICULAR PURPOSE.
-
-@SET_MAKE@
-
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-
-VPATH = @srcdir@
-am__is_gnu_make = test -n '$(MAKEFILE_LIST)' && test -n '$(MAKELEVEL)'
-am__make_running_with_option = \
- case $${target_option-} in \
- ?) ;; \
- *) echo "am__make_running_with_option: internal error: invalid" \
- "target option '$${target_option-}' specified" >&2; \
- exit 1;; \
- esac; \
- has_opt=no; \
- sane_makeflags=$$MAKEFLAGS; \
- if $(am__is_gnu_make); then \
- sane_makeflags=$$MFLAGS; \
- else \
- case $$MAKEFLAGS in \
- *\\[\ \ ]*) \
- bs=\\; \
- sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \
- | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \
- esac; \
- fi; \
- skip_next=no; \
- strip_trailopt () \
- { \
- flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \
- }; \
- for flg in $$sane_makeflags; do \
- test $$skip_next = yes && { skip_next=no; continue; }; \
- case $$flg in \
- *=*|--*) continue;; \
- -*I) strip_trailopt 'I'; skip_next=yes;; \
- -*I?*) strip_trailopt 'I';; \
- -*O) strip_trailopt 'O'; skip_next=yes;; \
- -*O?*) strip_trailopt 'O';; \
- -*l) strip_trailopt 'l'; skip_next=yes;; \
- -*l?*) strip_trailopt 'l';; \
- -[dEDm]) skip_next=yes;; \
- -[JT]) skip_next=yes;; \
- esac; \
- case $$flg in \
- *$$target_option*) has_opt=yes; break;; \
- esac; \
- done; \
- test $$has_opt = yes
-am__make_dryrun = (target_option=n; $(am__make_running_with_option))
-am__make_keepgoing = (target_option=k; $(am__make_running_with_option))
-pkgdatadir = $(datadir)/@PACKAGE@
-pkgincludedir = $(includedir)/@PACKAGE@
-pkglibdir = $(libdir)/@PACKAGE@
-pkglibexecdir = $(libexecdir)/@PACKAGE@
-am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd
-install_sh_DATA = $(install_sh) -c -m 644
-install_sh_PROGRAM = $(install_sh) -c
-install_sh_SCRIPT = $(install_sh) -c
-INSTALL_HEADER = $(INSTALL_DATA)
-transform = $(program_transform_name)
-NORMAL_INSTALL = :
-PRE_INSTALL = :
-POST_INSTALL = :
-NORMAL_UNINSTALL = :
-PRE_UNINSTALL = :
-POST_UNINSTALL = :
-build_triplet = @build@
-host_triplet = @host@
-DIST_COMMON = $(top_srcdir)/build/subst.inc $(srcdir)/Makefile.in \
- $(srcdir)/Makefile.am $(dist_plugins_SCRIPTS) \
- $(dist_noinst_DATA)
-subdir = collectors/cgroups.plugin
-ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
-am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \
- $(top_srcdir)/build/m4/ax_c__generic.m4 \
- $(top_srcdir)/build/m4/ax_c_lto.m4 \
- $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \
- $(top_srcdir)/build/m4/ax_c_mallopt.m4 \
- $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \
- $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \
- $(top_srcdir)/build/m4/ax_pthread.m4 \
- $(top_srcdir)/build/m4/jemalloc.m4 \
- $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac
-am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
- $(ACLOCAL_M4)
-mkinstalldirs = $(install_sh) -d
-CONFIG_HEADER = $(top_builddir)/config.h
-CONFIG_CLEAN_FILES =
-CONFIG_CLEAN_VPATH_FILES =
-am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`;
-am__vpath_adj = case $$p in \
- $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \
- *) f=$$p;; \
- esac;
-am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`;
-am__install_max = 40
-am__nobase_strip_setup = \
- srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'`
-am__nobase_strip = \
- for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||"
-am__nobase_list = $(am__nobase_strip_setup); \
- for p in $$list; do echo "$$p $$p"; done | \
- sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \
- $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \
- if (++n[$$2] == $(am__install_max)) \
- { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \
- END { for (dir in files) print dir, files[dir] }'
-am__base_list = \
- sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \
- sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g'
-am__uninstall_files_from_dir = { \
- test -z "$$files" \
- || { test ! -d "$$dir" && test ! -f "$$dir" && test ! -r "$$dir"; } \
- || { echo " ( cd '$$dir' && rm -f" $$files ")"; \
- $(am__cd) "$$dir" && rm -f $$files; }; \
- }
-am__installdirs = "$(DESTDIR)$(pluginsdir)"
-SCRIPTS = $(dist_plugins_SCRIPTS)
-AM_V_P = $(am__v_P_@AM_V@)
-am__v_P_ = $(am__v_P_@AM_DEFAULT_V@)
-am__v_P_0 = false
-am__v_P_1 = :
-AM_V_GEN = $(am__v_GEN_@AM_V@)
-am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@)
-am__v_GEN_0 = @echo " GEN " $@;
-am__v_GEN_1 =
-AM_V_at = $(am__v_at_@AM_V@)
-am__v_at_ = $(am__v_at_@AM_DEFAULT_V@)
-am__v_at_0 = @
-am__v_at_1 =
-SOURCES =
-DIST_SOURCES =
-am__can_run_installinfo = \
- case $$AM_UPDATE_INFO_DIR in \
- n|no|NO) false;; \
- *) (install-info --version) >/dev/null 2>&1;; \
- esac
-DATA = $(dist_noinst_DATA)
-am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP)
-DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
-ACLOCAL = @ACLOCAL@
-AMTAR = @AMTAR@
-AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@
-AUTOCONF = @AUTOCONF@
-AUTOHEADER = @AUTOHEADER@
-AUTOMAKE = @AUTOMAKE@
-AWK = @AWK@
-CC = @CC@
-CCDEPMODE = @CCDEPMODE@
-CFLAGS = @CFLAGS@
-CPP = @CPP@
-CPPFLAGS = @CPPFLAGS@
-CYGPATH_W = @CYGPATH_W@
-DEFS = @DEFS@
-DEPDIR = @DEPDIR@
-ECHO_C = @ECHO_C@
-ECHO_N = @ECHO_N@
-ECHO_T = @ECHO_T@
-EGREP = @EGREP@
-EXEEXT = @EXEEXT@
-GREP = @GREP@
-INSTALL = @INSTALL@
-INSTALL_DATA = @INSTALL_DATA@
-INSTALL_PROGRAM = @INSTALL_PROGRAM@
-INSTALL_SCRIPT = @INSTALL_SCRIPT@
-INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
-IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@
-IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@
-LDFLAGS = @LDFLAGS@
-LIBCAP_CFLAGS = @LIBCAP_CFLAGS@
-LIBCAP_LIBS = @LIBCAP_LIBS@
-LIBMNL_CFLAGS = @LIBMNL_CFLAGS@
-LIBMNL_LIBS = @LIBMNL_LIBS@
-LIBOBJS = @LIBOBJS@
-LIBS = @LIBS@
-LTLIBOBJS = @LTLIBOBJS@
-MAINT = @MAINT@
-MAKEINFO = @MAKEINFO@
-MATH_CFLAGS = @MATH_CFLAGS@
-MATH_LIBS = @MATH_LIBS@
-MKDIR_P = @MKDIR_P@
-NFACCT_CFLAGS = @NFACCT_CFLAGS@
-NFACCT_LIBS = @NFACCT_LIBS@
-OBJEXT = @OBJEXT@
-OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@
-OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@
-OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@
-OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@
-OPTIONAL_MATH_CLFAGS = @OPTIONAL_MATH_CLFAGS@
-OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@
-OPTIONAL_NFACCT_CLFAGS = @OPTIONAL_NFACCT_CLFAGS@
-OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@
-OPTIONAL_UUID_CLFAGS = @OPTIONAL_UUID_CLFAGS@
-OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@
-OPTIONAL_ZLIB_CLFAGS = @OPTIONAL_ZLIB_CLFAGS@
-OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@
-PACKAGE = @PACKAGE@
-PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@
-PACKAGE_NAME = @PACKAGE_NAME@
-PACKAGE_RPM_RELEASE = @PACKAGE_RPM_RELEASE@
-PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@
-PACKAGE_STRING = @PACKAGE_STRING@
-PACKAGE_TARNAME = @PACKAGE_TARNAME@
-PACKAGE_URL = @PACKAGE_URL@
-PACKAGE_VERSION = @PACKAGE_VERSION@
-PATH_SEPARATOR = @PATH_SEPARATOR@
-PKG_CONFIG = @PKG_CONFIG@
-PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@
-PKG_CONFIG_PATH = @PKG_CONFIG_PATH@
-PTHREAD_CC = @PTHREAD_CC@
-PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
-PTHREAD_LIBS = @PTHREAD_LIBS@
-SET_MAKE = @SET_MAKE@
-SHELL = @SHELL@
-SSE_CANDIDATE = @SSE_CANDIDATE@
-STRIP = @STRIP@
-UUID_CFLAGS = @UUID_CFLAGS@
-UUID_LIBS = @UUID_LIBS@
-VERSION = @VERSION@
-ZLIB_CFLAGS = @ZLIB_CFLAGS@
-ZLIB_LIBS = @ZLIB_LIBS@
-abs_builddir = @abs_builddir@
-abs_srcdir = @abs_srcdir@
-abs_top_builddir = @abs_top_builddir@
-abs_top_srcdir = @abs_top_srcdir@
-ac_ct_CC = @ac_ct_CC@
-am__include = @am__include@
-am__leading_dot = @am__leading_dot@
-am__quote = @am__quote@
-am__tar = @am__tar@
-am__untar = @am__untar@
-ax_pthread_config = @ax_pthread_config@
-bindir = @bindir@
-build = @build@
-build_alias = @build_alias@
-build_cpu = @build_cpu@
-build_os = @build_os@
-build_target = @build_target@
-build_vendor = @build_vendor@
-builddir = @builddir@
-cachedir = @cachedir@
-chartsdir = @chartsdir@
-configdir = @configdir@
-datadir = @datadir@
-datarootdir = @datarootdir@
-docdir = @docdir@
-dvidir = @dvidir@
-exec_prefix = @exec_prefix@
-has_jemalloc = @has_jemalloc@
-has_tcmalloc = @has_tcmalloc@
-host = @host@
-host_alias = @host_alias@
-host_cpu = @host_cpu@
-host_os = @host_os@
-host_vendor = @host_vendor@
-htmldir = @htmldir@
-includedir = @includedir@
-infodir = @infodir@
-install_sh = @install_sh@
-libconfigdir = @libconfigdir@
-libdir = @libdir@
-libexecdir = @libexecdir@
-localedir = @localedir@
-localstatedir = @localstatedir@
-logdir = @logdir@
-mandir = @mandir@
-mkdir_p = @mkdir_p@
-nodedir = @nodedir@
-oldincludedir = @oldincludedir@
-pdfdir = @pdfdir@
-pluginsdir = @pluginsdir@
-prefix = @prefix@
-program_transform_name = @program_transform_name@
-psdir = @psdir@
-pythondir = @pythondir@
-registrydir = @registrydir@
-sbindir = @sbindir@
-sharedstatedir = @sharedstatedir@
-srcdir = @srcdir@
-sysconfdir = @sysconfdir@
-target_alias = @target_alias@
-top_build_prefix = @top_build_prefix@
-top_builddir = @top_builddir@
-top_srcdir = @top_srcdir@
-varlibdir = @varlibdir@
-webdir = @webdir@
-AUTOMAKE_OPTIONS = subdir-objects
-MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
-CLEANFILES = \
- cgroup-name.sh \
- $(NULL)
-
-SUFFIXES = .in
-dist_plugins_SCRIPTS = \
- cgroup-name.sh \
- cgroup-network-helper.sh \
- $(NULL)
-
-dist_noinst_DATA = \
- cgroup-name.sh.in \
- README.md \
- $(NULL)
-
-all: all-am
-
-.SUFFIXES:
-.SUFFIXES: .in
-$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(top_srcdir)/build/subst.inc $(am__configure_deps)
- @for dep in $?; do \
- case '$(am__configure_deps)' in \
- *$$dep*) \
- ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \
- && { if test -f $@; then exit 0; else break; fi; }; \
- exit 1;; \
- esac; \
- done; \
- echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu collectors/cgroups.plugin/Makefile'; \
- $(am__cd) $(top_srcdir) && \
- $(AUTOMAKE) --gnu collectors/cgroups.plugin/Makefile
-.PRECIOUS: Makefile
-Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
- @case '$?' in \
- *config.status*) \
- cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \
- *) \
- echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \
- cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \
- esac;
-$(top_srcdir)/build/subst.inc:
-
-$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES)
- cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
-
-$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps)
- cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
-$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps)
- cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
-$(am__aclocal_m4_deps):
-install-dist_pluginsSCRIPTS: $(dist_plugins_SCRIPTS)
- @$(NORMAL_INSTALL)
- @list='$(dist_plugins_SCRIPTS)'; test -n "$(pluginsdir)" || list=; \
- if test -n "$$list"; then \
- echo " $(MKDIR_P) '$(DESTDIR)$(pluginsdir)'"; \
- $(MKDIR_P) "$(DESTDIR)$(pluginsdir)" || exit 1; \
- fi; \
- for p in $$list; do \
- if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
- if test -f "$$d$$p"; then echo "$$d$$p"; echo "$$p"; else :; fi; \
- done | \
- sed -e 'p;s,.*/,,;n' \
- -e 'h;s|.*|.|' \
- -e 'p;x;s,.*/,,;$(transform)' | sed 'N;N;N;s,\n, ,g' | \
- $(AWK) 'BEGIN { files["."] = ""; dirs["."] = 1; } \
- { d=$$3; if (dirs[d] != 1) { print "d", d; dirs[d] = 1 } \
- if ($$2 == $$4) { files[d] = files[d] " " $$1; \
- if (++n[d] == $(am__install_max)) { \
- print "f", d, files[d]; n[d] = 0; files[d] = "" } } \
- else { print "f", d "/" $$4, $$1 } } \
- END { for (d in files) print "f", d, files[d] }' | \
- while read type dir files; do \
- if test "$$dir" = .; then dir=; else dir=/$$dir; fi; \
- test -z "$$files" || { \
- echo " $(INSTALL_SCRIPT) $$files '$(DESTDIR)$(pluginsdir)$$dir'"; \
- $(INSTALL_SCRIPT) $$files "$(DESTDIR)$(pluginsdir)$$dir" || exit $$?; \
- } \
- ; done
-
-uninstall-dist_pluginsSCRIPTS:
- @$(NORMAL_UNINSTALL)
- @list='$(dist_plugins_SCRIPTS)'; test -n "$(pluginsdir)" || exit 0; \
- files=`for p in $$list; do echo "$$p"; done | \
- sed -e 's,.*/,,;$(transform)'`; \
- dir='$(DESTDIR)$(pluginsdir)'; $(am__uninstall_files_from_dir)
-tags TAGS:
-
-ctags CTAGS:
-
-cscope cscopelist:
-
-
-distdir: $(DISTFILES)
- @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
- topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
- list='$(DISTFILES)'; \
- dist_files=`for file in $$list; do echo $$file; done | \
- sed -e "s|^$$srcdirstrip/||;t" \
- -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \
- case $$dist_files in \
- */*) $(MKDIR_P) `echo "$$dist_files" | \
- sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \
- sort -u` ;; \
- esac; \
- for file in $$dist_files; do \
- if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \
- if test -d $$d/$$file; then \
- dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \
- if test -d "$(distdir)/$$file"; then \
- find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
- fi; \
- if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \
- cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \
- find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
- fi; \
- cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \
- else \
- test -f "$(distdir)/$$file" \
- || cp -p $$d/$$file "$(distdir)/$$file" \
- || exit 1; \
- fi; \
- done
-check-am: all-am
-check: check-am
-all-am: Makefile $(SCRIPTS) $(DATA)
-installdirs:
- for dir in "$(DESTDIR)$(pluginsdir)"; do \
- test -z "$$dir" || $(MKDIR_P) "$$dir"; \
- done
-install: install-am
-install-exec: install-exec-am
-install-data: install-data-am
-uninstall: uninstall-am
-
-install-am: all-am
- @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
-
-installcheck: installcheck-am
-install-strip:
- if test -z '$(STRIP)'; then \
- $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
- install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
- install; \
- else \
- $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
- install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
- "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \
- fi
-mostlyclean-generic:
-
-clean-generic:
- -test -z "$(CLEANFILES)" || rm -f $(CLEANFILES)
-
-distclean-generic:
- -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
- -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES)
-
-maintainer-clean-generic:
- @echo "This command is intended for maintainers to use"
- @echo "it deletes files that may require special tools to rebuild."
- -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES)
-clean: clean-am
-
-clean-am: clean-generic mostlyclean-am
-
-distclean: distclean-am
- -rm -f Makefile
-distclean-am: clean-am distclean-generic
-
-dvi: dvi-am
-
-dvi-am:
-
-html: html-am
-
-html-am:
-
-info: info-am
-
-info-am:
-
-install-data-am: install-dist_pluginsSCRIPTS
-
-install-dvi: install-dvi-am
-
-install-dvi-am:
-
-install-exec-am:
-
-install-html: install-html-am
-
-install-html-am:
-
-install-info: install-info-am
-
-install-info-am:
-
-install-man:
-
-install-pdf: install-pdf-am
-
-install-pdf-am:
-
-install-ps: install-ps-am
-
-install-ps-am:
-
-installcheck-am:
-
-maintainer-clean: maintainer-clean-am
- -rm -f Makefile
-maintainer-clean-am: distclean-am maintainer-clean-generic
-
-mostlyclean: mostlyclean-am
-
-mostlyclean-am: mostlyclean-generic
-
-pdf: pdf-am
-
-pdf-am:
-
-ps: ps-am
-
-ps-am:
-
-uninstall-am: uninstall-dist_pluginsSCRIPTS
-
-.MAKE: install-am install-strip
-
-.PHONY: all all-am check check-am clean clean-generic cscopelist-am \
- ctags-am distclean distclean-generic distdir dvi dvi-am html \
- html-am info info-am install install-am install-data \
- install-data-am install-dist_pluginsSCRIPTS install-dvi \
- install-dvi-am install-exec install-exec-am install-html \
- install-html-am install-info install-info-am install-man \
- install-pdf install-pdf-am install-ps install-ps-am \
- install-strip installcheck installcheck-am installdirs \
- maintainer-clean maintainer-clean-generic mostlyclean \
- mostlyclean-generic pdf pdf-am ps ps-am tags-am uninstall \
- uninstall-am uninstall-dist_pluginsSCRIPTS
-
-.in:
- if sed \
- -e 's#[@]localstatedir_POST@#$(localstatedir)#g' \
- -e 's#[@]sbindir_POST@#$(sbindir)#g' \
- -e 's#[@]sysconfdir_POST@#$(sysconfdir)#g' \
- -e 's#[@]pythondir_POST@#$(pythondir)#g' \
- -e 's#[@]configdir_POST@#$(configdir)#g' \
- -e 's#[@]libconfigdir_POST@#$(libconfigdir)#g' \
- -e 's#[@]cachedir_POST@#$(cachedir)#g' \
- $< > $@.tmp; then \
- mv "$@.tmp" "$@"; \
- else \
- rm -f "$@.tmp"; \
- false; \
- fi
-
-# Tell versions [3.59,3.63) of GNU make to not export all variables.
-# Otherwise a system limit (for SysV at least) may be exceeded.
-.NOEXPORT:
diff --git a/collectors/cgroups.plugin/README.md b/collectors/cgroups.plugin/README.md
index 47eeebc5..d4f6d8ce 100644
--- a/collectors/cgroups.plugin/README.md
+++ b/collectors/cgroups.plugin/README.md
@@ -32,7 +32,7 @@ Linux exposes resource usage reporting and provides dynamic configuration for cg
path to /sys/fs/cgroup/blkio = /sys/fs/cgroup/blkio
path to /sys/fs/cgroup/memory = /sys/fs/cgroup/memory
path to /sys/fs/cgroup/devices = /sys/fs/cgroup/devices
-```
+```
netdata rescans these directories for added or removed cgroups every `check for new cgroups every` seconds.
@@ -51,7 +51,7 @@ To provide a sane default for this setting, netdata uses the following pattern l
```
[plugin:cgroups]
- search for cgroups in subpaths matching = !*/init.scope !*-qemu !/init.scope !/system !/systemd !/user !/user.slice *
+ search for cgroups in subpaths matching = !*/init.scope !*-qemu !/init.scope !/system !/systemd !/user !/user.slice *
```
So, we disable checking for **child cgroups** in systemd internal cgroups ([systemd services are monitored by netdata](#monitoring-systemd-services)), user cgroups (normally used for desktop and remote user sessions), qemu virtual machines (child cgroups of virtual machines) and `init.scope`. All others are enabled.
@@ -70,7 +70,7 @@ To provide a sane default, netdata uses the following pattern list (it checks th
```
[plugin:cgroups]
- enable by default cgroups matching = !*/init.scope *.scope !*/vcpu* !*/emulator !*.mount !*.partition !*.service !*.slice !*.swap !*.user !/ !/docker !/libvirt !/lxc !/lxc/*/ns !/lxc/*/ns/* !/machine !/qemu !/system !/systemd !/user *
+ enable by default cgroups matching = !*/init.scope *.scope !*/vcpu* !*/emulator !*.mount !*.partition !*.service !*.slice !*.swap !*.user !/ !/docker !/libvirt !/lxc !/lxc/*/ns !/lxc/*/ns/* !/machine !/qemu !/system !/systemd !/user *
```
The above provides the default `yes` or `no` setting for the cgroup. However, there is an additional step. In many cases the cgroups found in the `/sys/fs/cgroup` hierarchy are just random numbers and in many cases these numbers are ephemeral: they change across reboots or sessions.
@@ -158,6 +158,13 @@ cgroup_enable=memory swapaccount=1
You can add the above directly to the `linux` line in `/boot/grub/grub.cfg`, or append them to `GRUB_CMDLINE_LINUX` in `/etc/default/grub` (in which case you will have to run `update-grub` before rebooting). On DigitalOcean Debian images you may have to set it in `/etc/default/grub.d/50-cloudimg-settings.cfg`.
+Which systemd services are monitored by netdata is determined by the following pattern list:
+
+```
+[plugin:cgroups]
+ cgroups to match as systemd services = !/system.slice/*/*.service /system.slice/*.service
+```
+
---
## Monitoring ephemeral containers
@@ -185,3 +192,5 @@ So, when a network interface or container stops, netdata might log a few errors
6. obsolete charts will be removed from memory 1 hour after the last user viewed them (configurable with `[global].cleanup obsolete charts after seconds = 3600` in netdata.conf).
7. when obsolete charts are removed from memory they are also deleted from disk (configurable with `[global].delete obsolete charts files = yes`)
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fcgroups.plugin%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
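The pattern lists in this README follow netdata's simple-pattern rules: space-separated globs, checked left to right, first match wins, and a leading `!` negates. A hypothetical variation on the systemd-services list above shows why ordering matters:

```
[plugin:cgroups]
    # hypothetical: monitor every systemd service except getty instances;
    # the negative pattern must come first, because the first match wins
    cgroups to match as systemd services = !/system.slice/getty*.service /system.slice/*.service
```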
diff --git a/collectors/cgroups.plugin/cgroup-name.sh b/collectors/cgroups.plugin/cgroup-name.sh
deleted file mode 100644
index 6bf8b8b0..00000000
--- a/collectors/cgroups.plugin/cgroup-name.sh
+++ /dev/null
@@ -1,196 +0,0 @@
-#!/usr/bin/env bash
-
-# netdata
-# real-time performance and health monitoring, done right!
-# (C) 2016 Costa Tsaousis <costa@tsaousis.gr>
-# SPDX-License-Identifier: GPL-3.0-or-later
-#
-# Script to find a better name for cgroups
-#
-
-export PATH="${PATH}:/sbin:/usr/sbin:/usr/local/sbin"
-export LC_ALL=C
-
-# -----------------------------------------------------------------------------
-
-PROGRAM_NAME="$(basename "${0}")"
-
-logdate() {
- date "+%Y-%m-%d %H:%M:%S"
-}
-
-log() {
- local status="${1}"
- shift
-
- echo >&2 "$(logdate): ${PROGRAM_NAME}: ${status}: ${*}"
-
-}
-
-warning() {
- log WARNING "${@}"
-}
-
-error() {
- log ERROR "${@}"
-}
-
-info() {
- log INFO "${@}"
-}
-
-fatal() {
- log FATAL "${@}"
- exit 1
-}
-
-debug=0
-debug() {
- [ $debug -eq 1 ] && log DEBUG "${@}"
-}
-
-# -----------------------------------------------------------------------------
-
-[ -z "${NETDATA_USER_CONFIG_DIR}" ] && NETDATA_USER_CONFIG_DIR="/usr/local/etc/netdata"
-[ -z "${NETDATA_STOCK_CONFIG_DIR}" ] && NETDATA_STOCK_CONFIG_DIR="/usr/local/lib/netdata/conf.d"
-
-DOCKER_HOST="${DOCKER_HOST:=/var/run/docker.sock}"
-CGROUP="${1}"
-NAME=
-
-# -----------------------------------------------------------------------------
-
-if [ -z "${CGROUP}" ]
- then
- fatal "called without a cgroup name. Nothing to do."
-fi
-
-for CONFIG in "${NETDATA_USER_CONFIG_DIR}/cgroups-names.conf" "${NETDATA_STOCK_CONFIG_DIR}/cgroups-names.conf"
-do
- if [ -f "${CONFIG}" ]
- then
- NAME="$(grep "^${CGROUP} " "${CONFIG}" | sed "s/[[:space:]]\+/ /g" | cut -d ' ' -f 2)"
- if [ -z "${NAME}" ]
- then
- info "cannot find cgroup '${CGROUP}' in '${CONFIG}'."
- else
- break
- fi
- #else
- # info "configuration file '${CONFIG}' is not available."
- fi
-done
-
-function docker_get_name_classic {
- local id="${1}"
- info "Running command: docker ps --filter=id=\"${id}\" --format=\"{{.Names}}\""
- NAME="$( docker ps --filter=id="${id}" --format="{{.Names}}" )"
- return 0
-}
-
-function docker_get_name_api {
- local id="${1}"
- if [ ! -S "${DOCKER_HOST}" ]
- then
- warning "Can't find ${DOCKER_HOST}"
- return 1
- fi
- info "Running API command: /containers/${id}/json"
- JSON=$(echo -e "GET /containers/${id}/json HTTP/1.0\r\n" | nc -U ${DOCKER_HOST} | grep '^{.*')
- NAME=$(echo $JSON | jq -r .Name,.Config.Hostname | grep -v null | head -n1 | sed 's|^/||')
- return 0
-}
-
-function docker_get_name {
- local id="${1}"
- if hash docker 2>/dev/null
- then
- docker_get_name_classic "${id}"
- else
- docker_get_name_api "${id}" || docker_get_name_classic "${id}"
- fi
- if [ -z "${NAME}" ]
- then
- warning "cannot find the name of docker container '${id}'"
- NAME="${id:0:12}"
- else
- info "docker container '${id}' is named '${NAME}'"
- fi
-}
-
-if [ -z "${NAME}" ]
- then
- if [[ "${CGROUP}" =~ ^.*docker[-_/\.][a-fA-F0-9]+[-_\.]?.*$ ]]
- then
- # docker containers
-
- DOCKERID="$( echo "${CGROUP}" | sed "s|^.*docker[-_/]\([a-fA-F0-9]\+\)[-_\.]\?.*$|\1|" )"
- # echo "DOCKERID=${DOCKERID}"
-
- if [ ! -z "${DOCKERID}" -a \( ${#DOCKERID} -eq 64 -o ${#DOCKERID} -eq 12 \) ]
- then
- docker_get_name "${DOCKERID}"
- else
- error "a docker id cannot be extracted from docker cgroup '${CGROUP}'."
- fi
- elif [[ "${CGROUP}" =~ ^.*kubepods[_/].*[_/]pod[a-fA-F0-9-]+[_/][a-fA-F0-9]+$ ]]
- then
- # kubernetes
-
- DOCKERID="$( echo "${CGROUP}" | sed "s|^.*kubepods[_/].*[_/]pod[a-fA-F0-9-]\+[_/]\([a-fA-F0-9]\+\)$|\1|" )"
- # echo "DOCKERID=${DOCKERID}"
-
- if [ ! -z "${DOCKERID}" -a \( ${#DOCKERID} -eq 64 -o ${#DOCKERID} -eq 12 \) ]
- then
- docker_get_name "${DOCKERID}"
- else
- error "a docker id cannot be extracted from kubernetes cgroup '${CGROUP}'."
- fi
- elif [[ "${CGROUP}" =~ machine.slice[_/].*\.service ]]
- then
- # systemd-nspawn
-
- NAME="$(echo ${CGROUP} | sed 's/.*machine.slice[_\/]\(.*\)\.service/\1/g')"
-
- elif [[ "${CGROUP}" =~ machine.slice_machine.*-qemu ]]
- then
- # libvirtd / qemu virtual machines
-
- # NAME="$(echo ${CGROUP} | sed 's/machine.slice_machine.*-qemu//; s/\/x2d//; s/\/x2d/\-/g; s/\.scope//g')"
- NAME="qemu_$(echo ${CGROUP} | sed 's/machine.slice_machine.*-qemu//; s/\/x2d[[:digit:]]*//; s/\/x2d//g; s/\.scope//g')"
-
- elif [[ "${CGROUP}" =~ machine_.*\.libvirt-qemu ]]
- then
- # libvirtd / qemu virtual machines
- NAME="qemu_$(echo ${CGROUP} | sed 's/^machine_//; s/\.libvirt-qemu$//; s/-/_/;')"
-
- elif [[ "${CGROUP}" =~ qemu.slice_([0-9]+).scope && -d /etc/pve ]]
- then
- # Proxmox VMs
-
- FILENAME="/etc/pve/qemu-server/${BASH_REMATCH[1]}.conf"
- if [[ -f $FILENAME && -r $FILENAME ]]
- then
- NAME="qemu_$(grep -e '^name: ' "/etc/pve/qemu-server/${BASH_REMATCH[1]}.conf" | head -1 | sed -rn 's|\s*name\s*:\s*(.*)?$|\1|p')"
- else
- error "proxmox config file missing ${FILENAME} or netdata does not have read access. Please ensure netdata is a member of www-data group."
- fi
- elif [[ "${CGROUP}" =~ lxc_([0-9]+) && -d /etc/pve ]]
- then
- # Proxmox Containers (LXC)
-
- FILENAME="/etc/pve/lxc/${BASH_REMATCH[1]}.conf"
- if [[ -f ${FILENAME} && -r ${FILENAME} ]]
- then
- NAME=$(grep -e '^hostname: ' /etc/pve/lxc/${BASH_REMATCH[1]}.conf | head -1 | sed -rn 's|\s*hostname\s*:\s*(.*)?$|\1|p')
- else
- error "proxmox config file missing ${FILENAME} or netdata does not have read access. Please ensure netdata is a member of www-data group."
- fi
- fi
-
- [ -z "${NAME}" ] && NAME="${CGROUP}"
- [ ${#NAME} -gt 100 ] && NAME="${NAME:0:100}"
-fi
-
-info "cgroup '${CGROUP}' is called '${NAME}'"
-echo "${NAME}"
diff --git a/collectors/cgroups.plugin/cgroup-name.sh.in b/collectors/cgroups.plugin/cgroup-name.sh.in
index 53696a4b..3aebe2bf 100755
--- a/collectors/cgroups.plugin/cgroup-name.sh.in
+++ b/collectors/cgroups.plugin/cgroup-name.sh.in
@@ -1,4 +1,5 @@
#!/usr/bin/env bash
+#shellcheck disable=SC2001
# netdata
# real-time performance and health monitoring, done right!
@@ -16,42 +17,80 @@ export LC_ALL=C
PROGRAM_NAME="$(basename "${0}")"
logdate() {
- date "+%Y-%m-%d %H:%M:%S"
+ date "+%Y-%m-%d %H:%M:%S"
}
log() {
- local status="${1}"
- shift
+ local status="${1}"
+ shift
- echo >&2 "$(logdate): ${PROGRAM_NAME}: ${status}: ${*}"
+ echo >&2 "$(logdate): ${PROGRAM_NAME}: ${status}: ${*}"
}
warning() {
- log WARNING "${@}"
+ log WARNING "${@}"
}
error() {
- log ERROR "${@}"
+ log ERROR "${@}"
}
info() {
- log INFO "${@}"
+ log INFO "${@}"
}
fatal() {
- log FATAL "${@}"
- exit 1
+ log FATAL "${@}"
+ exit 1
}
-debug=0
-debug() {
- [ $debug -eq 1 ] && log DEBUG "${@}"
+function docker_get_name_classic() {
+ local id="${1}"
+ info "Running command: docker ps --filter=id=\"${id}\" --format=\"{{.Names}}\""
+ NAME="$(docker ps --filter=id="${id}" --format="{{.Names}}")"
+ return 0
+}
+
+function docker_get_name_api() {
+ local id="${1}"
+ if [ ! -S "${DOCKER_HOST}" ]; then
+ warning "Can't find ${DOCKER_HOST}"
+ return 1
+ fi
+ info "Running API command: /containers/${id}/json"
+ JSON=$(echo -e "GET /containers/${id}/json HTTP/1.0\\r\\n" | nc -U "${DOCKER_HOST}" | grep '^{.*')
+ NAME=$(echo "$JSON" | jq -r .Name,.Config.Hostname | grep -v null | head -n1 | sed 's|^/||')
+ return 0
+}
+
+function docker_get_name() {
+ local id="${1}"
+ if hash docker 2>/dev/null; then
+ docker_get_name_classic "${id}"
+ else
+ docker_get_name_api "${id}" || docker_get_name_classic "${id}"
+ fi
+ if [ -z "${NAME}" ]; then
+ warning "cannot find the name of docker container '${id}'"
+ NAME="${id:0:12}"
+ else
+ info "docker container '${id}' is named '${NAME}'"
+ fi
+}
+
+function docker_validate_id() {
+ local id="${1}"
+ if [ -n "${id}" ] && { [ ${#id} -eq 64 ] || [ ${#id} -eq 12 ]; }; then
+ docker_get_name "${id}"
+ else
+ error "a docker id cannot be extracted from docker cgroup '${CGROUP}'."
+ fi
}
# -----------------------------------------------------------------------------
-[ -z "${NETDATA_USER_CONFIG_DIR}" ] && NETDATA_USER_CONFIG_DIR="@configdir_POST@"
+[ -z "${NETDATA_USER_CONFIG_DIR}" ] && NETDATA_USER_CONFIG_DIR="@configdir_POST@"
[ -z "${NETDATA_STOCK_CONFIG_DIR}" ] && NETDATA_STOCK_CONFIG_DIR="@libconfigdir_POST@"
DOCKER_HOST="${DOCKER_HOST:=/var/run/docker.sock}"
@@ -60,136 +99,77 @@ NAME=
# -----------------------------------------------------------------------------
-if [ -z "${CGROUP}" ]
- then
- fatal "called without a cgroup name. Nothing to do."
+if [ -z "${CGROUP}" ]; then
+ fatal "called without a cgroup name. Nothing to do."
fi
-for CONFIG in "${NETDATA_USER_CONFIG_DIR}/cgroups-names.conf" "${NETDATA_STOCK_CONFIG_DIR}/cgroups-names.conf"
-do
- if [ -f "${CONFIG}" ]
- then
- NAME="$(grep "^${CGROUP} " "${CONFIG}" | sed "s/[[:space:]]\+/ /g" | cut -d ' ' -f 2)"
- if [ -z "${NAME}" ]
- then
- info "cannot find cgroup '${CGROUP}' in '${CONFIG}'."
- else
- break
- fi
- #else
- # info "configuration file '${CONFIG}' is not available."
- fi
+for CONFIG in "${NETDATA_USER_CONFIG_DIR}/cgroups-names.conf" "${NETDATA_STOCK_CONFIG_DIR}/cgroups-names.conf"; do
+ if [ -f "${CONFIG}" ]; then
+ NAME="$(grep "^${CGROUP} " "${CONFIG}" | sed 's/[[:space:]]\+/ /g' | cut -d ' ' -f 2)"
+ if [ -z "${NAME}" ]; then
+ info "cannot find cgroup '${CGROUP}' in '${CONFIG}'."
+ else
+ break
+ fi
+ #else
+ # info "configuration file '${CONFIG}' is not available."
+ fi
done
-function docker_get_name_classic {
- local id="${1}"
- info "Running command: docker ps --filter=id=\"${id}\" --format=\"{{.Names}}\""
- NAME="$( docker ps --filter=id="${id}" --format="{{.Names}}" )"
- return 0
-}
-
-function docker_get_name_api {
- local id="${1}"
- if [ ! -S "${DOCKER_HOST}" ]
- then
- warning "Can't find ${DOCKER_HOST}"
- return 1
- fi
- info "Running API command: /containers/${id}/json"
- JSON=$(echo -e "GET /containers/${id}/json HTTP/1.0\r\n" | nc -U ${DOCKER_HOST} | grep '^{.*')
- NAME=$(echo $JSON | jq -r .Name,.Config.Hostname | grep -v null | head -n1 | sed 's|^/||')
- return 0
-}
-
-function docker_get_name {
- local id="${1}"
- if hash docker 2>/dev/null
- then
- docker_get_name_classic "${id}"
- else
- docker_get_name_api "${id}" || docker_get_name_classic "${id}"
- fi
- if [ -z "${NAME}" ]
- then
- warning "cannot find the name of docker container '${id}'"
- NAME="${id:0:12}"
- else
- info "docker container '${id}' is named '${NAME}'"
- fi
-}
-
-if [ -z "${NAME}" ]
- then
- if [[ "${CGROUP}" =~ ^.*docker[-_/\.][a-fA-F0-9]+[-_\.]?.*$ ]]
- then
- # docker containers
-
- DOCKERID="$( echo "${CGROUP}" | sed "s|^.*docker[-_/]\([a-fA-F0-9]\+\)[-_\.]\?.*$|\1|" )"
- # echo "DOCKERID=${DOCKERID}"
-
- if [ ! -z "${DOCKERID}" -a \( ${#DOCKERID} -eq 64 -o ${#DOCKERID} -eq 12 \) ]
- then
- docker_get_name "${DOCKERID}"
- else
- error "a docker id cannot be extracted from docker cgroup '${CGROUP}'."
- fi
- elif [[ "${CGROUP}" =~ ^.*kubepods[_/].*[_/]pod[a-fA-F0-9-]+[_/][a-fA-F0-9]+$ ]]
- then
- # kubernetes
-
- DOCKERID="$( echo "${CGROUP}" | sed "s|^.*kubepods[_/].*[_/]pod[a-fA-F0-9-]\+[_/]\([a-fA-F0-9]\+\)$|\1|" )"
- # echo "DOCKERID=${DOCKERID}"
-
- if [ ! -z "${DOCKERID}" -a \( ${#DOCKERID} -eq 64 -o ${#DOCKERID} -eq 12 \) ]
- then
- docker_get_name "${DOCKERID}"
- else
- error "a docker id cannot be extracted from kubernetes cgroup '${CGROUP}'."
- fi
- elif [[ "${CGROUP}" =~ machine.slice[_/].*\.service ]]
- then
- # systemd-nspawn
-
- NAME="$(echo ${CGROUP} | sed 's/.*machine.slice[_\/]\(.*\)\.service/\1/g')"
-
- elif [[ "${CGROUP}" =~ machine.slice_machine.*-qemu ]]
- then
- # libvirtd / qemu virtual machines
-
- # NAME="$(echo ${CGROUP} | sed 's/machine.slice_machine.*-qemu//; s/\/x2d//; s/\/x2d/\-/g; s/\.scope//g')"
- NAME="qemu_$(echo ${CGROUP} | sed 's/machine.slice_machine.*-qemu//; s/\/x2d[[:digit:]]*//; s/\/x2d//g; s/\.scope//g')"
-
- elif [[ "${CGROUP}" =~ machine_.*\.libvirt-qemu ]]
- then
- # libvirtd / qemu virtual machines
- NAME="qemu_$(echo ${CGROUP} | sed 's/^machine_//; s/\.libvirt-qemu$//; s/-/_/;')"
-
- elif [[ "${CGROUP}" =~ qemu.slice_([0-9]+).scope && -d /etc/pve ]]
- then
- # Proxmox VMs
-
- FILENAME="/etc/pve/qemu-server/${BASH_REMATCH[1]}.conf"
- if [[ -f $FILENAME && -r $FILENAME ]]
- then
- NAME="qemu_$(grep -e '^name: ' "/etc/pve/qemu-server/${BASH_REMATCH[1]}.conf" | head -1 | sed -rn 's|\s*name\s*:\s*(.*)?$|\1|p')"
- else
- error "proxmox config file missing ${FILENAME} or netdata does not have read access. Please ensure netdata is a member of www-data group."
- fi
- elif [[ "${CGROUP}" =~ lxc_([0-9]+) && -d /etc/pve ]]
- then
- # Proxmox Containers (LXC)
-
- FILENAME="/etc/pve/lxc/${BASH_REMATCH[1]}.conf"
- if [[ -f ${FILENAME} && -r ${FILENAME} ]]
- then
- NAME=$(grep -e '^hostname: ' /etc/pve/lxc/${BASH_REMATCH[1]}.conf | head -1 | sed -rn 's|\s*hostname\s*:\s*(.*)?$|\1|p')
- else
- error "proxmox config file missing ${FILENAME} or netdata does not have read access. Please ensure netdata is a member of www-data group."
- fi
- fi
-
- [ -z "${NAME}" ] && NAME="${CGROUP}"
- [ ${#NAME} -gt 100 ] && NAME="${NAME:0:100}"
+if [ -z "${NAME}" ]; then
+ if [[ ${CGROUP} =~ ^.*docker[-_/\.][a-fA-F0-9]+[-_\.]?.*$ ]]; then
+ # docker containers
+ #shellcheck disable=SC1117
+ DOCKERID="$(echo "${CGROUP}" | sed "s|^.*docker[-_/]\([a-fA-F0-9]\+\)[-_\.]\?.*$|\1|")"
+ docker_validate_id "${DOCKERID}"
+
+ elif [[ ${CGROUP} =~ ^.*ecs[-_/\.][a-fA-F0-9]+[-_\.]?.*$ ]]; then
+ # ECS
+ #shellcheck disable=SC1117
+ DOCKERID="$(echo "${CGROUP}" | sed "s|^.*ecs[-_/].*[-_/]\([a-fA-F0-9]\+\)[-_\.]\?.*$|\1|")"
+ docker_validate_id "${DOCKERID}"
+
+ elif [[ ${CGROUP} =~ ^.*kubepods[_/].*[_/]pod[a-fA-F0-9-]+[_/][a-fA-F0-9]+$ ]]; then
+ # kubernetes
+ #shellcheck disable=SC1117
+ DOCKERID="$(echo "${CGROUP}" | sed "s|^.*kubepods[_/].*[_/]pod[a-fA-F0-9-]\+[_/]\([a-fA-F0-9]\+\)$|\1|")"
+ docker_validate_id "${DOCKERID}"
+
+ elif [[ ${CGROUP} =~ machine.slice[_/].*\.service ]]; then
+ # systemd-nspawn
+ NAME="$(echo "${CGROUP}" | sed 's/.*machine.slice[_\/]\(.*\)\.service/\1/g')"
+
+ elif [[ ${CGROUP} =~ machine.slice_machine.*-qemu ]]; then
+ # libvirtd / qemu virtual machines
+ # NAME="$(echo ${CGROUP} | sed 's/machine.slice_machine.*-qemu//; s/\/x2d//; s/\/x2d/\-/g; s/\.scope//g')"
+ NAME="qemu_$(echo "${CGROUP}" | sed 's/machine.slice_machine.*-qemu//; s/\/x2d[[:digit:]]*//; s/\/x2d//g; s/\.scope//g')"
+
+ elif [[ ${CGROUP} =~ machine_.*\.libvirt-qemu ]]; then
+ # libvirtd / qemu virtual machines
+ NAME="qemu_$(echo "${CGROUP}" | sed 's/^machine_//; s/\.libvirt-qemu$//; s/-/_/;')"
+
+ elif [[ ${CGROUP} =~ qemu.slice_([0-9]+).scope && -d /etc/pve ]]; then
+ # Proxmox VMs
+
+ FILENAME="/etc/pve/qemu-server/${BASH_REMATCH[1]}.conf"
+ if [[ -f $FILENAME && -r $FILENAME ]]; then
+ NAME="qemu_$(grep -e '^name: ' "/etc/pve/qemu-server/${BASH_REMATCH[1]}.conf" | head -1 | sed -rn 's|\s*name\s*:\s*(.*)?$|\1|p')"
+ else
+			error "Proxmox config file ${FILENAME} is missing or netdata does not have read access. Please ensure netdata is a member of the www-data group."
+ fi
+ elif [[ ${CGROUP} =~ lxc_([0-9]+) && -d /etc/pve ]]; then
+ # Proxmox Containers (LXC)
+
+ FILENAME="/etc/pve/lxc/${BASH_REMATCH[1]}.conf"
+ if [[ -f ${FILENAME} && -r ${FILENAME} ]]; then
+ NAME=$(grep -e '^hostname: ' "/etc/pve/lxc/${BASH_REMATCH[1]}.conf" | head -1 | sed -rn 's|\s*hostname\s*:\s*(.*)?$|\1|p')
+ else
+			error "Proxmox config file ${FILENAME} is missing or netdata does not have read access. Please ensure netdata is a member of the www-data group."
+ fi
+ fi
+
+ [ -z "${NAME}" ] && NAME="${CGROUP}"
+ [ ${#NAME} -gt 100 ] && NAME="${NAME:0:100}"
fi
info "cgroup '${CGROUP}' is called '${NAME}'"
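
The extraction above can be exercised in isolation. A minimal sketch with a sample, purely illustrative 64-character id; the script's docker_validate_id helper (defined earlier in the file, outside this hunk) then accepts 12- or 64-character ids and resolves the container name, much as the removed docker_get_name chain did:

    CGROUP="docker_d4ad0d4bcd1a3a4f0e0c0d2f9f7e1c2b3a4d5e6f7a8b9c0d1e2f3a4b5c6d7e8f"
    # same sed expression as the collector: capture the hex id after "docker" and a separator
    DOCKERID="$(echo "${CGROUP}" | sed "s|^.*docker[-_/]\([a-fA-F0-9]\+\)[-_\.]\?.*$|\1|")"
    echo "${DOCKERID:0:12} (length ${#DOCKERID})"   # d4ad0d4bcd1a (length 64)
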
diff --git a/collectors/cgroups.plugin/cgroup-network.c b/collectors/cgroups.plugin/cgroup-network.c
index 0cf2a263..5aeb9a59 100644
--- a/collectors/cgroups.plugin/cgroup-network.c
+++ b/collectors/cgroups.plugin/cgroup-network.c
@@ -24,6 +24,13 @@ void netdata_cleanup_and_exit(int ret) {
exit(ret);
}
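+// No-op stub: cgroup-network links shared netdata sources that reference
+// send_statistics(); this standalone helper deliberately reports nothing.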
+void send_statistics( const char *action, const char *action_result, const char *action_data) {
+ (void) action;
+ (void) action_result;
+ (void) action_data;
+ return;
+}
+
// callbacks required by popen()
void signals_block(void) {};
void signals_unblock(void) {};
diff --git a/collectors/cgroups.plugin/sys_fs_cgroup.c b/collectors/cgroups.plugin/sys_fs_cgroup.c
index 9c0fd7f4..f8e5167f 100644
--- a/collectors/cgroups.plugin/sys_fs_cgroup.c
+++ b/collectors/cgroups.plugin/sys_fs_cgroup.c
@@ -1489,7 +1489,7 @@ void update_systemd_services_charts(
, "services.mem_usage"
, (cgroup_used_memory_without_cache) ? "Systemd Services Used Memory without Cache"
: "Systemd Services Used Memory"
- , "MB"
+ , "MiB"
, PLUGIN_CGROUPS_NAME
, PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME
, NETDATA_CHART_PRIO_CGROUPS_SYSTEMD + 10
@@ -1512,7 +1512,7 @@ void update_systemd_services_charts(
, "mem"
, "services.mem_rss"
, "Systemd Services RSS Memory"
- , "MB"
+ , "MiB"
, PLUGIN_CGROUPS_NAME
, PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME
, NETDATA_CHART_PRIO_CGROUPS_SYSTEMD + 20
@@ -1533,7 +1533,7 @@ void update_systemd_services_charts(
, "mem"
, "services.mem_mapped"
, "Systemd Services Mapped Memory"
- , "MB"
+ , "MiB"
, PLUGIN_CGROUPS_NAME
, PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME
, NETDATA_CHART_PRIO_CGROUPS_SYSTEMD + 30
@@ -1554,7 +1554,7 @@ void update_systemd_services_charts(
, "mem"
, "services.mem_cache"
, "Systemd Services Cache Memory"
- , "MB"
+ , "MiB"
, PLUGIN_CGROUPS_NAME
, PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME
, NETDATA_CHART_PRIO_CGROUPS_SYSTEMD + 40
@@ -1575,7 +1575,7 @@ void update_systemd_services_charts(
, "mem"
, "services.mem_writeback"
, "Systemd Services Writeback Memory"
- , "MB"
+ , "MiB"
, PLUGIN_CGROUPS_NAME
, PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME
, NETDATA_CHART_PRIO_CGROUPS_SYSTEMD + 50
@@ -1596,7 +1596,7 @@ void update_systemd_services_charts(
, "mem"
, "services.mem_pgfault"
, "Systemd Services Memory Minor Page Faults"
- , "MB/s"
+ , "MiB/s"
, PLUGIN_CGROUPS_NAME
, PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME
, NETDATA_CHART_PRIO_CGROUPS_SYSTEMD + 60
@@ -1616,7 +1616,7 @@ void update_systemd_services_charts(
, "mem"
, "services.mem_pgmajfault"
, "Systemd Services Memory Major Page Faults"
- , "MB/s"
+ , "MiB/s"
, PLUGIN_CGROUPS_NAME
, PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME
, NETDATA_CHART_PRIO_CGROUPS_SYSTEMD + 70
@@ -1637,7 +1637,7 @@ void update_systemd_services_charts(
, "mem"
, "services.mem_pgpgin"
, "Systemd Services Memory Charging Activity"
- , "MB/s"
+ , "MiB/s"
, PLUGIN_CGROUPS_NAME
, PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME
, NETDATA_CHART_PRIO_CGROUPS_SYSTEMD + 80
@@ -1658,7 +1658,7 @@ void update_systemd_services_charts(
, "mem"
, "services.mem_pgpgout"
, "Systemd Services Memory Uncharging Activity"
- , "MB/s"
+ , "MiB/s"
, PLUGIN_CGROUPS_NAME
, PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME
, NETDATA_CHART_PRIO_CGROUPS_SYSTEMD + 90
@@ -1681,7 +1681,7 @@ void update_systemd_services_charts(
, "mem"
, "services.mem_failcnt"
, "Systemd Services Memory Limit Failures"
- , "MB"
+ , "failures"
, PLUGIN_CGROUPS_NAME
, PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME
, NETDATA_CHART_PRIO_CGROUPS_SYSTEMD + 110
@@ -1704,7 +1704,7 @@ void update_systemd_services_charts(
, "swap"
, "services.swap_usage"
, "Systemd Services Swap Memory Used"
- , "MB"
+ , "MiB"
, PLUGIN_CGROUPS_NAME
, PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME
, NETDATA_CHART_PRIO_CGROUPS_SYSTEMD + 100
@@ -1727,7 +1727,7 @@ void update_systemd_services_charts(
, "disk"
, "services.io_read"
, "Systemd Services Disk Read Bandwidth"
- , "KB/s"
+ , "KiB/s"
, PLUGIN_CGROUPS_NAME
, PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME
, NETDATA_CHART_PRIO_CGROUPS_SYSTEMD + 120
@@ -1748,7 +1748,7 @@ void update_systemd_services_charts(
, "disk"
, "services.io_write"
, "Systemd Services Disk Write Bandwidth"
- , "KB/s"
+ , "KiB/s"
, PLUGIN_CGROUPS_NAME
, PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME
, NETDATA_CHART_PRIO_CGROUPS_SYSTEMD + 130
@@ -1815,7 +1815,7 @@ void update_systemd_services_charts(
, "disk"
, "services.throttle_io_read"
, "Systemd Services Throttle Disk Read Bandwidth"
- , "KB/s"
+ , "KiB/s"
, PLUGIN_CGROUPS_NAME
, PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME
, NETDATA_CHART_PRIO_CGROUPS_SYSTEMD + 160
@@ -1836,7 +1836,7 @@ void update_systemd_services_charts(
, "disk"
, "services.throttle_io_write"
, "Systemd Services Throttle Disk Write Bandwidth"
- , "KB/s"
+ , "KiB/s"
, PLUGIN_CGROUPS_NAME
, PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME
, NETDATA_CHART_PRIO_CGROUPS_SYSTEMD + 170
@@ -2317,7 +2317,7 @@ void update_cgroup_charts(int update_every) {
, "mem"
, "cgroup.mem"
, title
- , "MB"
+ , "MiB"
, PLUGIN_CGROUPS_NAME
, PLUGIN_CGROUPS_MODULE_CGROUPS_NAME
, NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 210
@@ -2357,7 +2357,7 @@ void update_cgroup_charts(int update_every) {
, "mem"
, "cgroup.writeback"
, title
- , "MB"
+ , "MiB"
, PLUGIN_CGROUPS_NAME
, PLUGIN_CGROUPS_MODULE_CGROUPS_NAME
, NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 300
@@ -2389,7 +2389,7 @@ void update_cgroup_charts(int update_every) {
, "mem"
, "cgroup.mem_activity"
, title
- , "MB/s"
+ , "MiB/s"
, PLUGIN_CGROUPS_NAME
, PLUGIN_CGROUPS_MODULE_CGROUPS_NAME
, NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 400
@@ -2417,7 +2417,7 @@ void update_cgroup_charts(int update_every) {
, "mem"
, "cgroup.pgfaults"
, title
- , "MB/s"
+ , "MiB/s"
, PLUGIN_CGROUPS_NAME
, PLUGIN_CGROUPS_MODULE_CGROUPS_NAME
, NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 500
@@ -2447,7 +2447,7 @@ void update_cgroup_charts(int update_every) {
, "mem"
, "cgroup.mem_usage"
, title
- , "MB"
+ , "MiB"
, PLUGIN_CGROUPS_NAME
, PLUGIN_CGROUPS_MODULE_CGROUPS_NAME
, NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 200
@@ -2505,7 +2505,7 @@ void update_cgroup_charts(int update_every) {
, "disk"
, "cgroup.io"
, title
- , "KB/s"
+ , "KiB/s"
, PLUGIN_CGROUPS_NAME
, PLUGIN_CGROUPS_MODULE_CGROUPS_NAME
, NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 1200
@@ -2565,7 +2565,7 @@ void update_cgroup_charts(int update_every) {
, "disk"
, "cgroup.throttle_io"
, title
- , "KB/s"
+ , "KiB/s"
, PLUGIN_CGROUPS_NAME
, PLUGIN_CGROUPS_MODULE_CGROUPS_NAME
, NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 1200
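
The relabeling throughout this file is cosmetic but meaningful: assuming the chart divisors are powers of 1024 (which the move to binary prefixes implies), KiB/MiB now describe the plotted values accurately, and the mem_failcnt chart gets a real unit ("failures") in place of a copy-pasted "MB". A quick shell check of the difference the labels encode:

    bytes=536870912                       # 512 * 1024 * 1024
    echo "$((bytes / 1024 / 1024)) MiB"   # 512 MiB (binary prefix, the new label)
    echo "$((bytes / 1000 / 1000)) MB"    # 536 MB  (decimal prefix, what the old label implied)
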
diff --git a/collectors/charts.d.plugin/.keep b/collectors/charts.d.plugin/.keep
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/collectors/charts.d.plugin/.keep
diff --git a/collectors/charts.d.plugin/Makefile.am b/collectors/charts.d.plugin/Makefile.am
index e2e00258..2989b4b8 100644
--- a/collectors/charts.d.plugin/Makefile.am
+++ b/collectors/charts.d.plugin/Makefile.am
@@ -32,12 +32,11 @@ dist_charts_DATA = \
userchartsconfigdir=$(configdir)/charts.d
dist_userchartsconfig_DATA = \
- $(top_srcdir)/installer/.keep \
+ .keep \
$(NULL)
chartsconfigdir=$(libconfigdir)/charts.d
dist_chartsconfig_DATA = \
- $(top_srcdir)/installer/.keep \
$(NULL)
include ap/Makefile.inc
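
The hunk above stops pointing at $(top_srcdir)/installer/.keep (presumably gone along with the old installer directory) and ships a local, empty .keep instead, so the per-user charts.d config directory is still created at install time. A quick post-install check, assuming the default config path:

    # the otherwise-empty user config dir should contain only the placeholder
    ls -A /etc/netdata/charts.d/
    # .keep
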
diff --git a/collectors/charts.d.plugin/Makefile.in b/collectors/charts.d.plugin/Makefile.in
deleted file mode 100644
index 23e2edeb..00000000
--- a/collectors/charts.d.plugin/Makefile.in
+++ /dev/null
@@ -1,953 +0,0 @@
-# Makefile.in generated by automake 1.14.1 from Makefile.am.
-# @configure_input@
-
-# Copyright (C) 1994-2013 Free Software Foundation, Inc.
-
-# This Makefile.in is free software; the Free Software Foundation
-# gives unlimited permission to copy and/or distribute it,
-# with or without modifications, as long as this notice is preserved.
-
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
-# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
-# PARTICULAR PURPOSE.
-
-@SET_MAKE@
-
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-# THIS IS NOT A COMPLETE Makefile
-# IT IS INCLUDED BY ITS PARENT'S Makefile.am
-# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
-
-
-VPATH = @srcdir@
-am__is_gnu_make = test -n '$(MAKEFILE_LIST)' && test -n '$(MAKELEVEL)'
-am__make_running_with_option = \
- case $${target_option-} in \
- ?) ;; \
- *) echo "am__make_running_with_option: internal error: invalid" \
- "target option '$${target_option-}' specified" >&2; \
- exit 1;; \
- esac; \
- has_opt=no; \
- sane_makeflags=$$MAKEFLAGS; \
- if $(am__is_gnu_make); then \
- sane_makeflags=$$MFLAGS; \
- else \
- case $$MAKEFLAGS in \
- *\\[\ \ ]*) \
- bs=\\; \
- sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \
- | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \
- esac; \
- fi; \
- skip_next=no; \
- strip_trailopt () \
- { \
- flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \
- }; \
- for flg in $$sane_makeflags; do \
- test $$skip_next = yes && { skip_next=no; continue; }; \
- case $$flg in \
- *=*|--*) continue;; \
- -*I) strip_trailopt 'I'; skip_next=yes;; \
- -*I?*) strip_trailopt 'I';; \
- -*O) strip_trailopt 'O'; skip_next=yes;; \
- -*O?*) strip_trailopt 'O';; \
- -*l) strip_trailopt 'l'; skip_next=yes;; \
- -*l?*) strip_trailopt 'l';; \
- -[dEDm]) skip_next=yes;; \
- -[JT]) skip_next=yes;; \
- esac; \
- case $$flg in \
- *$$target_option*) has_opt=yes; break;; \
- esac; \
- done; \
- test $$has_opt = yes
-am__make_dryrun = (target_option=n; $(am__make_running_with_option))
-am__make_keepgoing = (target_option=k; $(am__make_running_with_option))
-pkgdatadir = $(datadir)/@PACKAGE@
-pkgincludedir = $(includedir)/@PACKAGE@
-pkglibdir = $(libdir)/@PACKAGE@
-pkglibexecdir = $(libexecdir)/@PACKAGE@
-am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd
-install_sh_DATA = $(install_sh) -c -m 644
-install_sh_PROGRAM = $(install_sh) -c
-install_sh_SCRIPT = $(install_sh) -c
-INSTALL_HEADER = $(INSTALL_DATA)
-transform = $(program_transform_name)
-NORMAL_INSTALL = :
-PRE_INSTALL = :
-POST_INSTALL = :
-NORMAL_UNINSTALL = :
-PRE_UNINSTALL = :
-POST_UNINSTALL = :
-build_triplet = @build@
-host_triplet = @host@
-DIST_COMMON = $(top_srcdir)/build/subst.inc $(srcdir)/ap/Makefile.inc \
- $(srcdir)/apache/Makefile.inc $(srcdir)/apcupsd/Makefile.inc \
- $(srcdir)/cpu_apps/Makefile.inc $(srcdir)/cpufreq/Makefile.inc \
- $(srcdir)/example/Makefile.inc $(srcdir)/exim/Makefile.inc \
- $(srcdir)/hddtemp/Makefile.inc \
- $(srcdir)/libreswan/Makefile.inc \
- $(srcdir)/load_average/Makefile.inc \
- $(srcdir)/mem_apps/Makefile.inc $(srcdir)/mysql/Makefile.inc \
- $(srcdir)/nginx/Makefile.inc $(srcdir)/nut/Makefile.inc \
- $(srcdir)/opensips/Makefile.inc $(srcdir)/phpfpm/Makefile.inc \
- $(srcdir)/postfix/Makefile.inc $(srcdir)/sensors/Makefile.inc \
- $(srcdir)/squid/Makefile.inc $(srcdir)/tomcat/Makefile.inc \
- $(srcdir)/Makefile.in $(srcdir)/Makefile.am \
- $(dist_charts_SCRIPTS) $(dist_plugins_SCRIPTS) \
- $(dist_charts_DATA) $(dist_chartsconfig_DATA) \
- $(dist_libconfig_DATA) $(dist_noinst_DATA) \
- $(dist_userchartsconfig_DATA)
-subdir = collectors/charts.d.plugin
-ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
-am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \
- $(top_srcdir)/build/m4/ax_c__generic.m4 \
- $(top_srcdir)/build/m4/ax_c_lto.m4 \
- $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \
- $(top_srcdir)/build/m4/ax_c_mallopt.m4 \
- $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \
- $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \
- $(top_srcdir)/build/m4/ax_pthread.m4 \
- $(top_srcdir)/build/m4/jemalloc.m4 \
- $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac
-am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
- $(ACLOCAL_M4)
-mkinstalldirs = $(install_sh) -d
-CONFIG_HEADER = $(top_builddir)/config.h
-CONFIG_CLEAN_FILES =
-CONFIG_CLEAN_VPATH_FILES =
-am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`;
-am__vpath_adj = case $$p in \
- $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \
- *) f=$$p;; \
- esac;
-am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`;
-am__install_max = 40
-am__nobase_strip_setup = \
- srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'`
-am__nobase_strip = \
- for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||"
-am__nobase_list = $(am__nobase_strip_setup); \
- for p in $$list; do echo "$$p $$p"; done | \
- sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \
- $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \
- if (++n[$$2] == $(am__install_max)) \
- { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \
- END { for (dir in files) print dir, files[dir] }'
-am__base_list = \
- sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \
- sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g'
-am__uninstall_files_from_dir = { \
- test -z "$$files" \
- || { test ! -d "$$dir" && test ! -f "$$dir" && test ! -r "$$dir"; } \
- || { echo " ( cd '$$dir' && rm -f" $$files ")"; \
- $(am__cd) "$$dir" && rm -f $$files; }; \
- }
-am__installdirs = "$(DESTDIR)$(chartsdir)" "$(DESTDIR)$(pluginsdir)" \
- "$(DESTDIR)$(chartsdir)" "$(DESTDIR)$(chartsconfigdir)" \
- "$(DESTDIR)$(libconfigdir)" "$(DESTDIR)$(userchartsconfigdir)"
-SCRIPTS = $(dist_charts_SCRIPTS) $(dist_plugins_SCRIPTS)
-AM_V_P = $(am__v_P_@AM_V@)
-am__v_P_ = $(am__v_P_@AM_DEFAULT_V@)
-am__v_P_0 = false
-am__v_P_1 = :
-AM_V_GEN = $(am__v_GEN_@AM_V@)
-am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@)
-am__v_GEN_0 = @echo " GEN " $@;
-am__v_GEN_1 =
-AM_V_at = $(am__v_at_@AM_V@)
-am__v_at_ = $(am__v_at_@AM_DEFAULT_V@)
-am__v_at_0 = @
-am__v_at_1 =
-SOURCES =
-DIST_SOURCES =
-am__can_run_installinfo = \
- case $$AM_UPDATE_INFO_DIR in \
- n|no|NO) false;; \
- *) (install-info --version) >/dev/null 2>&1;; \
- esac
-DATA = $(dist_charts_DATA) $(dist_chartsconfig_DATA) \
- $(dist_libconfig_DATA) $(dist_noinst_DATA) \
- $(dist_userchartsconfig_DATA)
-am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP)
-DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
-ACLOCAL = @ACLOCAL@
-AMTAR = @AMTAR@
-AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@
-AUTOCONF = @AUTOCONF@
-AUTOHEADER = @AUTOHEADER@
-AUTOMAKE = @AUTOMAKE@
-AWK = @AWK@
-CC = @CC@
-CCDEPMODE = @CCDEPMODE@
-CFLAGS = @CFLAGS@
-CPP = @CPP@
-CPPFLAGS = @CPPFLAGS@
-CYGPATH_W = @CYGPATH_W@
-DEFS = @DEFS@
-DEPDIR = @DEPDIR@
-ECHO_C = @ECHO_C@
-ECHO_N = @ECHO_N@
-ECHO_T = @ECHO_T@
-EGREP = @EGREP@
-EXEEXT = @EXEEXT@
-GREP = @GREP@
-INSTALL = @INSTALL@
-INSTALL_DATA = @INSTALL_DATA@
-INSTALL_PROGRAM = @INSTALL_PROGRAM@
-INSTALL_SCRIPT = @INSTALL_SCRIPT@
-INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
-IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@
-IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@
-LDFLAGS = @LDFLAGS@
-LIBCAP_CFLAGS = @LIBCAP_CFLAGS@
-LIBCAP_LIBS = @LIBCAP_LIBS@
-LIBMNL_CFLAGS = @LIBMNL_CFLAGS@
-LIBMNL_LIBS = @LIBMNL_LIBS@
-LIBOBJS = @LIBOBJS@
-LIBS = @LIBS@
-LTLIBOBJS = @LTLIBOBJS@
-MAINT = @MAINT@
-MAKEINFO = @MAKEINFO@
-MATH_CFLAGS = @MATH_CFLAGS@
-MATH_LIBS = @MATH_LIBS@
-MKDIR_P = @MKDIR_P@
-NFACCT_CFLAGS = @NFACCT_CFLAGS@
-NFACCT_LIBS = @NFACCT_LIBS@
-OBJEXT = @OBJEXT@
-OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@
-OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@
-OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@
-OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@
-OPTIONAL_MATH_CLFAGS = @OPTIONAL_MATH_CLFAGS@
-OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@
-OPTIONAL_NFACCT_CLFAGS = @OPTIONAL_NFACCT_CLFAGS@
-OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@
-OPTIONAL_UUID_CLFAGS = @OPTIONAL_UUID_CLFAGS@
-OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@
-OPTIONAL_ZLIB_CLFAGS = @OPTIONAL_ZLIB_CLFAGS@
-OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@
-PACKAGE = @PACKAGE@
-PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@
-PACKAGE_NAME = @PACKAGE_NAME@
-PACKAGE_RPM_RELEASE = @PACKAGE_RPM_RELEASE@
-PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@
-PACKAGE_STRING = @PACKAGE_STRING@
-PACKAGE_TARNAME = @PACKAGE_TARNAME@
-PACKAGE_URL = @PACKAGE_URL@
-PACKAGE_VERSION = @PACKAGE_VERSION@
-PATH_SEPARATOR = @PATH_SEPARATOR@
-PKG_CONFIG = @PKG_CONFIG@
-PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@
-PKG_CONFIG_PATH = @PKG_CONFIG_PATH@
-PTHREAD_CC = @PTHREAD_CC@
-PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
-PTHREAD_LIBS = @PTHREAD_LIBS@
-SET_MAKE = @SET_MAKE@
-SHELL = @SHELL@
-SSE_CANDIDATE = @SSE_CANDIDATE@
-STRIP = @STRIP@
-UUID_CFLAGS = @UUID_CFLAGS@
-UUID_LIBS = @UUID_LIBS@
-VERSION = @VERSION@
-ZLIB_CFLAGS = @ZLIB_CFLAGS@
-ZLIB_LIBS = @ZLIB_LIBS@
-abs_builddir = @abs_builddir@
-abs_srcdir = @abs_srcdir@
-abs_top_builddir = @abs_top_builddir@
-abs_top_srcdir = @abs_top_srcdir@
-ac_ct_CC = @ac_ct_CC@
-am__include = @am__include@
-am__leading_dot = @am__leading_dot@
-am__quote = @am__quote@
-am__tar = @am__tar@
-am__untar = @am__untar@
-ax_pthread_config = @ax_pthread_config@
-bindir = @bindir@
-build = @build@
-build_alias = @build_alias@
-build_cpu = @build_cpu@
-build_os = @build_os@
-build_target = @build_target@
-build_vendor = @build_vendor@
-builddir = @builddir@
-cachedir = @cachedir@
-chartsdir = @chartsdir@
-configdir = @configdir@
-datadir = @datadir@
-datarootdir = @datarootdir@
-docdir = @docdir@
-dvidir = @dvidir@
-exec_prefix = @exec_prefix@
-has_jemalloc = @has_jemalloc@
-has_tcmalloc = @has_tcmalloc@
-host = @host@
-host_alias = @host_alias@
-host_cpu = @host_cpu@
-host_os = @host_os@
-host_vendor = @host_vendor@
-htmldir = @htmldir@
-includedir = @includedir@
-infodir = @infodir@
-install_sh = @install_sh@
-libconfigdir = @libconfigdir@
-libdir = @libdir@
-libexecdir = @libexecdir@
-localedir = @localedir@
-localstatedir = @localstatedir@
-logdir = @logdir@
-mandir = @mandir@
-mkdir_p = @mkdir_p@
-nodedir = @nodedir@
-oldincludedir = @oldincludedir@
-pdfdir = @pdfdir@
-pluginsdir = @pluginsdir@
-prefix = @prefix@
-program_transform_name = @program_transform_name@
-psdir = @psdir@
-pythondir = @pythondir@
-registrydir = @registrydir@
-sbindir = @sbindir@
-sharedstatedir = @sharedstatedir@
-srcdir = @srcdir@
-sysconfdir = @sysconfdir@
-target_alias = @target_alias@
-top_build_prefix = @top_build_prefix@
-top_builddir = @top_builddir@
-top_srcdir = @top_srcdir@
-varlibdir = @varlibdir@
-webdir = @webdir@
-MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
-CLEANFILES = \
- charts.d.plugin \
- $(NULL)
-
-SUFFIXES = .in
-dist_libconfig_DATA = \
- charts.d.conf \
- $(NULL)
-
-dist_plugins_SCRIPTS = \
- charts.d.dryrun-helper.sh \
- charts.d.plugin \
- loopsleepms.sh.inc \
- $(NULL)
-
-
-# do not install these files, but include them in the distribution
-
-dist_noinst_DATA = charts.d.plugin.in README.md $(NULL) ap/README.md \
- ap/Makefile.inc apache/README.md apache/Makefile.inc \
- apcupsd/README.md apcupsd/Makefile.inc cpu_apps/README.md \
- cpu_apps/Makefile.inc cpufreq/README.md cpufreq/Makefile.inc \
- example/README.md example/Makefile.inc exim/README.md \
- exim/Makefile.inc hddtemp/README.md hddtemp/Makefile.inc \
- libreswan/README.md libreswan/Makefile.inc \
- load_average/README.md load_average/Makefile.inc \
- mem_apps/README.md mem_apps/Makefile.inc mysql/README.md \
- mysql/Makefile.inc nginx/README.md nginx/Makefile.inc \
- nut/README.md nut/Makefile.inc opensips/README.md \
- opensips/Makefile.inc phpfpm/README.md phpfpm/Makefile.inc \
- postfix/README.md postfix/Makefile.inc sensors/README.md \
- sensors/Makefile.inc squid/README.md squid/Makefile.inc \
- tomcat/README.md tomcat/Makefile.inc
-dist_charts_SCRIPTS = \
- $(NULL)
-
-
-# install these files
-
-dist_charts_DATA = $(NULL) ap/ap.chart.sh apache/apache.chart.sh \
- apcupsd/apcupsd.chart.sh cpu_apps/cpu_apps.chart.sh \
- cpufreq/cpufreq.chart.sh example/example.chart.sh \
- exim/exim.chart.sh hddtemp/hddtemp.chart.sh \
- libreswan/libreswan.chart.sh \
- load_average/load_average.chart.sh mem_apps/mem_apps.chart.sh \
- mysql/mysql.chart.sh nginx/nginx.chart.sh nut/nut.chart.sh \
- opensips/opensips.chart.sh phpfpm/phpfpm.chart.sh \
- postfix/postfix.chart.sh sensors/sensors.chart.sh \
- squid/squid.chart.sh tomcat/tomcat.chart.sh
-userchartsconfigdir = $(configdir)/charts.d
-dist_userchartsconfig_DATA = \
- $(top_srcdir)/installer/.keep \
- $(NULL)
-
-chartsconfigdir = $(libconfigdir)/charts.d
-dist_chartsconfig_DATA = $(top_srcdir)/installer/.keep $(NULL) \
- ap/ap.conf apache/apache.conf apcupsd/apcupsd.conf \
- cpu_apps/cpu_apps.conf cpufreq/cpufreq.conf \
- example/example.conf exim/exim.conf hddtemp/hddtemp.conf \
- libreswan/libreswan.conf load_average/load_average.conf \
- mem_apps/mem_apps.conf mysql/mysql.conf nginx/nginx.conf \
- nut/nut.conf opensips/opensips.conf phpfpm/phpfpm.conf \
- postfix/postfix.conf sensors/sensors.conf squid/squid.conf \
- tomcat/tomcat.conf
-all: all-am
-
-.SUFFIXES:
-.SUFFIXES: .in
-$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(top_srcdir)/build/subst.inc $(srcdir)/ap/Makefile.inc $(srcdir)/apache/Makefile.inc $(srcdir)/apcupsd/Makefile.inc $(srcdir)/cpu_apps/Makefile.inc $(srcdir)/cpufreq/Makefile.inc $(srcdir)/example/Makefile.inc $(srcdir)/exim/Makefile.inc $(srcdir)/hddtemp/Makefile.inc $(srcdir)/libreswan/Makefile.inc $(srcdir)/load_average/Makefile.inc $(srcdir)/mem_apps/Makefile.inc $(srcdir)/mysql/Makefile.inc $(srcdir)/nginx/Makefile.inc $(srcdir)/nut/Makefile.inc $(srcdir)/opensips/Makefile.inc $(srcdir)/phpfpm/Makefile.inc $(srcdir)/postfix/Makefile.inc $(srcdir)/sensors/Makefile.inc $(srcdir)/squid/Makefile.inc $(srcdir)/tomcat/Makefile.inc $(am__configure_deps)
- @for dep in $?; do \
- case '$(am__configure_deps)' in \
- *$$dep*) \
- ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \
- && { if test -f $@; then exit 0; else break; fi; }; \
- exit 1;; \
- esac; \
- done; \
- echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu collectors/charts.d.plugin/Makefile'; \
- $(am__cd) $(top_srcdir) && \
- $(AUTOMAKE) --gnu collectors/charts.d.plugin/Makefile
-.PRECIOUS: Makefile
-Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
- @case '$?' in \
- *config.status*) \
- cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \
- *) \
- echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \
- cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \
- esac;
-$(top_srcdir)/build/subst.inc $(srcdir)/ap/Makefile.inc $(srcdir)/apache/Makefile.inc $(srcdir)/apcupsd/Makefile.inc $(srcdir)/cpu_apps/Makefile.inc $(srcdir)/cpufreq/Makefile.inc $(srcdir)/example/Makefile.inc $(srcdir)/exim/Makefile.inc $(srcdir)/hddtemp/Makefile.inc $(srcdir)/libreswan/Makefile.inc $(srcdir)/load_average/Makefile.inc $(srcdir)/mem_apps/Makefile.inc $(srcdir)/mysql/Makefile.inc $(srcdir)/nginx/Makefile.inc $(srcdir)/nut/Makefile.inc $(srcdir)/opensips/Makefile.inc $(srcdir)/phpfpm/Makefile.inc $(srcdir)/postfix/Makefile.inc $(srcdir)/sensors/Makefile.inc $(srcdir)/squid/Makefile.inc $(srcdir)/tomcat/Makefile.inc:
-
-$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES)
- cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
-
-$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps)
- cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
-$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps)
- cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
-$(am__aclocal_m4_deps):
-install-dist_chartsSCRIPTS: $(dist_charts_SCRIPTS)
- @$(NORMAL_INSTALL)
- @list='$(dist_charts_SCRIPTS)'; test -n "$(chartsdir)" || list=; \
- if test -n "$$list"; then \
- echo " $(MKDIR_P) '$(DESTDIR)$(chartsdir)'"; \
- $(MKDIR_P) "$(DESTDIR)$(chartsdir)" || exit 1; \
- fi; \
- for p in $$list; do \
- if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
- if test -f "$$d$$p"; then echo "$$d$$p"; echo "$$p"; else :; fi; \
- done | \
- sed -e 'p;s,.*/,,;n' \
- -e 'h;s|.*|.|' \
- -e 'p;x;s,.*/,,;$(transform)' | sed 'N;N;N;s,\n, ,g' | \
- $(AWK) 'BEGIN { files["."] = ""; dirs["."] = 1; } \
- { d=$$3; if (dirs[d] != 1) { print "d", d; dirs[d] = 1 } \
- if ($$2 == $$4) { files[d] = files[d] " " $$1; \
- if (++n[d] == $(am__install_max)) { \
- print "f", d, files[d]; n[d] = 0; files[d] = "" } } \
- else { print "f", d "/" $$4, $$1 } } \
- END { for (d in files) print "f", d, files[d] }' | \
- while read type dir files; do \
- if test "$$dir" = .; then dir=; else dir=/$$dir; fi; \
- test -z "$$files" || { \
- echo " $(INSTALL_SCRIPT) $$files '$(DESTDIR)$(chartsdir)$$dir'"; \
- $(INSTALL_SCRIPT) $$files "$(DESTDIR)$(chartsdir)$$dir" || exit $$?; \
- } \
- ; done
-
-uninstall-dist_chartsSCRIPTS:
- @$(NORMAL_UNINSTALL)
- @list='$(dist_charts_SCRIPTS)'; test -n "$(chartsdir)" || exit 0; \
- files=`for p in $$list; do echo "$$p"; done | \
- sed -e 's,.*/,,;$(transform)'`; \
- dir='$(DESTDIR)$(chartsdir)'; $(am__uninstall_files_from_dir)
-install-dist_pluginsSCRIPTS: $(dist_plugins_SCRIPTS)
- @$(NORMAL_INSTALL)
- @list='$(dist_plugins_SCRIPTS)'; test -n "$(pluginsdir)" || list=; \
- if test -n "$$list"; then \
- echo " $(MKDIR_P) '$(DESTDIR)$(pluginsdir)'"; \
- $(MKDIR_P) "$(DESTDIR)$(pluginsdir)" || exit 1; \
- fi; \
- for p in $$list; do \
- if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
- if test -f "$$d$$p"; then echo "$$d$$p"; echo "$$p"; else :; fi; \
- done | \
- sed -e 'p;s,.*/,,;n' \
- -e 'h;s|.*|.|' \
- -e 'p;x;s,.*/,,;$(transform)' | sed 'N;N;N;s,\n, ,g' | \
- $(AWK) 'BEGIN { files["."] = ""; dirs["."] = 1; } \
- { d=$$3; if (dirs[d] != 1) { print "d", d; dirs[d] = 1 } \
- if ($$2 == $$4) { files[d] = files[d] " " $$1; \
- if (++n[d] == $(am__install_max)) { \
- print "f", d, files[d]; n[d] = 0; files[d] = "" } } \
- else { print "f", d "/" $$4, $$1 } } \
- END { for (d in files) print "f", d, files[d] }' | \
- while read type dir files; do \
- if test "$$dir" = .; then dir=; else dir=/$$dir; fi; \
- test -z "$$files" || { \
- echo " $(INSTALL_SCRIPT) $$files '$(DESTDIR)$(pluginsdir)$$dir'"; \
- $(INSTALL_SCRIPT) $$files "$(DESTDIR)$(pluginsdir)$$dir" || exit $$?; \
- } \
- ; done
-
-uninstall-dist_pluginsSCRIPTS:
- @$(NORMAL_UNINSTALL)
- @list='$(dist_plugins_SCRIPTS)'; test -n "$(pluginsdir)" || exit 0; \
- files=`for p in $$list; do echo "$$p"; done | \
- sed -e 's,.*/,,;$(transform)'`; \
- dir='$(DESTDIR)$(pluginsdir)'; $(am__uninstall_files_from_dir)
-install-dist_chartsDATA: $(dist_charts_DATA)
- @$(NORMAL_INSTALL)
- @list='$(dist_charts_DATA)'; test -n "$(chartsdir)" || list=; \
- if test -n "$$list"; then \
- echo " $(MKDIR_P) '$(DESTDIR)$(chartsdir)'"; \
- $(MKDIR_P) "$(DESTDIR)$(chartsdir)" || exit 1; \
- fi; \
- for p in $$list; do \
- if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
- echo "$$d$$p"; \
- done | $(am__base_list) | \
- while read files; do \
- echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(chartsdir)'"; \
- $(INSTALL_DATA) $$files "$(DESTDIR)$(chartsdir)" || exit $$?; \
- done
-
-uninstall-dist_chartsDATA:
- @$(NORMAL_UNINSTALL)
- @list='$(dist_charts_DATA)'; test -n "$(chartsdir)" || list=; \
- files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
- dir='$(DESTDIR)$(chartsdir)'; $(am__uninstall_files_from_dir)
-install-dist_chartsconfigDATA: $(dist_chartsconfig_DATA)
- @$(NORMAL_INSTALL)
- @list='$(dist_chartsconfig_DATA)'; test -n "$(chartsconfigdir)" || list=; \
- if test -n "$$list"; then \
- echo " $(MKDIR_P) '$(DESTDIR)$(chartsconfigdir)'"; \
- $(MKDIR_P) "$(DESTDIR)$(chartsconfigdir)" || exit 1; \
- fi; \
- for p in $$list; do \
- if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
- echo "$$d$$p"; \
- done | $(am__base_list) | \
- while read files; do \
- echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(chartsconfigdir)'"; \
- $(INSTALL_DATA) $$files "$(DESTDIR)$(chartsconfigdir)" || exit $$?; \
- done
-
-uninstall-dist_chartsconfigDATA:
- @$(NORMAL_UNINSTALL)
- @list='$(dist_chartsconfig_DATA)'; test -n "$(chartsconfigdir)" || list=; \
- files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
- dir='$(DESTDIR)$(chartsconfigdir)'; $(am__uninstall_files_from_dir)
-install-dist_libconfigDATA: $(dist_libconfig_DATA)
- @$(NORMAL_INSTALL)
- @list='$(dist_libconfig_DATA)'; test -n "$(libconfigdir)" || list=; \
- if test -n "$$list"; then \
- echo " $(MKDIR_P) '$(DESTDIR)$(libconfigdir)'"; \
- $(MKDIR_P) "$(DESTDIR)$(libconfigdir)" || exit 1; \
- fi; \
- for p in $$list; do \
- if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
- echo "$$d$$p"; \
- done | $(am__base_list) | \
- while read files; do \
- echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(libconfigdir)'"; \
- $(INSTALL_DATA) $$files "$(DESTDIR)$(libconfigdir)" || exit $$?; \
- done
-
-uninstall-dist_libconfigDATA:
- @$(NORMAL_UNINSTALL)
- @list='$(dist_libconfig_DATA)'; test -n "$(libconfigdir)" || list=; \
- files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
- dir='$(DESTDIR)$(libconfigdir)'; $(am__uninstall_files_from_dir)
-install-dist_userchartsconfigDATA: $(dist_userchartsconfig_DATA)
- @$(NORMAL_INSTALL)
- @list='$(dist_userchartsconfig_DATA)'; test -n "$(userchartsconfigdir)" || list=; \
- if test -n "$$list"; then \
- echo " $(MKDIR_P) '$(DESTDIR)$(userchartsconfigdir)'"; \
- $(MKDIR_P) "$(DESTDIR)$(userchartsconfigdir)" || exit 1; \
- fi; \
- for p in $$list; do \
- if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
- echo "$$d$$p"; \
- done | $(am__base_list) | \
- while read files; do \
- echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(userchartsconfigdir)'"; \
- $(INSTALL_DATA) $$files "$(DESTDIR)$(userchartsconfigdir)" || exit $$?; \
- done
-
-uninstall-dist_userchartsconfigDATA:
- @$(NORMAL_UNINSTALL)
- @list='$(dist_userchartsconfig_DATA)'; test -n "$(userchartsconfigdir)" || list=; \
- files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
- dir='$(DESTDIR)$(userchartsconfigdir)'; $(am__uninstall_files_from_dir)
-tags TAGS:
-
-ctags CTAGS:
-
-cscope cscopelist:
-
-
-distdir: $(DISTFILES)
- @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
- topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
- list='$(DISTFILES)'; \
- dist_files=`for file in $$list; do echo $$file; done | \
- sed -e "s|^$$srcdirstrip/||;t" \
- -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \
- case $$dist_files in \
- */*) $(MKDIR_P) `echo "$$dist_files" | \
- sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \
- sort -u` ;; \
- esac; \
- for file in $$dist_files; do \
- if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \
- if test -d $$d/$$file; then \
- dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \
- if test -d "$(distdir)/$$file"; then \
- find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
- fi; \
- if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \
- cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \
- find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
- fi; \
- cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \
- else \
- test -f "$(distdir)/$$file" \
- || cp -p $$d/$$file "$(distdir)/$$file" \
- || exit 1; \
- fi; \
- done
-check-am: all-am
-check: check-am
-all-am: Makefile $(SCRIPTS) $(DATA)
-installdirs:
- for dir in "$(DESTDIR)$(chartsdir)" "$(DESTDIR)$(pluginsdir)" "$(DESTDIR)$(chartsdir)" "$(DESTDIR)$(chartsconfigdir)" "$(DESTDIR)$(libconfigdir)" "$(DESTDIR)$(userchartsconfigdir)"; do \
- test -z "$$dir" || $(MKDIR_P) "$$dir"; \
- done
-install: install-am
-install-exec: install-exec-am
-install-data: install-data-am
-uninstall: uninstall-am
-
-install-am: all-am
- @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
-
-installcheck: installcheck-am
-install-strip:
- if test -z '$(STRIP)'; then \
- $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
- install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
- install; \
- else \
- $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
- install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
- "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \
- fi
-mostlyclean-generic:
-
-clean-generic:
- -test -z "$(CLEANFILES)" || rm -f $(CLEANFILES)
-
-distclean-generic:
- -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
- -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES)
-
-maintainer-clean-generic:
- @echo "This command is intended for maintainers to use"
- @echo "it deletes files that may require special tools to rebuild."
- -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES)
-clean: clean-am
-
-clean-am: clean-generic mostlyclean-am
-
-distclean: distclean-am
- -rm -f Makefile
-distclean-am: clean-am distclean-generic
-
-dvi: dvi-am
-
-dvi-am:
-
-html: html-am
-
-html-am:
-
-info: info-am
-
-info-am:
-
-install-data-am: install-dist_chartsDATA install-dist_chartsSCRIPTS \
- install-dist_chartsconfigDATA install-dist_libconfigDATA \
- install-dist_pluginsSCRIPTS install-dist_userchartsconfigDATA
-
-install-dvi: install-dvi-am
-
-install-dvi-am:
-
-install-exec-am:
-
-install-html: install-html-am
-
-install-html-am:
-
-install-info: install-info-am
-
-install-info-am:
-
-install-man:
-
-install-pdf: install-pdf-am
-
-install-pdf-am:
-
-install-ps: install-ps-am
-
-install-ps-am:
-
-installcheck-am:
-
-maintainer-clean: maintainer-clean-am
- -rm -f Makefile
-maintainer-clean-am: distclean-am maintainer-clean-generic
-
-mostlyclean: mostlyclean-am
-
-mostlyclean-am: mostlyclean-generic
-
-pdf: pdf-am
-
-pdf-am:
-
-ps: ps-am
-
-ps-am:
-
-uninstall-am: uninstall-dist_chartsDATA uninstall-dist_chartsSCRIPTS \
- uninstall-dist_chartsconfigDATA uninstall-dist_libconfigDATA \
- uninstall-dist_pluginsSCRIPTS \
- uninstall-dist_userchartsconfigDATA
-
-.MAKE: install-am install-strip
-
-.PHONY: all all-am check check-am clean clean-generic cscopelist-am \
- ctags-am distclean distclean-generic distdir dvi dvi-am html \
- html-am info info-am install install-am install-data \
- install-data-am install-dist_chartsDATA \
- install-dist_chartsSCRIPTS install-dist_chartsconfigDATA \
- install-dist_libconfigDATA install-dist_pluginsSCRIPTS \
- install-dist_userchartsconfigDATA install-dvi install-dvi-am \
- install-exec install-exec-am install-html install-html-am \
- install-info install-info-am install-man install-pdf \
- install-pdf-am install-ps install-ps-am install-strip \
- installcheck installcheck-am installdirs maintainer-clean \
- maintainer-clean-generic mostlyclean mostlyclean-generic pdf \
- pdf-am ps ps-am tags-am uninstall uninstall-am \
- uninstall-dist_chartsDATA uninstall-dist_chartsSCRIPTS \
- uninstall-dist_chartsconfigDATA uninstall-dist_libconfigDATA \
- uninstall-dist_pluginsSCRIPTS \
- uninstall-dist_userchartsconfigDATA
-
-.in:
- if sed \
- -e 's#[@]localstatedir_POST@#$(localstatedir)#g' \
- -e 's#[@]sbindir_POST@#$(sbindir)#g' \
- -e 's#[@]sysconfdir_POST@#$(sysconfdir)#g' \
- -e 's#[@]pythondir_POST@#$(pythondir)#g' \
- -e 's#[@]configdir_POST@#$(configdir)#g' \
- -e 's#[@]libconfigdir_POST@#$(libconfigdir)#g' \
- -e 's#[@]cachedir_POST@#$(cachedir)#g' \
- $< > $@.tmp; then \
- mv "$@.tmp" "$@"; \
- else \
- rm -f "$@.tmp"; \
- false; \
- fi
-
-# Tell versions [3.59,3.63) of GNU make to not export all variables.
-# Otherwise a system limit (for SysV at least) may be exceeded.
-.NOEXPORT:
diff --git a/collectors/charts.d.plugin/README.md b/collectors/charts.d.plugin/README.md
index b224bffe..3d318f26 100644
--- a/collectors/charts.d.plugin/README.md
+++ b/collectors/charts.d.plugin/README.md
@@ -191,3 +191,5 @@ This is what you need to do:
Execute the above in this order, since netdata will (by default) attempt to start new plugins soon after they are
created in `/usr/libexec/netdata/plugins.d/`.
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fcharts.d.plugin%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/collectors/charts.d.plugin/ap/README.md b/collectors/charts.d.plugin/ap/README.md
index eb4e8070..962a8565 100644
--- a/collectors/charts.d.plugin/ap/README.md
+++ b/collectors/charts.d.plugin/ap/README.md
@@ -82,3 +82,5 @@ To edit this file on your system run `/etc/netdata/edit-config charts.d/ap.conf`
## Auto-detection
The plugin is able to auto-detect if you are running access points on your linux box.
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fcharts.d.plugin%2Fap%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
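
Everything this module charts comes from iw, one "station dump" per detected AP interface, as the ap.chart.sh hunk below shows. A manual equivalent, assuming an AP interface named wlan0:

    # stations associated to the interface; the collector aggregates bytes,
    # packets, retries, signal and bitrates from this output
    iw wlan0 station dump | head
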
diff --git a/collectors/charts.d.plugin/ap/ap.chart.sh b/collectors/charts.d.plugin/ap/ap.chart.sh
index ccc36120..a2d04c0a 100644
--- a/collectors/charts.d.plugin/ap/ap.chart.sh
+++ b/collectors/charts.d.plugin/ap/ap.chart.sh
@@ -56,8 +56,7 @@ ap_check() {
ap_create() {
local ssid dev
- for dev in "${!ap_devs[@]}"
- do
+ for dev in "${!ap_devs[@]}"; do
ssid="${ap_devs[${dev}]}"
# create the chart with 3 dimensions
@@ -99,14 +98,13 @@ ap_update() {
# for each dimension
# remember: KEEP IT SIMPLE AND SHORT
- for dev in "${!ap_devs[@]}"
- do
- echo
- echo "DEVICE ${dev}"
+ for dev in "${!ap_devs[@]}"; do
+ echo
+ echo "DEVICE ${dev}"
iw "${dev}" station dump
- done | awk "
+ done | awk '
function zero_data() {
- dev = \"\";
+ dev = "";
c = 0;
rb = 0;
tb = 0;
@@ -121,32 +119,32 @@ ap_update() {
e = 0;
}
function print_device() {
- if(dev != \"\" && length(dev) > 0) {
- print \"BEGIN ap_clients.\" dev;
- print \"SET clients = \" c;
- print \"END\";
- print \"BEGIN ap_bandwidth.\" dev;
- print \"SET received = \" rb;
- print \"SET sent = \" tb;
- print \"END\";
- print \"BEGIN ap_packets.\" dev;
- print \"SET received = \" rp;
- print \"SET sent = \" tp;
- print \"END\";
- print \"BEGIN ap_issues.\" dev;
- print \"SET retries = \" tr;
- print \"SET failures = \" tf;
- print \"END\";
+ if(dev != "" && length(dev) > 0) {
+ print "BEGIN ap_clients." dev;
+ print "SET clients = " c;
+ print "END";
+ print "BEGIN ap_bandwidth." dev;
+ print "SET received = " rb;
+ print "SET sent = " tb;
+ print "END";
+ print "BEGIN ap_packets." dev;
+ print "SET received = " rp;
+ print "SET sent = " tp;
+ print "END";
+ print "BEGIN ap_issues." dev;
+ print "SET retries = " tr;
+ print "SET failures = " tf;
+ print "END";
if( c == 0 ) c = 1;
- print \"BEGIN ap_signal.\" dev;
- print \"SET signal = \" int(s / c);
- print \"END\";
- print \"BEGIN ap_bitrate.\" dev;
- print \"SET receive = \" int(rt / c);
- print \"SET transmit = \" int(tt / c);
- print \"SET expected = \" int(e / c);
- print \"END\";
+ print "BEGIN ap_signal." dev;
+ print "SET signal = " int(s / c);
+ print "END";
+ print "BEGIN ap_bitrate." dev;
+ print "SET receive = " int(rt / c);
+ print "SET transmit = " int(tt / c);
+ print "SET expected = " int(e / c);
+ print "END";
}
zero_data();
}
@@ -155,28 +153,27 @@ ap_update() {
}
/^DEVICE / {
print_device();
- dev = \$2;
+ dev = $2;
}
/^Station/ { c++; }
- /^[ \\t]+rx bytes:/ { rb += \$3; }
- /^[ \\t]+tx bytes:/ { tb += \$3; }
- /^[ \\t]+rx packets:/ { rp += \$3; }
- /^[ \\t]+tx packets:/ { tp += \$3; }
- /^[ \\t]+tx retries:/ { tr += \$3; }
- /^[ \\t]+tx failed:/ { tf += \$3; }
- /^[ \\t]+signal:/ { x = \$2; s += x * 1000; }
- /^[ \\t]+rx bitrate:/ { x = \$3; rt += x * 1000; }
- /^[ \\t]+tx bitrate:/ { x = \$3; tt += x * 1000; }
- /^[ \\t]+expected throughput:(.*)Mbps/ {
- x=\$3;
- sub(/Mbps/, \"\", x);
+ /^[ \t]+rx bytes:/ { rb += $3; }
+ /^[ \t]+tx bytes:/ { tb += $3; }
+ /^[ \t]+rx packets:/ { rp += $3; }
+ /^[ \t]+tx packets:/ { tp += $3; }
+ /^[ \t]+tx retries:/ { tr += $3; }
+ /^[ \t]+tx failed:/ { tf += $3; }
+ /^[ \t]+signal:/ { x = $2; s += x * 1000; }
+ /^[ \t]+rx bitrate:/ { x = $3; rt += x * 1000; }
+ /^[ \t]+tx bitrate:/ { x = $3; tt += x * 1000; }
+ /^[ \t]+expected throughput:(.*)Mbps/ {
+ x=$3;
+ sub(/Mbps/, "", x);
e += x * 1000;
}
END {
print_device();
}
- "
+ '
return 0
}
-
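
Most of this hunk converts the embedded awk program from a double-quoted shell string to a single-quoted one, dropping a whole layer of backslash escaping. The two quoting styles side by side, runnable standalone:

    # double-quoted: the shell would expand $2, so awk fields must be escaped
    echo "DEVICE wlan0" | awk "/^DEVICE / { print \$2 }"
    # single-quoted (the new form): awk receives $2 untouched
    echo "DEVICE wlan0" | awk '/^DEVICE / { print $2 }'
    # both print: wlan0
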
diff --git a/collectors/charts.d.plugin/apache/README.md b/collectors/charts.d.plugin/apache/README.md
index 890cee98..27397910 100644
--- a/collectors/charts.d.plugin/apache/README.md
+++ b/collectors/charts.d.plugin/apache/README.md
@@ -1,10 +1,10 @@
+# Apache
+
> THIS MODULE IS OBSOLETE.
-> USE THE PYTHON ONE - IT SUPPORTS MULTIPLE JOBS AND IT IS MORE EFFICIENT
+> USE [THE PYTHON ONE](../../python.d.plugin/apache) - IT SUPPORTS MULTIPLE JOBS AND IT IS MORE EFFICIENT
---
-# Apache Plugin (apache)
-
The `apache` collector visualizes key performance data for an apache web server.
## Example netdata charts
@@ -125,3 +125,5 @@ curl "http://127.0.0.1:80/server-status?auto"
netdata will be able to do it too.
 Notice: You may need to have the default `000-default.conf` website enabled in order for the status mod to work.
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fcharts.d.plugin%2Fapache%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/collectors/charts.d.plugin/apache/apache.chart.sh b/collectors/charts.d.plugin/apache/apache.chart.sh
index 95876432..7d09ee67 100644
--- a/collectors/charts.d.plugin/apache/apache.chart.sh
+++ b/collectors/charts.d.plugin/apache/apache.chart.sh
@@ -52,21 +52,20 @@ apache_key_connsasynckeepalive=
apache_key_connsasyncclosing=
apache_detect() {
local i=0
- for x in "${@}"
- do
+ for x in "${@}"; do
case "${x}" in
- 'Total Accesses') apache_key_accesses=$((i + 1)) ;;
- 'Total kBytes') apache_key_kbytes=$((i + 1)) ;;
- 'ReqPerSec') apache_key_reqpersec=$((i + 1)) ;;
- 'BytesPerSec') apache_key_bytespersec=$((i + 1)) ;;
- 'BytesPerReq') apache_key_bytesperreq=$((i + 1)) ;;
- 'BusyWorkers') apache_key_busyworkers=$((i + 1)) ;;
- 'IdleWorkers') apache_key_idleworkers=$((i + 1));;
- 'ConnsTotal') apache_key_connstotal=$((i + 1)) ;;
- 'ConnsAsyncWriting') apache_key_connsasyncwriting=$((i + 1)) ;;
- 'ConnsAsyncKeepAlive') apache_key_connsasynckeepalive=$((i + 1)) ;;
- 'ConnsAsyncClosing') apache_key_connsasyncclosing=$((i + 1)) ;;
- 'Scoreboard') apache_key_scoreboard=$((i)) ;;
+ 'Total Accesses') apache_key_accesses=$((i + 1)) ;;
+ 'Total kBytes') apache_key_kbytes=$((i + 1)) ;;
+ 'ReqPerSec') apache_key_reqpersec=$((i + 1)) ;;
+ 'BytesPerSec') apache_key_bytespersec=$((i + 1)) ;;
+ 'BytesPerReq') apache_key_bytesperreq=$((i + 1)) ;;
+ 'BusyWorkers') apache_key_busyworkers=$((i + 1)) ;;
+ 'IdleWorkers') apache_key_idleworkers=$((i + 1)) ;;
+ 'ConnsTotal') apache_key_connstotal=$((i + 1)) ;;
+ 'ConnsAsyncWriting') apache_key_connsasyncwriting=$((i + 1)) ;;
+ 'ConnsAsyncKeepAlive') apache_key_connsasynckeepalive=$((i + 1)) ;;
+ 'ConnsAsyncClosing') apache_key_connsasyncclosing=$((i + 1)) ;;
+ 'Scoreboard') apache_key_scoreboard=$((i)) ;;
esac
i=$((i + 1))
@@ -74,20 +73,19 @@ apache_detect() {
 	# we will not check for the Conns*
# keys, since these are apache 2.4 specific
- [ -z "${apache_key_accesses}" ] && error "missing 'Total Accesses' from apache server: ${*}" && return 1
- [ -z "${apache_key_kbytes}" ] && error "missing 'Total kBytes' from apache server: ${*}" && return 1
- [ -z "${apache_key_reqpersec}" ] && error "missing 'ReqPerSec' from apache server: ${*}" && return 1
+ [ -z "${apache_key_accesses}" ] && error "missing 'Total Accesses' from apache server: ${*}" && return 1
+ [ -z "${apache_key_kbytes}" ] && error "missing 'Total kBytes' from apache server: ${*}" && return 1
+ [ -z "${apache_key_reqpersec}" ] && error "missing 'ReqPerSec' from apache server: ${*}" && return 1
[ -z "${apache_key_bytespersec}" ] && error "missing 'BytesPerSec' from apache server: ${*}" && return 1
[ -z "${apache_key_bytesperreq}" ] && error "missing 'BytesPerReq' from apache server: ${*}" && return 1
[ -z "${apache_key_busyworkers}" ] && error "missing 'BusyWorkers' from apache server: ${*}" && return 1
[ -z "${apache_key_idleworkers}" ] && error "missing 'IdleWorkers' from apache server: ${*}" && return 1
- [ -z "${apache_key_scoreboard}" ] && error "missing 'Scoreboard' from apache server: ${*}" && return 1
+ [ -z "${apache_key_scoreboard}" ] && error "missing 'Scoreboard' from apache server: ${*}" && return 1
- if [ ! -z "${apache_key_connstotal}" ] && \
- [ ! -z "${apache_key_connsasyncwriting}" ] && \
- [ ! -z "${apache_key_connsasynckeepalive}" ] && \
- [ ! -z "${apache_key_connsasyncclosing}" ]
- then
+ if [ ! -z "${apache_key_connstotal}" ] &&
+ [ ! -z "${apache_key_connsasyncwriting}" ] &&
+ [ ! -z "${apache_key_connsasynckeepalive}" ] &&
+ [ ! -z "${apache_key_connsasyncclosing}" ]; then
apache_has_conns=1
else
apache_has_conns=0
@@ -103,15 +101,13 @@ apache_get() {
ret=$?
IFS="${oIFS}"
- if [ $ret -ne 0 ] || [ "${#apache_response[@]}" -eq 0 ]
- then
+ if [ $ret -ne 0 ] || [ "${#apache_response[@]}" -eq 0 ]; then
return 1
fi
# the last line on the apache output is "Scoreboard"
# we use this label to detect that the output has a new word count
- if [ ${apache_keys_detected} -eq 0 ] || [ "${apache_response[${apache_key_scoreboard}]}" != "Scoreboard" ]
- then
+ if [ ${apache_keys_detected} -eq 0 ] || [ "${apache_response[${apache_key_scoreboard}]}" != "Scoreboard" ]; then
apache_detect "${apache_response[@]}" || return 1
apache_keys_detected=1
fi
@@ -131,20 +127,20 @@ apache_get() {
apache_busyworkers="${apache_response[${apache_key_busyworkers}]}"
apache_idleworkers="${apache_response[${apache_key_idleworkers}]}"
- if [ -z "${apache_accesses}" ] || \
- [ -z "${apache_kbytes}" ] || \
- [ -z "${apache_reqpersec}" ] || \
- [ -z "${apache_bytespersec}" ] || \
- [ -z "${apache_bytesperreq}" ] || \
- [ -z "${apache_busyworkers}" ]
- [ -z "${apache_idleworkers}" ]
+ if
+ [ -z "${apache_accesses}" ] ||
+ [ -z "${apache_kbytes}" ] ||
+ [ -z "${apache_reqpersec}" ] ||
+ [ -z "${apache_bytespersec}" ] ||
+ [ -z "${apache_bytesperreq}" ] ||
+		[ -z "${apache_busyworkers}" ] ||
+ [ -z "${apache_idleworkers}" ]
then
 		error "got empty values from apache server: ${apache_response[*]}"
return 1
fi
- if [ ${apache_has_conns} -eq 1 ]
- then
+ if [ ${apache_has_conns} -eq 1 ]; then
apache_connstotal="${apache_response[${apache_key_connstotal}]}"
apache_connsasyncwriting="${apache_response[${apache_key_connsasyncwriting}]}"
apache_connsasynckeepalive="${apache_response[${apache_key_connsasynckeepalive}]}"
@@ -159,8 +155,7 @@ apache_check() {
apache_get
# shellcheck disable=2181
- if [ $? -ne 0 ]
- then
+ if [ $? -ne 0 ]; then
# shellcheck disable=2154
error "cannot find stub_status on URL '${apache_url}'. Please set apache_url='http://apache.server:80/server-status?auto' in $confd/apache.conf"
return 1
@@ -191,8 +186,7 @@ CHART apache_local.net '' "apache Bandwidth" "kilobits/s" bandwidth apache.net a
DIMENSION sent '' incremental 8 1
EOF
- if [ ${apache_has_conns} -eq 1 ]
- then
+ if [ ${apache_has_conns} -eq 1 ]; then
cat <<EOF2
CHART apache_local.connections '' "apache Connections" "connections" connections apache.connections line $((apache_priority + 2)) $apache_update_every
DIMENSION connections '' absolute 1 1
@@ -240,9 +234,8 @@ SET busy = $((apache_busyworkers))
END
VALUESEOF
- if [ ${apache_has_conns} -eq 1 ]
- then
- cat <<VALUESEOF2
+ if [ ${apache_has_conns} -eq 1 ]; then
+ cat <<VALUESEOF2
BEGIN apache_local.connections $1
SET connections = $((apache_connstotal))
END
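
The collector reads Apache's mod_status machine-readable page and maps its fields by position via apache_detect() above. A simplified manual probe of the same endpoint, using the default URL from this module's README:

    # print a few of the fields the collector charts
    curl -s "http://127.0.0.1:80/server-status?auto" | awk -F': ' '
        /^Total Accesses/ { print "accesses:", $2 }
        /^BusyWorkers/    { print "busy workers:", $2 }
        /^Scoreboard/     { print "scoreboard:", $2 }
    '
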
diff --git a/collectors/charts.d.plugin/apcupsd/README.md b/collectors/charts.d.plugin/apcupsd/README.md
index e69de29b..59739efc 100644
--- a/collectors/charts.d.plugin/apcupsd/README.md
+++ b/collectors/charts.d.plugin/apcupsd/README.md
@@ -0,0 +1,7 @@
+# apcupsd
+
+*Under construction*
+
+Collects UPS metrics
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fcharts.d.plugin%2Fapcupsd%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
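
The collector shells out to apcaccess (required by apcupsd_check() in the hunk below) and parses its key/value output for each configured source. A manual look at the fields it charts, assuming the default local source 127.0.0.1:3551:

    # STATUS feeds the newly added online-flag chart; the rest are charted values
    apcaccess status 127.0.0.1:3551 | grep -E '^(STATUS|BCHARGE|TIMELEFT|LOADPCT)'
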
diff --git a/collectors/charts.d.plugin/apcupsd/apcupsd.chart.sh b/collectors/charts.d.plugin/apcupsd/apcupsd.chart.sh
index e26ef566..b4b92cdc 100644
--- a/collectors/charts.d.plugin/apcupsd/apcupsd.chart.sh
+++ b/collectors/charts.d.plugin/apcupsd/apcupsd.chart.sh
@@ -11,7 +11,7 @@ apcupsd_ip=
apcupsd_port=
declare -A apcupsd_sources=(
- ["local"]="127.0.0.1:3551"
+ ["local"]="127.0.0.1:3551"
)
# how frequently to collect UPS data
@@ -35,46 +35,40 @@ apcupsd_check() {
require_cmd apcaccess || return 1
# backwards compatibility
- if [ "${apcupsd_ip}:${apcupsd_port}" != ":" ]
- then
- apcupsd_sources["local"]="${apcupsd_ip}:${apcupsd_port}"
+ if [ "${apcupsd_ip}:${apcupsd_port}" != ":" ]; then
+ apcupsd_sources["local"]="${apcupsd_ip}:${apcupsd_port}"
fi
- local host working=0 failed=0
- for host in "${!apcupsd_sources[@]}"
- do
- run apcupsd_get "${apcupsd_sources[${host}]}" >/dev/null
- # shellcheck disable=2181
- if [ $? -ne 0 ]
- then
- error "cannot get information for apcupsd server ${host} on ${apcupsd_sources[${host}]}."
- failed=$((failed + 1))
- elif [ "$(apcupsd_get "${apcupsd_sources[${host}]}" | awk '/^STATUS.*/{ print $3 }')" != "ONLINE" ]
- then
- error "APC UPS ${host} on ${apcupsd_sources[${host}]} is not online."
- failed=$((failed + 1))
- else
- working=$((working + 1))
- fi
- done
-
- if [ ${working} -eq 0 ]
- then
- error "No APC UPSes found available."
- return 1
- fi
+ local host working=0 failed=0
+ for host in "${!apcupsd_sources[@]}"; do
+ run apcupsd_get "${apcupsd_sources[${host}]}" >/dev/null
+ # shellcheck disable=2181
+ if [ $? -ne 0 ]; then
+ error "cannot get information for apcupsd server ${host} on ${apcupsd_sources[${host}]}."
+ failed=$((failed + 1))
+ elif [ "$(apcupsd_get "${apcupsd_sources[${host}]}" | awk '/^STATUS.*/{ print $3 }')" != "ONLINE" ]; then
+ error "APC UPS ${host} on ${apcupsd_sources[${host}]} is not online."
+ failed=$((failed + 1))
+ else
+ working=$((working + 1))
+ fi
+ done
+
+ if [ ${working} -eq 0 ]; then
+ error "No APC UPSes found available."
+ return 1
+ fi
return 0
}
apcupsd_create() {
- local host src
- for host in "${!apcupsd_sources[@]}"
- do
- src=${apcupsd_sources[${host}]}
+ local host src
+ for host in "${!apcupsd_sources[@]}"; do
+ src=${apcupsd_sources[${host}]}
- # create the charts
- cat <<EOF
+ # create the charts
+ cat <<EOF
CHART apcupsd_${host}.charge '' "UPS Charge for ${host} on ${src}" "percentage" ups apcupsd.charge area $((apcupsd_priority + 1)) $apcupsd_update_every
DIMENSION battery_charge charge absolute 1 100
@@ -103,12 +97,14 @@ DIMENSION temp temp absolute 1 100
CHART apcupsd_${host}.time '' "UPS Time Remaining for ${host} on ${src}" "Minutes" ups apcupsd.time area $((apcupsd_priority + 2)) $apcupsd_update_every
DIMENSION time time absolute 1 100
+CHART apcupsd_${host}.online '' "UPS ONLINE flag for ${host} on ${src}" "boolean" ups apcupsd.online line $((apcupsd_priority + 8)) $apcupsd_update_every
+DIMENSION online online absolute 0 1
+
EOF
- done
+ done
return 0
}
-
apcupsd_update() {
# the first argument to this function is the microseconds since last update
# pass this parameter to the BEGIN statement (see below).
@@ -117,10 +113,9 @@ apcupsd_update() {
# for each dimension
# remember: KEEP IT SIMPLE AND SHORT
- local host working=0 failed=0
- for host in "${!apcupsd_sources[@]}"
- do
- apcupsd_get "${apcupsd_sources[${host}]}" | awk "
+ local host working=0 failed=0
+ for host in "${!apcupsd_sources[@]}"; do
+ apcupsd_get "${apcupsd_sources[${host}]}" | awk "
BEGIN {
battery_charge = 0;
@@ -130,70 +125,76 @@ BEGIN {
input_voltage_min = 0;
input_voltage_max = 0;
input_frequency = 0;
- output_voltage = 0;
+ output_voltage = 0;
output_voltage_nominal = 0;
load = 0;
temp = 0;
time = 0;
}
-/^BCHARGE.*/ { battery_charge = \$3 * 100 };
-/^BATTV.*/ { battery_voltage = \$3 * 100 };
-/^NOMBATTV.*/ { battery_voltage_nominal = \$3 * 100 };
-/^LINEV.*/ { input_voltage = \$3 * 100 };
-/^MINLINEV.*/ { input_voltage_min = \$3 * 100 };
-/^MAXLINEV.*/ { input_voltage_max = \$3 * 100 };
-/^LINEFREQ.*/ { input_frequency = \$3 * 100 };
-/^OUTPUTV.*/ { output_voltage = \$3 * 100 };
-/^NOMOUTV.*/ { output_voltage_nominal = \$3 * 100 };
-/^LOADPCT.*/ { load = \$3 * 100 };
-/^ITEMP.*/ { temp = \$3 * 100 };
-/^TIMELEFT.*/ { time = \$3 * 100 };
+/^BCHARGE.*/ { battery_charge = \$3 * 100 };
+/^BATTV.*/ { battery_voltage = \$3 * 100 };
+/^NOMBATTV.*/ { battery_voltage_nominal = \$3 * 100 };
+/^LINEV.*/ { input_voltage = \$3 * 100 };
+/^MINLINEV.*/ { input_voltage_min = \$3 * 100 };
+/^MAXLINEV.*/ { input_voltage_max = \$3 * 100 };
+/^LINEFREQ.*/ { input_frequency = \$3 * 100 };
+/^OUTPUTV.*/ { output_voltage = \$3 * 100 };
+/^NOMOUTV.*/ { output_voltage_nominal = \$3 * 100 };
+/^LOADPCT.*/ { load = \$3 * 100 };
+/^ITEMP.*/ { temp = \$3 * 100 };
+/^TIMELEFT.*/ { time = \$3 * 100 };
+/^STATUS.*/ { online=(\$3 == \"ONLINE\")?1:0 };
END {
- print \"BEGIN apcupsd_${host}.charge $1\";
- print \"SET battery_charge = \" battery_charge;
- print \"END\"
-
- print \"BEGIN apcupsd_${host}.battery_voltage $1\";
- print \"SET battery_voltage = \" battery_voltage;
- print \"SET battery_voltage_nominal = \" battery_voltage_nominal;
- print \"END\"
-
- print \"BEGIN apcupsd_${host}.input_voltage $1\";
- print \"SET input_voltage = \" input_voltage;
- print \"SET input_voltage_min = \" input_voltage_min;
- print \"SET input_voltage_max = \" input_voltage_max;
- print \"END\"
-
- print \"BEGIN apcupsd_${host}.input_frequency $1\";
- print \"SET input_frequency = \" input_frequency;
- print \"END\"
-
- print \"BEGIN apcupsd_${host}.output_voltage $1\";
- print \"SET output_voltage = \" output_voltage;
- print \"SET output_voltage_nominal = \" output_voltage_nominal;
- print \"END\"
-
- print \"BEGIN apcupsd_${host}.load $1\";
- print \"SET load = \" load;
- print \"END\"
-
- print \"BEGIN apcupsd_${host}.temp $1\";
- print \"SET temp = \" temp;
+ print \"BEGIN apcupsd_${host}.online $1\";
+ print \"SET online = \" online;
print \"END\"
- print \"BEGIN apcupsd_${host}.time $1\";
- print \"SET time = \" time;
- print \"END\"
+ if (online == 1) {
+ print \"BEGIN apcupsd_${host}.charge $1\";
+ print \"SET battery_charge = \" battery_charge;
+ print \"END\"
+
+ print \"BEGIN apcupsd_${host}.battery_voltage $1\";
+ print \"SET battery_voltage = \" battery_voltage;
+ print \"SET battery_voltage_nominal = \" battery_voltage_nominal;
+ print \"END\"
+
+ print \"BEGIN apcupsd_${host}.input_voltage $1\";
+ print \"SET input_voltage = \" input_voltage;
+ print \"SET input_voltage_min = \" input_voltage_min;
+ print \"SET input_voltage_max = \" input_voltage_max;
+ print \"END\"
+
+ print \"BEGIN apcupsd_${host}.input_frequency $1\";
+ print \"SET input_frequency = \" input_frequency;
+ print \"END\"
+
+ print \"BEGIN apcupsd_${host}.output_voltage $1\";
+ print \"SET output_voltage = \" output_voltage;
+ print \"SET output_voltage_nominal = \" output_voltage_nominal;
+ print \"END\"
+
+ print \"BEGIN apcupsd_${host}.load $1\";
+ print \"SET load = \" load;
+ print \"END\"
+
+ print \"BEGIN apcupsd_${host}.temp $1\";
+ print \"SET temp = \" temp;
+ print \"END\"
+
+ print \"BEGIN apcupsd_${host}.time $1\";
+ print \"SET time = \" time;
+ print \"END\"
+ }
}"
- # shellcheck disable=SC2181
- if [ $? -ne 0 ]
- then
- failed=$((failed + 1))
- error "failed to get values for APC UPS ${host} on ${apcupsd_sources[${host}]}" && return 1
- else
- working=$((working + 1))
- fi
- done
+ # shellcheck disable=SC2181
+ if [ $? -ne 0 ]; then
+ failed=$((failed + 1))
+ error "failed to get values for APC UPS ${host} on ${apcupsd_sources[${host}]}" && return 1
+ else
+ working=$((working + 1))
+ fi
+ done
[ $working -eq 0 ] && error "failed to get values from all APC UPSes" && return 1
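The net effect of the reworked awk program above: "online" is derived from the STATUS line (field 3 equal to "ONLINE"), the apcupsd_${host}.online chart is emitted unconditionally, and every other BEGIN/SET/END group is written only while the UPS reports ONLINE. A quick way to inspect the input this keys on (invocation and output illustrative):

    $ apcaccess status 127.0.0.1:3551 | grep ^STATUS
    STATUS   : ONLINE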
diff --git a/collectors/charts.d.plugin/charts.d.dryrun-helper.sh b/collectors/charts.d.plugin/charts.d.dryrun-helper.sh
index 67496c1b..91af2c54 100755
--- a/collectors/charts.d.plugin/charts.d.dryrun-helper.sh
+++ b/collectors/charts.d.plugin/charts.d.dryrun-helper.sh
@@ -17,7 +17,7 @@ tmp1="$(mktemp)"
tmp2="$(mktemp)"
myset() {
- set | grep -v "^_=" | grep -v "^PIPESTATUS=" | grep -v "^BASH_LINENO="
+ set | grep -v "^_=" | grep -v "^PIPESTATUS=" | grep -v "^BASH_LINENO="
}
# save 2 'set'
@@ -26,52 +26,46 @@ myset >"$tmp2"
# make sure they don't differ
diff "$tmp1" "$tmp2" >/dev/null 2>&1
-if [ $? -ne 0 ]
-then
- # they differ, we cannot do the check
- echo >&2 "$me: cannot check with diff."
- can_diff=0
+if [ $? -ne 0 ]; then
+ # they differ, we cannot do the check
+ echo >&2 "$me: cannot check with diff."
+ can_diff=0
fi
# do it again, now including the script
myset >"$tmp1"
# include the plugin and its config
-if [ -f "$conf" ]
-then
- # shellcheck source=/dev/null
- . "$conf"
- if [ $? -ne 0 ]
- then
- echo >&2 "$me: cannot load config file $conf"
- rm "$tmp1" "$tmp2"
- exit 1
- fi
+if [ -f "$conf" ]; then
+ # shellcheck source=/dev/null
+ . "$conf"
+ if [ $? -ne 0 ]; then
+ echo >&2 "$me: cannot load config file $conf"
+ rm "$tmp1" "$tmp2"
+ exit 1
+ fi
fi
# shellcheck source=/dev/null
. "$chart"
-if [ $? -ne 0 ]
-then
- echo >&2 "$me: cannot load chart file $chart"
- rm "$tmp1" "$tmp2"
- exit 1
+if [ $? -ne 0 ]; then
+ echo >&2 "$me: cannot load chart file $chart"
+ rm "$tmp1" "$tmp2"
+ exit 1
fi
# remove all variables starting with the plugin name
myset | grep -v "^$name" >"$tmp2"
-if [ $can_diff -eq 1 ]
-then
- # check if they are different
- # make sure they don't differ
- diff "$tmp1" "$tmp2" >&2
- if [ $? -ne 0 ]
- then
- # they differ
- rm "$tmp1" "$tmp2"
- exit 1
- fi
+if [ $can_diff -eq 1 ]; then
+ # check whether the two snapshots differ
+ diff "$tmp1" "$tmp2" >&2
+ if [ $? -ne 0 ]; then
+ # they differ
+ rm "$tmp1" "$tmp2"
+ exit 1
+ fi
fi
rm "$tmp1" "$tmp2"
diff --git a/collectors/charts.d.plugin/charts.d.plugin b/collectors/charts.d.plugin/charts.d.plugin
deleted file mode 100644
index 1c6e8c5c..00000000
--- a/collectors/charts.d.plugin/charts.d.plugin
+++ /dev/null
@@ -1,743 +0,0 @@
-#!/usr/bin/env bash
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-# netdata
-# real-time performance and health monitoring, done right!
-# (C) 2017 Costa Tsaousis <costa@tsaousis.gr>
-# GPL v3+
-#
-# charts.d.plugin allows easy development of BASH plugins
-#
-# if you need to run parallel charts.d processes, link this file to a different name
-# in the same directory, with a .plugin suffix and netdata will start both of them,
-# each will have a different config file and modules configuration directory.
-#
-
-export PATH="${PATH}:/sbin:/usr/sbin:/usr/local/bin:/usr/local/sbin"
-
-PROGRAM_FILE="$0"
-PROGRAM_NAME="$(basename $0)"
-PROGRAM_NAME="${PROGRAM_NAME/.plugin}"
-MODULE_NAME="main"
-
-# -----------------------------------------------------------------------------
-# create temp dir
-
-debug=0
-TMP_DIR=
-chartsd_cleanup() {
- trap '' EXIT QUIT HUP INT TERM
-
- if [ ! -z "$TMP_DIR" -a -d "$TMP_DIR" ]
- then
- [ $debug -eq 1 ] && echo >&2 "$PROGRAM_NAME: cleaning up temporary directory $TMP_DIR ..."
- rm -rf "$TMP_DIR"
- fi
- exit 0
-}
-trap chartsd_cleanup EXIT QUIT HUP INT TERM
-
-if [ $UID = "0" ]
-then
- TMP_DIR="$( mktemp -d /var/run/netdata-${PROGRAM_NAME}-XXXXXXXXXX )"
-else
- TMP_DIR="$( mktemp -d /tmp/.netdata-${PROGRAM_NAME}-XXXXXXXXXX )"
-fi
-
-logdate() {
- date "+%Y-%m-%d %H:%M:%S"
-}
-
-log() {
- local status="${1}"
- shift
-
- echo >&2 "$(logdate): ${PROGRAM_NAME}: ${status}: ${MODULE_NAME}: ${*}"
-
-}
-
-warning() {
- log WARNING "${@}"
-}
-
-error() {
- log ERROR "${@}"
-}
-
-info() {
- log INFO "${@}"
-}
-
-fatal() {
- log FATAL "${@}"
- echo "DISABLE"
- exit 1
-}
-
-debug() {
- [ $debug -eq 1 ] && log DEBUG "${@}"
-}
-
-# -----------------------------------------------------------------------------
-# check a few commands
-
-require_cmd() {
- local x=$(which "${1}" 2>/dev/null || command -v "${1}" 2>/dev/null)
- if [ -z "${x}" -o ! -x "${x}" ]
- then
- warning "command '${1}' is not found in ${PATH}."
- eval "${1^^}_CMD=\"\""
- return 1
- fi
-
- eval "${1^^}_CMD=\"${x}\""
- return 0
-}
-
-require_cmd date || exit 1
-require_cmd sed || exit 1
-require_cmd basename || exit 1
-require_cmd dirname || exit 1
-require_cmd cat || exit 1
-require_cmd grep || exit 1
-require_cmd egrep || exit 1
-require_cmd mktemp || exit 1
-require_cmd awk || exit 1
-require_cmd timeout || exit 1
-require_cmd curl || exit 1
-
-# -----------------------------------------------------------------------------
-
-[ $(( ${BASH_VERSINFO[0]} )) -lt 4 ] && fatal "BASH version 4 or later is required, but found version: ${BASH_VERSION}. Please upgrade."
-
-info "started from '$PROGRAM_FILE' with options: $*"
-
-# -----------------------------------------------------------------------------
-# internal defaults
-# netdata exposes a few environment variables for us
-
-[ -z "${NETDATA_PLUGINS_DIR}" ] && NETDATA_PLUGINS_DIR="$(dirname "${0}")"
-[ -z "${NETDATA_USER_CONFIG_DIR}" ] && NETDATA_USER_CONFIG_DIR="/usr/local/etc/netdata"
-[ -z "${NETDATA_STOCK_CONFIG_DIR}" ] && NETDATA_STOCK_CONFIG_DIR="/usr/local/lib/netdata/conf.d"
-
-pluginsd="${NETDATA_PLUGINS_DIR}"
-stockconfd="${NETDATA_STOCK_CONFIG_DIR}/${PROGRAM_NAME}"
-userconfd="${NETDATA_USER_CONFIG_DIR}/${PROGRAM_NAME}"
-olduserconfd="${NETDATA_USER_CONFIG_DIR}"
-chartsd="$pluginsd/../charts.d"
-
-minimum_update_frequency="${NETDATA_UPDATE_EVERY-1}"
-update_every=${minimum_update_frequency} # this will be overwritten by the command line
-
-# work around for non BASH shells
-charts_create="_create"
-charts_update="_update"
-charts_check="_check"
-charts_undescore="_"
-
-# when making iterations, charts.d can loop more frequently
-# to prevent plugins missing iterations.
-# this is a percentage relative to update_every to align its
-# iterations.
-# The minimum is 10%, the maximum 100%.
-# So, if update_every is 1 second and time_divisor is 50,
-# charts.d will iterate every 500ms.
-# Charts will be called to collect data only if the time
-# passed since the last time the collected data is equal or
-# above their update_every.
-time_divisor=50
-
-# number of seconds to run without restart
-# after this time, charts.d.plugin will exit
-# netdata will restart it
-restart_timeout=$((3600 * 4))
-
-# check if the charts.d plugins are using global variables
-# they should not.
-# It does not currently support BASH v4 arrays, so it is
-# disabled
-dryrunner=0
-
-# check for timeout command
-check_for_timeout=1
-
-# the default enable/disable value for all charts
-enable_all_charts="yes"
-
-# -----------------------------------------------------------------------------
-# parse parameters
-
-check=0
-chart_only=
-while [ ! -z "$1" ]
-do
- if [ "$1" = "check" ]
- then
- check=1
- shift
- continue
- fi
-
- if [ "$1" = "debug" -o "$1" = "all" ]
- then
- debug=1
- shift
- continue
- fi
-
- if [ -f "$chartsd/$1.chart.sh" ]
- then
- debug=1
- chart_only="$( echo $1.chart.sh | sed "s/\.chart\.sh$//g" )"
- shift
- continue
- fi
-
- if [ -f "$chartsd/$1" ]
- then
- debug=1
- chart_only="$( echo $1 | sed "s/\.chart\.sh$//g" )"
- shift
- continue
- fi
-
- # number check
- n="$1"
- x=$(( n ))
- if [ "$x" = "$n" ]
- then
- shift
- update_every=$x
- [ $update_every -lt $minimum_update_frequency ] && update_every=$minimum_update_frequency
- continue
- fi
-
- fatal "Cannot understand parameter $1. Aborting."
-done
-
-
-# -----------------------------------------------------------------------------
-# loop control
-
-# default sleep function
-LOOPSLEEPMS_HIGHRES=0
-now_ms=
-current_time_ms_default() {
- now_ms="$(date +'%s')000"
-}
-current_time_ms="current_time_ms_default"
-current_time_ms_accuracy=1
-mysleep="sleep"
-
-# if found and included, this file overwrites loopsleepms()
-# and current_time_ms() with a high resolution timer function
-# for precise looping.
-source "$pluginsd/loopsleepms.sh.inc"
-[ $? -ne 0 ] && error "Failed to load '$pluginsd/loopsleepms.sh.inc'."
-
-# -----------------------------------------------------------------------------
-# load my configuration
-
-for myconfig in "${NETDATA_STOCK_CONFIG_DIR}/${PROGRAM_NAME}.conf" "${NETDATA_USER_CONFIG_DIR}/${PROGRAM_NAME}.conf"
-do
- if [ -f "$myconfig" ]
- then
- source "$myconfig"
- if [ $? -ne 0 ]
- then
- error "Config file '$myconfig' loaded with errors."
- else
- info "Configuration file '$myconfig' loaded."
- fi
- else
- warning "Configuration file '$myconfig' not found."
- fi
-done
-
-# make sure time_divisor is right
-time_divisor=$((time_divisor))
-[ $time_divisor -lt 10 ] && time_divisor=10
-[ $time_divisor -gt 100 ] && time_divisor=100
-
-
-# we check for the timeout command, after we load our
-# configuration, so that the user may overwrite the
-# timeout command we use, providing a function that
-# can emulate the timeout command we need:
-# > timeout SECONDS command ...
-if [ $check_for_timeout -eq 1 ]
- then
- require_cmd timeout || exit 1
-fi
-
-# -----------------------------------------------------------------------------
-# internal checks
-
-# netdata passes the requested update frequency as the first argument
-update_every=$(( update_every + 1 - 1)) # makes sure it is a number
-test $update_every -eq 0 && update_every=1 # if it is zero, make it 1
-
-# check the charts.d directory
-[ ! -d "$chartsd" ] && fatal "cannot find charts directory '$chartsd'"
-
-# -----------------------------------------------------------------------------
-# library functions
-
-fixid() {
- echo "$*" |\
- tr -c "[A-Z][a-z][0-9]" "_" |\
- sed -e "s|^_\+||g" -e "s|_\+$||g" -e "s|_\+|_|g" |\
- tr "[A-Z]" "[a-z]"
-}
-
-run() {
- local ret pid="${BASHPID}" t
-
- if [ "z${1}" = "z-t" -a "${2}" != "0" ]
- then
- t="${2}"
- shift 2
- timeout ${t} "${@}" 2>"${TMP_DIR}/run.${pid}"
- ret=$?
- else
- "${@}" 2>"${TMP_DIR}/run.${pid}"
- ret=$?
- fi
-
- if [ ${ret} -ne 0 ]
- then
- {
- printf "$(logdate): ${PROGRAM_NAME}: ${status}: ${MODULE_NAME}: command '"
- printf "%q " "${@}"
- printf "' failed with code ${ret}:\n --- BEGIN TRACE ---\n"
- cat "${TMP_DIR}/run.${pid}"
- printf " --- END TRACE ---\n"
- } >&2
- fi
- rm "${TMP_DIR}/run.${pid}"
-
- return ${ret}
-}
-
-# convert any floating point number
-# to integer, give a multiplier
-# the result is stored in ${FLOAT2INT_RESULT}
-# so that no fork is necessary
-# the multiplier must be a power of 10
-float2int() {
- local f m="$2" a b l v=($1)
- f=${v[0]}
-
- # the length of the multiplier - 1
- l=$(( ${#m} - 1 ))
-
- # check if the number is in scientific notation
- if [[ ${f} =~ ^[[:space:]]*(-)?[0-9.]+(e|E)(\+|-)[0-9]+ ]]
- then
- # convert it to decimal
- # unfortunately, this fork cannot be avoided
- # if you know of a way to avoid it, please let me know
- f=$(printf "%0.${l}f" ${f})
- fi
-
- # split the floating point number
- # in integer (a) and decimal (b)
- a=${f/.*/}
- b=${f/*./}
-
- # if the integer part is missing
- # set it to zero
- [ -z "${a}" ] && a="0"
-
- # strip leading zeros from the integer part
- # base 10 convertion
- a=$((10#$a))
-
- # check the length of the decimal part
- # against the length of the multiplier
- if [ ${#b} -gt ${l} ]
- then
- # too many digits - take the most significant
- b=${b:0:${l}}
-
- elif [ ${#b} -lt ${l} ]
- then
- # too few digits - pad with zero on the right
- local z="00000000000000000000000" r=$((l - ${#b}))
- b="${b}${z:0:${r}}"
- fi
-
- # strip leading zeros from the decimal part
- # base 10 convertion
- b=$((10#$b))
-
- # store the result
- FLOAT2INT_RESULT=$(( (a * m) + b ))
-}
-
-
-# -----------------------------------------------------------------------------
-# charts check functions
-
-all_charts() {
- cd "$chartsd"
- [ $? -ne 0 ] && error "cannot cd to $chartsd" && return 1
-
- ls *.chart.sh | sed "s/\.chart\.sh$//g"
-}
-
-declare -A charts_enable_keyword=(
- ['apache']="force"
- ['cpu_apps']="force"
- ['cpufreq']="force"
- ['example']="force"
- ['exim']="force"
- ['hddtemp']="force"
- ['load_average']="force"
- ['mem_apps']="force"
- ['mysql']="force"
- ['nginx']="force"
- ['phpfpm']="force"
- ['postfix']="force"
- ['sensors']="force"
- ['squid']="force"
- ['tomcat']="force"
- )
-
-all_enabled_charts() {
- local charts= enabled= required=
-
- # find all enabled charts
-
- for chart in $( all_charts )
- do
- MODULE_NAME="${chart}"
-
- eval "enabled=\$$chart"
- if [ -z "${enabled}" ]
- then
- enabled="${enable_all_charts}"
- fi
-
- required="${charts_enable_keyword[${chart}]}"
- [ -z "${required}" ] && required="yes"
-
- if [ ! "${enabled}" = "${required}" ]
- then
- info "is disabled. Add a line with $chart=$required in '${NETDATA_USER_CONFIG_DIR}/${PROGRAM_NAME}.conf' to enable it (or remove the line that disables it)."
- else
- debug "is enabled for auto-detection."
- local charts="$charts $chart"
- fi
- done
- MODULE_NAME="main"
-
- local charts2=
- for chart in $charts
- do
- MODULE_NAME="${chart}"
-
- # check the enabled charts
- local check="$( cat "$chartsd/$chart.chart.sh" | sed "s/^ \+//g" | grep "^$chart$charts_check()" )"
- if [ -z "$check" ]
- then
- error "module '$chart' does not seem to have a $chart$charts_check() function. Disabling it."
- continue
- fi
-
- local create="$( cat "$chartsd/$chart.chart.sh" | sed "s/^ \+//g" | grep "^$chart$charts_create()" )"
- if [ -z "$create" ]
- then
- error "module '$chart' does not seem to have a $chart$charts_create() function. Disabling it."
- continue
- fi
-
- local update="$( cat "$chartsd/$chart.chart.sh" | sed "s/^ \+//g" | grep "^$chart$charts_update()" )"
- if [ -z "$update" ]
- then
- error "module '$chart' does not seem to have a $chart$charts_update() function. Disabling it."
- continue
- fi
-
- # check its config
- #if [ -f "$userconfd/$chart.conf" ]
- #then
- # if [ ! -z "$( cat "$userconfd/$chart.conf" | sed "s/^ \+//g" | grep -v "^$" | grep -v "^#" | grep -v "^$chart$charts_undescore" )" ]
- # then
- # error "module's $chart config $userconfd/$chart.conf should only have lines starting with $chart$charts_undescore . Disabling it."
- # continue
- # fi
- #fi
-
- #if [ $dryrunner -eq 1 ]
- # then
- # "$pluginsd/charts.d.dryrun-helper.sh" "$chart" "$chartsd/$chart.chart.sh" "$userconfd/$chart.conf" >/dev/null
- # if [ $? -ne 0 ]
- # then
- # error "module's $chart did not pass the dry run check. This means it uses global variables not starting with $chart. Disabling it."
- # continue
- # fi
- #fi
-
- local charts2="$charts2 $chart"
- done
- MODULE_NAME="main"
-
- echo $charts2
- debug "enabled charts: $charts2"
-}
-
-# -----------------------------------------------------------------------------
-# load the charts
-
-suffix_retries="_retries"
-suffix_update_every="_update_every"
-active_charts=
-for chart in $( all_enabled_charts )
-do
- MODULE_NAME="${chart}"
-
- debug "loading module: '$chartsd/$chart.chart.sh'"
-
- source "$chartsd/$chart.chart.sh"
- [ $? -ne 0 ] && warning "Module '$chartsd/$chart.chart.sh' loaded with errors."
-
- # first load the stock config
- if [ -f "$stockconfd/$chart.conf" ]
- then
- debug "loading module configuration: '$stockconfd/$chart.conf'"
- source "$stockconfd/$chart.conf"
- [ $? -ne 0 ] && warning "Config file '$stockconfd/$chart.conf' loaded with errors."
- else
- debug "not found module configuration: '$stockconfd/$chart.conf'"
- fi
-
- # then load the user config (it overwrites the stock)
- if [ -f "$userconfd/$chart.conf" ]
- then
- debug "loading module configuration: '$userconfd/$chart.conf'"
- source "$userconfd/$chart.conf"
- [ $? -ne 0 ] && warning "Config file '$userconfd/$chart.conf' loaded with errors."
- else
- debug "not found module configuration: '$userconfd/$chart.conf'"
-
- if [ -f "$olduserconfd/$chart.conf" ]
- then
- # support for very old netdata that had the charts.d module configs in /etc/netdata
- info "loading module configuration from obsolete location: '$olduserconfd/$chart.conf'"
- source "$olduserconfd/$chart.conf"
- [ $? -ne 0 ] && warning "Config file '$olduserconfd/$chart.conf' loaded with errors."
- fi
- fi
-
- eval "dt=\$$chart$suffix_update_every"
- dt=$(( dt + 1 - 1 )) # make sure it is a number
- if [ $dt -lt $update_every ]
- then
- eval "$chart$suffix_update_every=$update_every"
- fi
-
- $chart$charts_check
- if [ $? -eq 0 ]
- then
- debug "module '$chart' activated"
- active_charts="$active_charts $chart"
- else
- error "module's '$chart' check() function reports failure."
- fi
-done
-MODULE_NAME="main"
-debug "activated modules: $active_charts"
-
-
-# -----------------------------------------------------------------------------
-# check overwrites
-
-# enable work time reporting
-debug_time=
-test $debug -eq 1 && debug_time=tellwork
-
-# if we only need a specific chart, remove all the others
-if [ ! -z "${chart_only}" ]
-then
- debug "requested to run only for: '${chart_only}'"
- check_charts=
- for chart in $active_charts
- do
- if [ "$chart" = "$chart_only" ]
- then
- check_charts="$chart"
- break
- fi
- done
- active_charts="$check_charts"
-fi
-debug "activated charts: $active_charts"
-
-# stop if we just need a pre-check
-if [ $check -eq 1 ]
-then
- info "CHECK RESULT"
- info "Will run the charts: $active_charts"
- exit 0
-fi
-
-# -----------------------------------------------------------------------------
-
-cd "${TMP_DIR}" || exit 1
-
-# -----------------------------------------------------------------------------
-# create charts
-
-run_charts=
-for chart in $active_charts
-do
- MODULE_NAME="${chart}"
-
- debug "calling '$chart$charts_create()'..."
- $chart$charts_create
- if [ $? -eq 0 ]
- then
- run_charts="$run_charts $chart"
- debug "'$chart' initialized."
- else
- error "module's '$chart' function '$chart$charts_create()' reports failure."
- fi
-done
-MODULE_NAME="main"
-debug "run_charts='$run_charts'"
-
-
-# -----------------------------------------------------------------------------
-# update dimensions
-
-[ -z "$run_charts" ] && fatal "No charts to collect data from."
-
-declare -A charts_last_update=() charts_update_every=() charts_retries=() charts_next_update=() charts_run_counter=() charts_serial_failures=()
-global_update() {
- local exit_at \
- c=0 dt ret last_ms exec_start_ms exec_end_ms \
- chart now_charts=() next_charts=($run_charts) \
- next_ms x seconds millis
-
- # return the current time in ms in $now_ms
- ${current_time_ms}
-
- exit_at=$(( now_ms + (restart_timeout * 1000) ))
-
- for chart in $run_charts
- do
- eval "charts_update_every[$chart]=\$$chart$suffix_update_every"
- test -z "${charts_update_every[$chart]}" && charts_update_every[$chart]=$update_every
-
- eval "charts_retries[$chart]=\$$chart$suffix_retries"
- test -z "${charts_retries[$chart]}" && charts_retries[$chart]=10
-
- charts_last_update[$chart]=$((now_ms - (now_ms % (charts_update_every[$chart] * 1000) ) ))
- charts_next_update[$chart]=$(( charts_last_update[$chart] + (charts_update_every[$chart] * 1000) ))
- charts_run_counter[$chart]=0
- charts_serial_failures[$chart]=0
-
- echo "CHART netdata.plugin_chartsd_$chart '' 'Execution time for $chart plugin' 'milliseconds / run' charts.d netdata.plugin_charts area 145000 ${charts_update_every[$chart]}"
- echo "DIMENSION run_time 'run time' absolute 1 1"
- done
-
- # the main loop
- while [ "${#next_charts[@]}" -gt 0 ]
- do
- c=$((c + 1))
- now_charts=("${next_charts[@]}")
- next_charts=()
-
- # return the current time in ms in $now_ms
- ${current_time_ms}
-
- for chart in "${now_charts[@]}"
- do
- MODULE_NAME="${chart}"
-
- if [ ${now_ms} -ge ${charts_next_update[$chart]} ]
- then
- last_ms=${charts_last_update[$chart]}
- dt=$(( (now_ms - last_ms) ))
-
- charts_last_update[$chart]=${now_ms}
-
- while [ ${charts_next_update[$chart]} -lt ${now_ms} ]
- do
- charts_next_update[$chart]=$(( charts_next_update[$chart] + (charts_update_every[$chart] * 1000) ))
- done
-
- # the first call should not give a duration
- # so that netdata calibrates to current time
- dt=$(( dt * 1000 ))
- charts_run_counter[$chart]=$(( charts_run_counter[$chart] + 1 ))
- if [ ${charts_run_counter[$chart]} -eq 1 ]
- then
- dt=
- fi
-
- exec_start_ms=$now_ms
- $chart$charts_update $dt
- ret=$?
-
- # return the current time in ms in $now_ms
- ${current_time_ms}; exec_end_ms=$now_ms
-
- echo "BEGIN netdata.plugin_chartsd_$chart $dt"
- echo "SET run_time = $(( exec_end_ms - exec_start_ms ))"
- echo "END"
-
- if [ $ret -eq 0 ]
- then
- charts_serial_failures[$chart]=0
- next_charts+=($chart)
- else
- charts_serial_failures[$chart]=$(( charts_serial_failures[$chart] + 1 ))
-
- if [ ${charts_serial_failures[$chart]} -gt ${charts_retries[$chart]} ]
- then
- error "module's '$chart' update() function reported failure ${charts_serial_failures[$chart]} times. Disabling it."
- else
- error "module's '$chart' update() function reports failure. Will keep trying for a while."
- next_charts+=($chart)
- fi
- fi
- else
- next_charts+=($chart)
- fi
- done
- MODULE_NAME="${chart}"
-
- # wait the time you are required to
- next_ms=$((now_ms + (update_every * 1000 * 100) ))
- for x in "${charts_next_update[@]}"; do [ ${x} -lt ${next_ms} ] && next_ms=${x}; done
- next_ms=$((next_ms - now_ms))
-
- if [ ${LOOPSLEEPMS_HIGHRES} -eq 1 -a ${next_ms} -gt 0 ]
- then
- next_ms=$(( next_ms + current_time_ms_accuracy ))
- seconds=$(( next_ms / 1000 ))
- millis=$(( next_ms % 1000 ))
- if [ ${millis} -lt 10 ]
- then
- millis="00${millis}"
- elif [ ${millis} -lt 100 ]
- then
- millis="0${millis}"
- fi
-
- debug "sleeping for ${seconds}.${millis} seconds."
- ${mysleep} ${seconds}.${millis}
- else
- debug "sleeping for ${update_every} seconds."
- ${mysleep} $update_every
- fi
-
- test ${now_ms} -ge ${exit_at} && exit 0
- done
-
- fatal "nothing left to do, exiting..."
-}
-
-global_update
diff --git a/collectors/charts.d.plugin/charts.d.plugin.in b/collectors/charts.d.plugin/charts.d.plugin.in
index 3477894d..05a63875 100755
--- a/collectors/charts.d.plugin/charts.d.plugin.in
+++ b/collectors/charts.d.plugin/charts.d.plugin.in
@@ -17,7 +17,7 @@ export PATH="${PATH}:/sbin:/usr/sbin:/usr/local/bin:/usr/local/sbin"
PROGRAM_FILE="$0"
PROGRAM_NAME="$(basename $0)"
-PROGRAM_NAME="${PROGRAM_NAME/.plugin}"
+PROGRAM_NAME="${PROGRAM_NAME/.plugin/}"
MODULE_NAME="main"
# -----------------------------------------------------------------------------
@@ -26,72 +26,69 @@ MODULE_NAME="main"
debug=0
TMP_DIR=
chartsd_cleanup() {
- trap '' EXIT QUIT HUP INT TERM
-
- if [ ! -z "$TMP_DIR" -a -d "$TMP_DIR" ]
- then
- [ $debug -eq 1 ] && echo >&2 "$PROGRAM_NAME: cleaning up temporary directory $TMP_DIR ..."
- rm -rf "$TMP_DIR"
- fi
- exit 0
+ trap '' EXIT QUIT HUP INT TERM
+
+ if [ ! -z "$TMP_DIR" -a -d "$TMP_DIR" ]; then
+ [ $debug -eq 1 ] && echo >&2 "$PROGRAM_NAME: cleaning up temporary directory $TMP_DIR ..."
+ rm -rf "$TMP_DIR"
+ fi
+ exit 0
}
trap chartsd_cleanup EXIT QUIT HUP INT TERM
-if [ $UID = "0" ]
-then
- TMP_DIR="$( mktemp -d /var/run/netdata-${PROGRAM_NAME}-XXXXXXXXXX )"
+if [ $UID = "0" ]; then
+ TMP_DIR="$(mktemp -d /var/run/netdata-${PROGRAM_NAME}-XXXXXXXXXX)"
else
- TMP_DIR="$( mktemp -d /tmp/.netdata-${PROGRAM_NAME}-XXXXXXXXXX )"
+ TMP_DIR="$(mktemp -d /tmp/.netdata-${PROGRAM_NAME}-XXXXXXXXXX)"
fi
logdate() {
- date "+%Y-%m-%d %H:%M:%S"
+ date "+%Y-%m-%d %H:%M:%S"
}
log() {
- local status="${1}"
- shift
+ local status="${1}"
+ shift
- echo >&2 "$(logdate): ${PROGRAM_NAME}: ${status}: ${MODULE_NAME}: ${*}"
+ echo >&2 "$(logdate): ${PROGRAM_NAME}: ${status}: ${MODULE_NAME}: ${*}"
}
warning() {
- log WARNING "${@}"
+ log WARNING "${@}"
}
error() {
- log ERROR "${@}"
+ log ERROR "${@}"
}
info() {
- log INFO "${@}"
+ log INFO "${@}"
}
fatal() {
- log FATAL "${@}"
- echo "DISABLE"
- exit 1
+ log FATAL "${@}"
+ echo "DISABLE"
+ exit 1
}
debug() {
- [ $debug -eq 1 ] && log DEBUG "${@}"
+ [ $debug -eq 1 ] && log DEBUG "${@}"
}
# -----------------------------------------------------------------------------
# check a few commands
require_cmd() {
- local x=$(which "${1}" 2>/dev/null || command -v "${1}" 2>/dev/null)
- if [ -z "${x}" -o ! -x "${x}" ]
- then
- warning "command '${1}' is not found in ${PATH}."
- eval "${1^^}_CMD=\"\""
- return 1
- fi
-
- eval "${1^^}_CMD=\"${x}\""
- return 0
+ local x=$(which "${1}" 2>/dev/null || command -v "${1}" 2>/dev/null)
+ if [ -z "${x}" -o ! -x "${x}" ]; then
+ warning "command '${1}' is not found in ${PATH}."
+ eval "${1^^}_CMD=\"\""
+ return 1
+ fi
+
+ eval "${1^^}_CMD=\"${x}\""
+ return 0
}
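# Usage sketch for the helper above: "require_cmd curl" resolves the binary
# and, through the ${1^^} uppercasing expansion, sets CURL_CMD to its path;
# on failure it empties CURL_CMD and returns 1, which is why the calls below
# chain it as "require_cmd curl || exit 1".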
require_cmd date || exit 1
@@ -108,7 +105,7 @@ require_cmd curl || exit 1
# -----------------------------------------------------------------------------
-[ $(( ${BASH_VERSINFO[0]} )) -lt 4 ] && fatal "BASH version 4 or later is required, but found version: ${BASH_VERSION}. Please upgrade."
+[ $((BASH_VERSINFO[0])) -lt 4 ] && fatal "BASH version 4 or later is required, but found version: ${BASH_VERSION}. Please upgrade."
info "started from '$PROGRAM_FILE' with options: $*"
@@ -117,7 +114,7 @@ info "started from '$PROGRAM_FILE' with options: $*"
# netdata exposes a few environment variables for us
[ -z "${NETDATA_PLUGINS_DIR}" ] && NETDATA_PLUGINS_DIR="$(dirname "${0}")"
-[ -z "${NETDATA_USER_CONFIG_DIR}" ] && NETDATA_USER_CONFIG_DIR="@configdir_POST@"
+[ -z "${NETDATA_USER_CONFIG_DIR}" ] && NETDATA_USER_CONFIG_DIR="@configdir_POST@"
[ -z "${NETDATA_STOCK_CONFIG_DIR}" ] && NETDATA_STOCK_CONFIG_DIR="@libconfigdir_POST@"
pluginsd="${NETDATA_PLUGINS_DIR}"
@@ -127,7 +124,7 @@ olduserconfd="${NETDATA_USER_CONFIG_DIR}"
chartsd="$pluginsd/../charts.d"
minimum_update_frequency="${NETDATA_UPDATE_EVERY-1}"
-update_every=${minimum_update_frequency} # this will be overwritten by the command line
+update_every=${minimum_update_frequency} # this will be overwritten by the command line
# workaround for non-BASH shells
charts_create="_create"
@@ -169,53 +166,46 @@ enable_all_charts="yes"
check=0
chart_only=
-while [ ! -z "$1" ]
-do
- if [ "$1" = "check" ]
- then
- check=1
- shift
- continue
- fi
-
- if [ "$1" = "debug" -o "$1" = "all" ]
- then
- debug=1
- shift
- continue
- fi
-
- if [ -f "$chartsd/$1.chart.sh" ]
- then
- debug=1
- chart_only="$( echo $1.chart.sh | sed "s/\.chart\.sh$//g" )"
- shift
- continue
- fi
-
- if [ -f "$chartsd/$1" ]
- then
- debug=1
- chart_only="$( echo $1 | sed "s/\.chart\.sh$//g" )"
- shift
- continue
- fi
-
- # number check
- n="$1"
- x=$(( n ))
- if [ "$x" = "$n" ]
- then
- shift
- update_every=$x
- [ $update_every -lt $minimum_update_frequency ] && update_every=$minimum_update_frequency
- continue
- fi
-
- fatal "Cannot understand parameter $1. Aborting."
+while [ ! -z "$1" ]; do
+ if [ "$1" = "check" ]; then
+ check=1
+ shift
+ continue
+ fi
+
+ if [ "$1" = "debug" -o "$1" = "all" ]; then
+ debug=1
+ shift
+ continue
+ fi
+
+ if [ -f "$chartsd/$1.chart.sh" ]; then
+ debug=1
+ chart_only="$(echo $1.chart.sh | sed "s/\.chart\.sh$//g")"
+ shift
+ continue
+ fi
+
+ if [ -f "$chartsd/$1" ]; then
+ debug=1
+ chart_only="$(echo $1 | sed "s/\.chart\.sh$//g")"
+ shift
+ continue
+ fi
+
+ # number check
+ n="$1"
+ x=$((n))
+ if [ "$x" = "$n" ]; then
+ shift
+ update_every=$x
+ [ $update_every -lt $minimum_update_frequency ] && update_every=$minimum_update_frequency
+ continue
+ fi
+
+ fatal "Cannot understand parameter $1. Aborting."
done
-
# -----------------------------------------------------------------------------
# loop control
@@ -223,7 +213,7 @@ done
LOOPSLEEPMS_HIGHRES=0
now_ms=
current_time_ms_default() {
- now_ms="$(date +'%s')000"
+ now_ms="$(date +'%s')000"
}
current_time_ms="current_time_ms_default"
current_time_ms_accuracy=1
@@ -238,20 +228,17 @@ source "$pluginsd/loopsleepms.sh.inc"
# -----------------------------------------------------------------------------
# load my configuration
-for myconfig in "${NETDATA_STOCK_CONFIG_DIR}/${PROGRAM_NAME}.conf" "${NETDATA_USER_CONFIG_DIR}/${PROGRAM_NAME}.conf"
-do
- if [ -f "$myconfig" ]
- then
- source "$myconfig"
- if [ $? -ne 0 ]
- then
- error "Config file '$myconfig' loaded with errors."
- else
- info "Configuration file '$myconfig' loaded."
- fi
- else
- warning "Configuration file '$myconfig' not found."
- fi
+for myconfig in "${NETDATA_STOCK_CONFIG_DIR}/${PROGRAM_NAME}.conf" "${NETDATA_USER_CONFIG_DIR}/${PROGRAM_NAME}.conf"; do
+ if [ -f "$myconfig" ]; then
+ source "$myconfig"
+ if [ $? -ne 0 ]; then
+ error "Config file '$myconfig' loaded with errors."
+ else
+ info "Configuration file '$myconfig' loaded."
+ fi
+ else
+ warning "Configuration file '$myconfig' not found."
+ fi
done
# make sure time_divisor is right
@@ -259,22 +246,20 @@ time_divisor=$((time_divisor))
[ $time_divisor -lt 10 ] && time_divisor=10
[ $time_divisor -gt 100 ] && time_divisor=100
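# Worked example of the clamped value, per the time_divisor description in
# this plugin: with update_every=1 and time_divisor=50 the inner loop wakes
# every 1 * 1000 * 50 / 100 = 500 ms, while each module still collects only
# once its own update_every has elapsed.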
-
# we check for the timeout command, after we load our
# configuration, so that the user may overwrite the
# timeout command we use, providing a function that
# can emulate the timeout command we need:
# > timeout SECONDS command ...
-if [ $check_for_timeout -eq 1 ]
- then
- require_cmd timeout || exit 1
+if [ $check_for_timeout -eq 1 ]; then
+ require_cmd timeout || exit 1
fi
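# A minimal sketch of such a user-supplied override (illustrative, not part
# of this patch), placed in charts.d.conf together with check_for_timeout=0
# so the plugin does not insist on a real timeout binary:
#   timeout() {
#     local t="${1}"; shift
#     "${@}" &
#     local pid=$!
#     (sleep "${t}" && kill "${pid}" 2>/dev/null) &
#     wait "${pid}"
#   }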
# -----------------------------------------------------------------------------
# internal checks
# netdata passes the requested update frequency as the first argument
-update_every=$(( update_every + 1 - 1)) # makes sure it is a number
+update_every=$((update_every + 1 - 1)) # makes sure it is a number
test $update_every -eq 0 && update_every=1 # if it is zero, make it 1
# check the charts.d directory
@@ -284,39 +269,37 @@ test $update_every -eq 0 && update_every=1 # if it is zero, make it 1
# library functions
fixid() {
- echo "$*" |\
- tr -c "[A-Z][a-z][0-9]" "_" |\
- sed -e "s|^_\+||g" -e "s|_\+$||g" -e "s|_\+|_|g" |\
- tr "[A-Z]" "[a-z]"
+ echo "$*" |
+ tr -c "[A-Z][a-z][0-9]" "_" |
+ sed -e "s|^_\+||g" -e "s|_\+$||g" -e "s|_\+|_|g" |
+ tr "[A-Z]" "[a-z]"
}
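# e.g. fixid "Core 0 Temp" yields "core_0_temp": every run of
# non-alphanumeric characters collapses to a single underscore, leading and
# trailing underscores are trimmed, and the result is lowercased.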
run() {
- local ret pid="${BASHPID}" t
-
- if [ "z${1}" = "z-t" -a "${2}" != "0" ]
- then
- t="${2}"
- shift 2
- timeout ${t} "${@}" 2>"${TMP_DIR}/run.${pid}"
- ret=$?
- else
- "${@}" 2>"${TMP_DIR}/run.${pid}"
- ret=$?
- fi
-
- if [ ${ret} -ne 0 ]
- then
- {
- printf "$(logdate): ${PROGRAM_NAME}: ${status}: ${MODULE_NAME}: command '"
- printf "%q " "${@}"
- printf "' failed with code ${ret}:\n --- BEGIN TRACE ---\n"
- cat "${TMP_DIR}/run.${pid}"
- printf " --- END TRACE ---\n"
- } >&2
- fi
- rm "${TMP_DIR}/run.${pid}"
-
- return ${ret}
+ local ret pid="${BASHPID}" t
+
+ if [ "z${1}" = "z-t" -a "${2}" != "0" ]; then
+ t="${2}"
+ shift 2
+ timeout ${t} "${@}" 2>"${TMP_DIR}/run.${pid}"
+ ret=$?
+ else
+ "${@}" 2>"${TMP_DIR}/run.${pid}"
+ ret=$?
+ fi
+
+ if [ ${ret} -ne 0 ]; then
+ {
+ printf "$(logdate): ${PROGRAM_NAME}: ${status}: ${MODULE_NAME}: command '"
+ printf "%q " "${@}"
+ printf "' failed with code ${ret}:\n --- BEGIN TRACE ---\n"
+ cat "${TMP_DIR}/run.${pid}"
+ printf " --- END TRACE ---\n"
+ } >&2
+ fi
+ rm "${TMP_DIR}/run.${pid}"
+
+ return ${ret}
}
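# Usage sketch: "run -t 5 apcaccess status" executes the command under a five
# second timeout, while "run some_command args" runs it directly; either way,
# a non-zero exit replays the command's stderr between BEGIN TRACE and
# END TRACE markers on the plugin's own stderr.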
# convert any floating point number
@@ -325,166 +308,155 @@ run() {
# so that no fork is necessary
# the multiplier must be a power of 10
float2int() {
- local f m="$2" a b l v=($1)
- f=${v[0]}
-
- # the length of the multiplier - 1
- l=$(( ${#m} - 1 ))
-
- # check if the number is in scientific notation
- if [[ ${f} =~ ^[[:space:]]*(-)?[0-9.]+(e|E)(\+|-)[0-9]+ ]]
- then
- # convert it to decimal
- # unfortunately, this fork cannot be avoided
- # if you know of a way to avoid it, please let me know
- f=$(printf "%0.${l}f" ${f})
- fi
-
- # split the floating point number
- # in integer (a) and decimal (b)
- a=${f/.*/}
- b=${f/*./}
-
- # if the integer part is missing
- # set it to zero
- [ -z "${a}" ] && a="0"
-
- # strip leading zeros from the integer part
- # base 10 convertion
- a=$((10#$a))
-
- # check the length of the decimal part
- # against the length of the multiplier
- if [ ${#b} -gt ${l} ]
- then
- # too many digits - take the most significant
- b=${b:0:${l}}
-
- elif [ ${#b} -lt ${l} ]
- then
- # too few digits - pad with zero on the right
- local z="00000000000000000000000" r=$((l - ${#b}))
- b="${b}${z:0:${r}}"
- fi
-
- # strip leading zeros from the decimal part
- # base 10 convertion
- b=$((10#$b))
-
- # store the result
- FLOAT2INT_RESULT=$(( (a * m) + b ))
+ local f m="$2" a b l v=($1)
+ f=${v[0]}
+
+ # the length of the multiplier - 1
+ l=$((${#m} - 1))
+
+ # check if the number is in scientific notation
+ if [[ ${f} =~ ^[[:space:]]*(-)?[0-9.]+(e|E)(\+|-)[0-9]+ ]]; then
+ # convert it to decimal
+ # unfortunately, this fork cannot be avoided
+ # if you know of a way to avoid it, please let me know
+ f=$(printf "%0.${l}f" ${f})
+ fi
+
+ # split the floating point number
+ # in integer (a) and decimal (b)
+ a=${f/.*/}
+ b=${f/*./}
+
+ # if the integer part is missing
+ # set it to zero
+ [ -z "${a}" ] && a="0"
+
+ # strip leading zeros from the integer part
+ # base 10 conversion
+ a=$((10#$a))
+
+ # check the length of the decimal part
+ # against the length of the multiplier
+ if [ ${#b} -gt ${l} ]; then
+ # too many digits - take the most significant
+ b=${b:0:l}
+
+ elif [ ${#b} -lt ${l} ]; then
+ # too few digits - pad with zero on the right
+ local z="00000000000000000000000" r=$((l - ${#b}))
+ b="${b}${z:0:r}"
+ fi
+
+ # strip leading zeros from the decimal part
+ # base 10 conversion
+ b=$((10#$b))
+
+ # store the result
+ FLOAT2INT_RESULT=$(((a * m) + b))
}
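# Quick usage sketch of float2int (values illustrative):
#   float2int "1.5" 100     # a=1, b padded to "50", FLOAT2INT_RESULT=150
#   float2int "2.5e-1" 1000 # scientific notation path, FLOAT2INT_RESULT=250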
-
# -----------------------------------------------------------------------------
# charts check functions
all_charts() {
- cd "$chartsd"
- [ $? -ne 0 ] && error "cannot cd to $chartsd" && return 1
+ cd "$chartsd"
+ [ $? -ne 0 ] && error "cannot cd to $chartsd" && return 1
- ls *.chart.sh | sed "s/\.chart\.sh$//g"
+ ls *.chart.sh | sed "s/\.chart\.sh$//g"
}
declare -A charts_enable_keyword=(
- ['apache']="force"
- ['cpu_apps']="force"
- ['cpufreq']="force"
- ['example']="force"
- ['exim']="force"
- ['hddtemp']="force"
- ['load_average']="force"
- ['mem_apps']="force"
- ['mysql']="force"
- ['nginx']="force"
- ['phpfpm']="force"
- ['postfix']="force"
- ['sensors']="force"
- ['squid']="force"
- ['tomcat']="force"
- )
+ ['apache']="force"
+ ['cpu_apps']="force"
+ ['cpufreq']="force"
+ ['example']="force"
+ ['exim']="force"
+ ['hddtemp']="force"
+ ['load_average']="force"
+ ['mem_apps']="force"
+ ['mysql']="force"
+ ['nginx']="force"
+ ['phpfpm']="force"
+ ['postfix']="force"
+ ['sensors']="force"
+ ['squid']="force"
+ ['tomcat']="force"
+)
all_enabled_charts() {
- local charts= enabled= required=
-
- # find all enabled charts
-
- for chart in $( all_charts )
- do
- MODULE_NAME="${chart}"
-
- eval "enabled=\$$chart"
- if [ -z "${enabled}" ]
- then
- enabled="${enable_all_charts}"
- fi
-
- required="${charts_enable_keyword[${chart}]}"
- [ -z "${required}" ] && required="yes"
-
- if [ ! "${enabled}" = "${required}" ]
- then
- info "is disabled. Add a line with $chart=$required in '${NETDATA_USER_CONFIG_DIR}/${PROGRAM_NAME}.conf' to enable it (or remove the line that disables it)."
- else
- debug "is enabled for auto-detection."
- local charts="$charts $chart"
- fi
- done
- MODULE_NAME="main"
-
- local charts2=
- for chart in $charts
- do
- MODULE_NAME="${chart}"
-
- # check the enabled charts
- local check="$( cat "$chartsd/$chart.chart.sh" | sed "s/^ \+//g" | grep "^$chart$charts_check()" )"
- if [ -z "$check" ]
- then
- error "module '$chart' does not seem to have a $chart$charts_check() function. Disabling it."
- continue
- fi
-
- local create="$( cat "$chartsd/$chart.chart.sh" | sed "s/^ \+//g" | grep "^$chart$charts_create()" )"
- if [ -z "$create" ]
- then
- error "module '$chart' does not seem to have a $chart$charts_create() function. Disabling it."
- continue
- fi
-
- local update="$( cat "$chartsd/$chart.chart.sh" | sed "s/^ \+//g" | grep "^$chart$charts_update()" )"
- if [ -z "$update" ]
- then
- error "module '$chart' does not seem to have a $chart$charts_update() function. Disabling it."
- continue
- fi
-
- # check its config
- #if [ -f "$userconfd/$chart.conf" ]
- #then
- # if [ ! -z "$( cat "$userconfd/$chart.conf" | sed "s/^ \+//g" | grep -v "^$" | grep -v "^#" | grep -v "^$chart$charts_undescore" )" ]
- # then
- # error "module's $chart config $userconfd/$chart.conf should only have lines starting with $chart$charts_undescore . Disabling it."
- # continue
- # fi
- #fi
-
- #if [ $dryrunner -eq 1 ]
- # then
- # "$pluginsd/charts.d.dryrun-helper.sh" "$chart" "$chartsd/$chart.chart.sh" "$userconfd/$chart.conf" >/dev/null
- # if [ $? -ne 0 ]
- # then
- # error "module's $chart did not pass the dry run check. This means it uses global variables not starting with $chart. Disabling it."
- # continue
- # fi
- #fi
-
- local charts2="$charts2 $chart"
- done
- MODULE_NAME="main"
-
- echo $charts2
- debug "enabled charts: $charts2"
+ local charts= enabled= required=
+
+ # find all enabled charts
+
+ for chart in $(all_charts); do
+ MODULE_NAME="${chart}"
+
+ eval "enabled=\$$chart"
+ if [ -z "${enabled}" ]; then
+ enabled="${enable_all_charts}"
+ fi
+
+ required="${charts_enable_keyword[${chart}]}"
+ [ -z "${required}" ] && required="yes"
+
+ if [ ! "${enabled}" = "${required}" ]; then
+ info "is disabled. Add a line with $chart=$required in '${NETDATA_USER_CONFIG_DIR}/${PROGRAM_NAME}.conf' to enable it (or remove the line that disables it)."
+ else
+ debug "is enabled for auto-detection."
+ local charts="$charts $chart"
+ fi
+ done
+ MODULE_NAME="main"
+
+ local charts2=
+ for chart in $charts; do
+ MODULE_NAME="${chart}"
+
+ # check the enabled charts
+ local check="$(cat "$chartsd/$chart.chart.sh" | sed "s/^ \+//g" | grep "^$chart$charts_check()")"
+ if [ -z "$check" ]; then
+ error "module '$chart' does not seem to have a $chart$charts_check() function. Disabling it."
+ continue
+ fi
+
+ local create="$(cat "$chartsd/$chart.chart.sh" | sed "s/^ \+//g" | grep "^$chart$charts_create()")"
+ if [ -z "$create" ]; then
+ error "module '$chart' does not seem to have a $chart$charts_create() function. Disabling it."
+ continue
+ fi
+
+ local update="$(cat "$chartsd/$chart.chart.sh" | sed "s/^ \+//g" | grep "^$chart$charts_update()")"
+ if [ -z "$update" ]; then
+ error "module '$chart' does not seem to have a $chart$charts_update() function. Disabling it."
+ continue
+ fi
+
+ # check its config
+ #if [ -f "$userconfd/$chart.conf" ]
+ #then
+ # if [ ! -z "$( cat "$userconfd/$chart.conf" | sed "s/^ \+//g" | grep -v "^$" | grep -v "^#" | grep -v "^$chart$charts_undescore" )" ]
+ # then
+ # error "module's $chart config $userconfd/$chart.conf should only have lines starting with $chart$charts_undescore . Disabling it."
+ # continue
+ # fi
+ #fi
+
+ #if [ $dryrunner -eq 1 ]
+ # then
+ # "$pluginsd/charts.d.dryrun-helper.sh" "$chart" "$chartsd/$chart.chart.sh" "$userconfd/$chart.conf" >/dev/null
+ # if [ $? -ne 0 ]
+ # then
+ # error "module's $chart did not pass the dry run check. This means it uses global variables not starting with $chart. Disabling it."
+ # continue
+ # fi
+ #fi
+
+ local charts2="$charts2 $chart"
+ done
+ MODULE_NAME="main"
+
+ echo $charts2
+ debug "enabled charts: $charts2"
}
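# The practical meaning of the "force" map above: those modules stay disabled
# until the user opts in explicitly, while every other module follows
# enable_all_charts. A charts.d.conf sketch (module choices illustrative):
#   apache=force   # opt in to a module that requires "force"
#   apcupsd=no     # switch off a module that would otherwise auto-enable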
# -----------------------------------------------------------------------------
@@ -493,63 +465,56 @@ all_enabled_charts() {
suffix_retries="_retries"
suffix_update_every="_update_every"
active_charts=
-for chart in $( all_enabled_charts )
-do
- MODULE_NAME="${chart}"
-
- debug "loading module: '$chartsd/$chart.chart.sh'"
-
- source "$chartsd/$chart.chart.sh"
- [ $? -ne 0 ] && warning "Module '$chartsd/$chart.chart.sh' loaded with errors."
-
- # first load the stock config
- if [ -f "$stockconfd/$chart.conf" ]
- then
- debug "loading module configuration: '$stockconfd/$chart.conf'"
- source "$stockconfd/$chart.conf"
- [ $? -ne 0 ] && warning "Config file '$stockconfd/$chart.conf' loaded with errors."
- else
- debug "not found module configuration: '$stockconfd/$chart.conf'"
- fi
-
- # then load the user config (it overwrites the stock)
- if [ -f "$userconfd/$chart.conf" ]
- then
- debug "loading module configuration: '$userconfd/$chart.conf'"
- source "$userconfd/$chart.conf"
- [ $? -ne 0 ] && warning "Config file '$userconfd/$chart.conf' loaded with errors."
- else
- debug "not found module configuration: '$userconfd/$chart.conf'"
-
- if [ -f "$olduserconfd/$chart.conf" ]
- then
- # support for very old netdata that had the charts.d module configs in /etc/netdata
- info "loading module configuration from obsolete location: '$olduserconfd/$chart.conf'"
- source "$olduserconfd/$chart.conf"
- [ $? -ne 0 ] && warning "Config file '$olduserconfd/$chart.conf' loaded with errors."
- fi
- fi
-
- eval "dt=\$$chart$suffix_update_every"
- dt=$(( dt + 1 - 1 )) # make sure it is a number
- if [ $dt -lt $update_every ]
- then
- eval "$chart$suffix_update_every=$update_every"
- fi
-
- $chart$charts_check
- if [ $? -eq 0 ]
- then
- debug "module '$chart' activated"
- active_charts="$active_charts $chart"
- else
- error "module's '$chart' check() function reports failure."
- fi
+for chart in $(all_enabled_charts); do
+ MODULE_NAME="${chart}"
+
+ debug "loading module: '$chartsd/$chart.chart.sh'"
+
+ source "$chartsd/$chart.chart.sh"
+ [ $? -ne 0 ] && warning "Module '$chartsd/$chart.chart.sh' loaded with errors."
+
+ # first load the stock config
+ if [ -f "$stockconfd/$chart.conf" ]; then
+ debug "loading module configuration: '$stockconfd/$chart.conf'"
+ source "$stockconfd/$chart.conf"
+ [ $? -ne 0 ] && warning "Config file '$stockconfd/$chart.conf' loaded with errors."
+ else
+ debug "not found module configuration: '$stockconfd/$chart.conf'"
+ fi
+
+ # then load the user config (it overwrites the stock)
+ if [ -f "$userconfd/$chart.conf" ]; then
+ debug "loading module configuration: '$userconfd/$chart.conf'"
+ source "$userconfd/$chart.conf"
+ [ $? -ne 0 ] && warning "Config file '$userconfd/$chart.conf' loaded with errors."
+ else
+ debug "not found module configuration: '$userconfd/$chart.conf'"
+
+ if [ -f "$olduserconfd/$chart.conf" ]; then
+ # support for very old netdata that had the charts.d module configs in /etc/netdata
+ info "loading module configuration from obsolete location: '$olduserconfd/$chart.conf'"
+ source "$olduserconfd/$chart.conf"
+ [ $? -ne 0 ] && warning "Config file '$olduserconfd/$chart.conf' loaded with errors."
+ fi
+ fi
+
+ eval "dt=\$$chart$suffix_update_every"
+ dt=$((dt + 1 - 1)) # make sure it is a number
+ if [ $dt -lt $update_every ]; then
+ eval "$chart$suffix_update_every=$update_every"
+ fi
+
+ $chart$charts_check
+ if [ $? -eq 0 ]; then
+ debug "module '$chart' activated"
+ active_charts="$active_charts $chart"
+ else
+ error "module's '$chart' check() function reports failure."
+ fi
done
MODULE_NAME="main"
debug "activated modules: $active_charts"
-
# -----------------------------------------------------------------------------
# check overwrites
@@ -558,28 +523,24 @@ debug_time=
test $debug -eq 1 && debug_time=tellwork
# if we only need a specific chart, remove all the others
-if [ ! -z "${chart_only}" ]
-then
- debug "requested to run only for: '${chart_only}'"
- check_charts=
- for chart in $active_charts
- do
- if [ "$chart" = "$chart_only" ]
- then
- check_charts="$chart"
- break
- fi
- done
- active_charts="$check_charts"
+if [ ! -z "${chart_only}" ]; then
+ debug "requested to run only for: '${chart_only}'"
+ check_charts=
+ for chart in $active_charts; do
+ if [ "$chart" = "$chart_only" ]; then
+ check_charts="$chart"
+ break
+ fi
+ done
+ active_charts="$check_charts"
fi
debug "activated charts: $active_charts"
# stop if we just need a pre-check
-if [ $check -eq 1 ]
-then
- info "CHECK RESULT"
- info "Will run the charts: $active_charts"
- exit 0
+if [ $check -eq 1 ]; then
+ info "CHECK RESULT"
+ info "Will run the charts: $active_charts"
+ exit 0
fi
# -----------------------------------------------------------------------------
@@ -590,24 +551,21 @@ cd "${TMP_DIR}" || exit 1
# create charts
run_charts=
-for chart in $active_charts
-do
- MODULE_NAME="${chart}"
-
- debug "calling '$chart$charts_create()'..."
- $chart$charts_create
- if [ $? -eq 0 ]
- then
- run_charts="$run_charts $chart"
- debug "'$chart' initialized."
- else
- error "module's '$chart' function '$chart$charts_create()' reports failure."
- fi
+for chart in $active_charts; do
+ MODULE_NAME="${chart}"
+
+ debug "calling '$chart$charts_create()'..."
+ $chart$charts_create
+ if [ $? -eq 0 ]; then
+ run_charts="$run_charts $chart"
+ debug "'$chart' initialized."
+ else
+ error "module's '$chart' function '$chart$charts_create()' reports failure."
+ fi
done
MODULE_NAME="main"
debug "run_charts='$run_charts'"
-
# -----------------------------------------------------------------------------
# update dimensions
@@ -615,129 +573,119 @@ debug "run_charts='$run_charts'"
declare -A charts_last_update=() charts_update_every=() charts_retries=() charts_next_update=() charts_run_counter=() charts_serial_failures=()
global_update() {
- local exit_at \
- c=0 dt ret last_ms exec_start_ms exec_end_ms \
- chart now_charts=() next_charts=($run_charts) \
- next_ms x seconds millis
-
- # return the current time in ms in $now_ms
- ${current_time_ms}
-
- exit_at=$(( now_ms + (restart_timeout * 1000) ))
-
- for chart in $run_charts
- do
- eval "charts_update_every[$chart]=\$$chart$suffix_update_every"
- test -z "${charts_update_every[$chart]}" && charts_update_every[$chart]=$update_every
-
- eval "charts_retries[$chart]=\$$chart$suffix_retries"
- test -z "${charts_retries[$chart]}" && charts_retries[$chart]=10
-
- charts_last_update[$chart]=$((now_ms - (now_ms % (charts_update_every[$chart] * 1000) ) ))
- charts_next_update[$chart]=$(( charts_last_update[$chart] + (charts_update_every[$chart] * 1000) ))
- charts_run_counter[$chart]=0
- charts_serial_failures[$chart]=0
-
- echo "CHART netdata.plugin_chartsd_$chart '' 'Execution time for $chart plugin' 'milliseconds / run' charts.d netdata.plugin_charts area 145000 ${charts_update_every[$chart]}"
- echo "DIMENSION run_time 'run time' absolute 1 1"
- done
-
- # the main loop
- while [ "${#next_charts[@]}" -gt 0 ]
- do
- c=$((c + 1))
- now_charts=("${next_charts[@]}")
- next_charts=()
-
- # return the current time in ms in $now_ms
- ${current_time_ms}
-
- for chart in "${now_charts[@]}"
- do
- MODULE_NAME="${chart}"
-
- if [ ${now_ms} -ge ${charts_next_update[$chart]} ]
- then
- last_ms=${charts_last_update[$chart]}
- dt=$(( (now_ms - last_ms) ))
-
- charts_last_update[$chart]=${now_ms}
-
- while [ ${charts_next_update[$chart]} -lt ${now_ms} ]
- do
- charts_next_update[$chart]=$(( charts_next_update[$chart] + (charts_update_every[$chart] * 1000) ))
- done
-
- # the first call should not give a duration
- # so that netdata calibrates to current time
- dt=$(( dt * 1000 ))
- charts_run_counter[$chart]=$(( charts_run_counter[$chart] + 1 ))
- if [ ${charts_run_counter[$chart]} -eq 1 ]
- then
- dt=
- fi
-
- exec_start_ms=$now_ms
- $chart$charts_update $dt
- ret=$?
-
- # return the current time in ms in $now_ms
- ${current_time_ms}; exec_end_ms=$now_ms
-
- echo "BEGIN netdata.plugin_chartsd_$chart $dt"
- echo "SET run_time = $(( exec_end_ms - exec_start_ms ))"
- echo "END"
-
- if [ $ret -eq 0 ]
- then
- charts_serial_failures[$chart]=0
- next_charts+=($chart)
- else
- charts_serial_failures[$chart]=$(( charts_serial_failures[$chart] + 1 ))
-
- if [ ${charts_serial_failures[$chart]} -gt ${charts_retries[$chart]} ]
- then
- error "module's '$chart' update() function reported failure ${charts_serial_failures[$chart]} times. Disabling it."
- else
- error "module's '$chart' update() function reports failure. Will keep trying for a while."
- next_charts+=($chart)
- fi
- fi
- else
- next_charts+=($chart)
- fi
- done
- MODULE_NAME="${chart}"
-
- # wait the time you are required to
- next_ms=$((now_ms + (update_every * 1000 * 100) ))
- for x in "${charts_next_update[@]}"; do [ ${x} -lt ${next_ms} ] && next_ms=${x}; done
- next_ms=$((next_ms - now_ms))
-
- if [ ${LOOPSLEEPMS_HIGHRES} -eq 1 -a ${next_ms} -gt 0 ]
- then
- next_ms=$(( next_ms + current_time_ms_accuracy ))
- seconds=$(( next_ms / 1000 ))
- millis=$(( next_ms % 1000 ))
- if [ ${millis} -lt 10 ]
- then
- millis="00${millis}"
- elif [ ${millis} -lt 100 ]
- then
- millis="0${millis}"
- fi
-
- debug "sleeping for ${seconds}.${millis} seconds."
- ${mysleep} ${seconds}.${millis}
- else
- debug "sleeping for ${update_every} seconds."
- ${mysleep} $update_every
- fi
-
- test ${now_ms} -ge ${exit_at} && exit 0
- done
-
- fatal "nothing left to do, exiting..."
+ local exit_at \
+ c=0 dt ret last_ms exec_start_ms exec_end_ms \
+ chart now_charts=() next_charts=($run_charts) \
+ next_ms x seconds millis
+
+ # return the current time in ms in $now_ms
+ ${current_time_ms}
+
+ exit_at=$((now_ms + (restart_timeout * 1000)))
+
+ for chart in $run_charts; do
+ eval "charts_update_every[$chart]=\$$chart$suffix_update_every"
+ test -z "${charts_update_every[$chart]}" && charts_update_every[$chart]=$update_every
+
+ eval "charts_retries[$chart]=\$$chart$suffix_retries"
+ test -z "${charts_retries[$chart]}" && charts_retries[$chart]=10
+
+ charts_last_update[$chart]=$((now_ms - (now_ms % (charts_update_every[$chart] * 1000))))
+ charts_next_update[$chart]=$((charts_last_update[$chart] + (charts_update_every[$chart] * 1000)))
+ charts_run_counter[$chart]=0
+ charts_serial_failures[$chart]=0
+
+ echo "CHART netdata.plugin_chartsd_$chart '' 'Execution time for $chart plugin' 'milliseconds / run' charts.d netdata.plugin_charts area 145000 ${charts_update_every[$chart]}"
+ echo "DIMENSION run_time 'run time' absolute 1 1"
+ done
+
+ # the main loop
+ while [ "${#next_charts[@]}" -gt 0 ]; do
+ c=$((c + 1))
+ now_charts=("${next_charts[@]}")
+ next_charts=()
+
+ # return the current time in ms in $now_ms
+ ${current_time_ms}
+
+ for chart in "${now_charts[@]}"; do
+ MODULE_NAME="${chart}"
+
+ if [ ${now_ms} -ge ${charts_next_update[$chart]} ]; then
+ last_ms=${charts_last_update[$chart]}
+ dt=$((now_ms - last_ms))
+
+ charts_last_update[$chart]=${now_ms}
+
+ while [ ${charts_next_update[$chart]} -lt ${now_ms} ]; do
+ charts_next_update[$chart]=$((charts_next_update[$chart] + (charts_update_every[$chart] * 1000)))
+ done
+
+ # the first call should not give a duration
+ # so that netdata calibrates to current time
+ dt=$((dt * 1000))
+ charts_run_counter[$chart]=$((charts_run_counter[$chart] + 1))
+ if [ ${charts_run_counter[$chart]} -eq 1 ]; then
+ dt=
+ fi
+
+ exec_start_ms=$now_ms
+ $chart$charts_update $dt
+ ret=$?
+
+ # return the current time in ms in $now_ms
+ ${current_time_ms}
+ exec_end_ms=$now_ms
+
+ echo "BEGIN netdata.plugin_chartsd_$chart $dt"
+ echo "SET run_time = $((exec_end_ms - exec_start_ms))"
+ echo "END"
+
+ if [ $ret -eq 0 ]; then
+ charts_serial_failures[$chart]=0
+ next_charts+=($chart)
+ else
+ charts_serial_failures[$chart]=$((charts_serial_failures[$chart] + 1))
+
+ if [ ${charts_serial_failures[$chart]} -gt ${charts_retries[$chart]} ]; then
+ error "module's '$chart' update() function reported failure ${charts_serial_failures[$chart]} times. Disabling it."
+ else
+ error "module's '$chart' update() function reports failure. Will keep trying for a while."
+ next_charts+=($chart)
+ fi
+ fi
+ else
+ next_charts+=($chart)
+ fi
+ done
+ MODULE_NAME="${chart}"
+
+ # wait the time you are required to
+ next_ms=$((now_ms + (update_every * 1000 * 100)))
+ for x in "${charts_next_update[@]}"; do [ ${x} -lt ${next_ms} ] && next_ms=${x}; done
+ next_ms=$((next_ms - now_ms))
+
+ if [ ${LOOPSLEEPMS_HIGHRES} -eq 1 -a ${next_ms} -gt 0 ]; then
+ next_ms=$((next_ms + current_time_ms_accuracy))
+ seconds=$((next_ms / 1000))
+ millis=$((next_ms % 1000))
+ if [ ${millis} -lt 10 ]; then
+ millis="00${millis}"
+ elif [ ${millis} -lt 100 ]; then
+ millis="0${millis}"
+ fi
+
+ debug "sleeping for ${seconds}.${millis} seconds."
+ ${mysleep} ${seconds}.${millis}
+ else
+ debug "sleeping for ${update_every} seconds."
+ ${mysleep} $update_every
+ fi
+
+ test ${now_ms} -ge ${exit_at} && exit 0
+ done
+
+ fatal "nothing left to do, exiting..."
}
global_update
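
The rewritten loop above keys every module's schedule to wall-clock boundaries rather than to elapsed time since the last run. A minimal standalone sketch of that alignment (hypothetical values; 1-second resolution from `date`, where the plugin uses millisecond helpers):

```
#!/usr/bin/env bash
# Sketch: align a module's next run to wall-clock multiples of update_every,
# so collections land at :00, :05, :10 ... regardless of drift.
update_every=5                         # seconds between runs
now_ms=$(($(date +%s) * 1000))         # current time in ms (1s resolution here)

last_ms=$((now_ms - (now_ms % (update_every * 1000))))   # previous tick
next_ms=$((last_ms + (update_every * 1000)))             # next tick

echo "now:  ${now_ms} ms"
echo "next: ${next_ms} ms (in $((next_ms - now_ms)) ms)"
```
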
diff --git a/collectors/charts.d.plugin/cpu_apps/README.md b/collectors/charts.d.plugin/cpu_apps/README.md
index cd8adf0a..a32a6330 100644
--- a/collectors/charts.d.plugin/cpu_apps/README.md
+++ b/collectors/charts.d.plugin/cpu_apps/README.md
@@ -1,2 +1,6 @@
+# cpu_apps
+
> THIS MODULE IS OBSOLETE.
-> USE APPS.PLUGIN.
+> USE [APPS.PLUGIN](../../apps.plugin).
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fcharts.d.plugin%2Fcpu_apps%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/collectors/charts.d.plugin/cpu_apps/cpu_apps.chart.sh b/collectors/charts.d.plugin/cpu_apps/cpu_apps.chart.sh
index 869464af..e91c46d5 100644
--- a/collectors/charts.d.plugin/cpu_apps/cpu_apps.chart.sh
+++ b/collectors/charts.d.plugin/cpu_apps/cpu_apps.chart.sh
@@ -23,8 +23,7 @@ cpu_apps_check() {
# - 0 to enable the chart
# - 1 to disable the chart
- if [ -z "$cpu_apps_apps" ]
- then
+ if [ -z "$cpu_apps_apps" ]; then
error "manual configuration required: please set cpu_apps_apps='command1 command2 ...' in $confd/cpu_apps_apps.conf"
return 1
fi
@@ -38,8 +37,7 @@ cpu_apps_create() {
echo "CHART chartsd_apps.cpu '' 'Apps CPU' 'milliseconds / $cpu_apps_update_every sec' apps apps stacked 20001 $cpu_apps_update_every"
local x=
- for x in $cpu_apps_apps
- do
+ for x in $cpu_apps_apps; do
echo "DIMENSION $x $x incremental 1000 $cpu_apps_clockticks"
# this string is needed later in the update() function
@@ -55,15 +53,15 @@ cpu_apps_update() {
# remember: KEEP IT SIMPLE AND SHORT
echo "BEGIN chartsd_apps.cpu"
- ps -o pid,comm -C "$cpu_apps_apps" |\
- grep -v "COMMAND" |\
+ ps -o pid,comm -C "$cpu_apps_apps" |
+ grep -v "COMMAND" |
(
- while read pid name
- do
- echo "$name+=`cat /proc/$pid/stat | cut -d ' ' -f 14-15`"
+ while read pid name; do
+ echo "$name+=$(cat /proc/$pid/stat | cut -d ' ' -f 14-15)"
done
- ) |\
- ( sed -e "s/ \+/ /g" -e "s/ /+/g";
+ ) |
+ (
+ sed -e "s/ \+/ /g" -e "s/ /+/g"
echo "$cpu_apps_bc_finalze"
) | bc
echo "END"
diff --git a/collectors/charts.d.plugin/cpufreq/README.md b/collectors/charts.d.plugin/cpufreq/README.md
index d82951aa..84883f58 100644
--- a/collectors/charts.d.plugin/cpufreq/README.md
+++ b/collectors/charts.d.plugin/cpufreq/README.md
@@ -1,2 +1,6 @@
+# cpufreq
+
> THIS MODULE IS OBSOLETE.
-> USE THE PYTHON ONE - IT SUPPORTS MULTIPLE JOBS AND IT IS MORE EFFICIENT
+> USE THE [PROC PLUGIN](../../proc.plugin) - IT SUPPORTS MULTIPLE JOBS AND IT IS MORE EFFICIENT
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fcharts.d.plugin%2Fcpufreq%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/collectors/charts.d.plugin/cpufreq/cpufreq.chart.sh b/collectors/charts.d.plugin/cpufreq/cpufreq.chart.sh
index 1fc6caab..68708d91 100644
--- a/collectors/charts.d.plugin/cpufreq/cpufreq.chart.sh
+++ b/collectors/charts.d.plugin/cpufreq/cpufreq.chart.sh
@@ -30,7 +30,7 @@ cpufreq_check() {
# - 0 to enable the chart
# - 1 to disable the chart
- [ -z "$( cpufreq_find_all_files "$cpufreq_sys_dir" )" ] && return 1
+ [ -z "$(cpufreq_find_all_files "$cpufreq_sys_dir")" ] && return 1
return 0
}
@@ -47,16 +47,15 @@ cpufreq_create() {
echo >>"$TMP_DIR/cpufreq.sh" "echo \"BEGIN cpu.cpufreq \$1\""
i=0
- for file in $( cpufreq_find_all_files "$cpufreq_sys_dir" | sort -u )
- do
- i=$(( i + 1 ))
- dir=$( dirname "$file" )
+ for file in $(cpufreq_find_all_files "$cpufreq_sys_dir" | sort -u); do
+ i=$((i + 1))
+ dir=$(dirname "$file")
cpu=
- [ -f "$dir/affected_cpus" ] && cpu=$( cat "$dir/affected_cpus" )
+ [ -f "$dir/affected_cpus" ] && cpu=$(cat "$dir/affected_cpus")
[ -z "$cpu" ] && cpu="$i.a"
- id="$( fixid "cpu$cpu" )"
+ id="$(fixid "cpu$cpu")"
debug "file='$file', dir='$dir', cpu='$cpu', id='$id'"
@@ -68,7 +67,7 @@ cpufreq_create() {
[ $cpufreq_source_update -eq 1 ] && echo >>"$TMP_DIR/cpufreq.sh" "}"
# ok, load the function cpufreq_update() we created
- # shellcheck disable=SC1090
+ # shellcheck disable=SC1090
[ $cpufreq_source_update -eq 1 ] && . "$TMP_DIR/cpufreq.sh"
return 0
@@ -82,9 +81,8 @@ cpufreq_update() {
# do all the work to collect / calculate the values
# for each dimension
# remember: KEEP IT SIMPLE AND SHORT
- # shellcheck disable=SC1090
+ # shellcheck disable=SC1090
[ $cpufreq_source_update -eq 0 ] && . "$TMP_DIR/cpufreq.sh" "$1"
return 0
}
-
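
The hunks above keep cpufreq's generate-then-source design: the sysfs discovery runs once, a tailored update function is written to a file, and every later call is a single cheap invocation. A minimal sketch of that pattern (illustrative function and file names; harmless on systems without cpufreq sysfs entries):

```
TMP_DIR="${TMP_DIR:-/tmp}"

{
    echo 'cpufreq_demo_update() {'
    echo '    echo "BEGIN cpu.cpufreq $1"'
    for f in /sys/devices/system/cpu/cpu*/cpufreq/scaling_cur_freq; do
        [ -r "$f" ] && echo "    echo \"SET $(basename "$(dirname "$(dirname "$f")")") = \$(cat $f)\""
    done
    echo '    echo "END"'
    echo '}'
} >"$TMP_DIR/cpufreq_demo.sh"

# shellcheck disable=SC1090
. "$TMP_DIR/cpufreq_demo.sh"
cpufreq_demo_update 1000000   # microseconds since last run, as charts.d passes it
```
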
diff --git a/collectors/charts.d.plugin/example/README.md b/collectors/charts.d.plugin/example/README.md
index bfd5e210..e62f7677 100644
--- a/collectors/charts.d.plugin/example/README.md
+++ b/collectors/charts.d.plugin/example/README.md
@@ -1,2 +1,6 @@
+# Example
+
This is just an example charts.d data collector.
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fcharts.d.plugin%2Fexample%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/collectors/charts.d.plugin/example/example.chart.sh b/collectors/charts.d.plugin/example/example.chart.sh
index 1562c597..8bae570a 100644
--- a/collectors/charts.d.plugin/example/example.chart.sh
+++ b/collectors/charts.d.plugin/example/example.chart.sh
@@ -44,23 +44,20 @@ example_get() {
example_value1=$RANDOM
example_value2=$RANDOM
example_value3=$RANDOM
- example_value4=$((8192 + (RANDOM * 16383 / 32767) ))
+ example_value4=$((8192 + (RANDOM * 16383 / 32767)))
- if [ $example_count -gt 0 ]
- then
+ if [ $example_count -gt 0 ]; then
example_count=$((example_count - 1))
- [ $example_last -gt 16383 ] && example_value4=$((example_last + (RANDOM * ( (32767 - example_last) / 2) / 32767)))
+ [ $example_last -gt 16383 ] && example_value4=$((example_last + (RANDOM * ((32767 - example_last) / 2) / 32767)))
[ $example_last -le 16383 ] && example_value4=$((example_last - (RANDOM * (example_last / 2) / 32767)))
else
- example_count=$((1 + (RANDOM * 5 / 32767) ))
+ example_count=$((1 + (RANDOM * 5 / 32767)))
- if [ $example_last -gt 16383 ] && [ $example_value4 -gt 16383 ]
- then
+ if [ $example_last -gt 16383 ] && [ $example_value4 -gt 16383 ]; then
example_value4=$((example_value4 - 16383))
fi
- if [ $example_last -le 16383 ] && [ $example_value4 -lt 16383 ]
- then
+ if [ $example_last -le 16383 ] && [ $example_value4 -lt 16383 ]; then
example_value4=$((example_value4 + 16383))
fi
fi
diff --git a/collectors/charts.d.plugin/exim/README.md b/collectors/charts.d.plugin/exim/README.md
index d82951aa..b4c85389 100644
--- a/collectors/charts.d.plugin/exim/README.md
+++ b/collectors/charts.d.plugin/exim/README.md
@@ -1,2 +1,6 @@
+# exim
+
> THIS MODULE IS OBSOLETE.
-> USE THE PYTHON ONE - IT SUPPORTS MULTIPLE JOBS AND IT IS MORE EFFICIENT
+> USE [THE PYTHON ONE](../../python.d.plugin/exim) - IT SUPPORTS MULTIPLE JOBS AND IT IS MORE EFFICIENT
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fcharts.d.plugin%2Fexim%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/collectors/charts.d.plugin/exim/exim.chart.sh b/collectors/charts.d.plugin/exim/exim.chart.sh
index 8099a724..7b0ef70d 100644
--- a/collectors/charts.d.plugin/exim/exim.chart.sh
+++ b/collectors/charts.d.plugin/exim/exim.chart.sh
@@ -17,14 +17,12 @@ exim_update_every=5
exim_priority=60000
exim_check() {
- if [ -z "${exim_command}" ]
- then
- require_cmd exim || return 1
- exim_command="${EXIM_CMD}"
- fi
-
- if [ "$(${exim_command} -bpc 2>&1 | grep -c denied)" -ne 0 ]
- then
+ if [ -z "${exim_command}" ]; then
+ require_cmd exim || return 1
+ exim_command="${EXIM_CMD}"
+ fi
+
+ if [ "$(${exim_command} -bpc 2>&1 | grep -c denied)" -ne 0 ]; then
error "permission denied - please set 'queue_list_requires_admin = false' in your exim options file"
return 1
fi
@@ -33,16 +31,16 @@ exim_check() {
}
exim_create() {
- cat <<EOF
+ cat <<EOF
CHART exim_local.qemails '' "Exim Queue Emails" "emails" queue exim.queued.emails line $((exim_priority + 1)) $exim_update_every
DIMENSION emails '' absolute 1 1
EOF
- return 0
+ return 0
}
exim_update() {
- echo "BEGIN exim_local.qemails $1"
- echo "SET emails = $(run "${exim_command}" -bpc)"
- echo "END"
- return 0
+ echo "BEGIN exim_local.qemails $1"
+ echo "SET emails = $(run "${exim_command}" -bpc)"
+ echo "END"
+ return 0
}
diff --git a/collectors/charts.d.plugin/hddtemp/README.md b/collectors/charts.d.plugin/hddtemp/README.md
index 98f18900..86a2e19e 100644
--- a/collectors/charts.d.plugin/hddtemp/README.md
+++ b/collectors/charts.d.plugin/hddtemp/README.md
@@ -1,8 +1,8 @@
-> THIS MODULE IS OBSOLETE.
-> USE THE PYTHON ONE - IT SUPPORTS MULTIPLE JOBS AND IT IS MORE EFFICIENT
-
# hddtemp
+> THIS MODULE IS OBSOLETE.
+> USE [THE PYTHON ONE](../../python.d.plugin/hddtemp) - IT SUPPORTS MULTIPLE JOBS AND IT IS MORE EFFICIENT
+
The plugin will collect temperatures from disks.
It will create one chart with all active disks.
@@ -26,3 +26,5 @@ hddtemp_disks=()
```
---
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fcharts.d.plugin%2Fhddtemp%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/collectors/charts.d.plugin/hddtemp/hddtemp.chart.sh b/collectors/charts.d.plugin/hddtemp/hddtemp.chart.sh
index e9031098..a4cef3c3 100644
--- a/collectors/charts.d.plugin/hddtemp/hddtemp.chart.sh
+++ b/collectors/charts.d.plugin/hddtemp/hddtemp.chart.sh
@@ -21,7 +21,7 @@ hddtemp_priority=90000
# _check is called once, to find out if this chart should be enabled or not
hddtemp_check() {
- require_cmd nc || return 1
+ require_cmd nc || return 1
run nc $hddtemp_host $hddtemp_port && return 0 || return 1
}
@@ -29,17 +29,17 @@ hddtemp_check() {
hddtemp_create() {
if [ ${#hddtemp_disks[@]} -eq 0 ]; then
local all
- all=$(nc $hddtemp_host $hddtemp_port )
+ all=$(nc $hddtemp_host $hddtemp_port)
unset hddtemp_disks
# shellcheck disable=SC2190,SC2207
- hddtemp_disks=( $(grep -Po '/dev/[^|]+' <<< "$all" | cut -c 6-) )
+ hddtemp_disks=($(grep -Po '/dev/[^|]+' <<<"$all" | cut -c 6-))
fi
-# local disk_names
-# disk_names=(`sed -e 's/||/\n/g;s/^|//' <<< "$all" | cut -d '|' -f2 | tr ' ' '_'`)
+ # local disk_names
+ # disk_names=(`sed -e 's/||/\n/g;s/^|//' <<< "$all" | cut -d '|' -f2 | tr ' ' '_'`)
echo "CHART hddtemp.temperature 'disks_temp' 'temperature' 'Celsius' 'Disks temperature' 'hddtemp.temp' line $((hddtemp_priority)) $hddtemp_update_every"
- for i in $(seq 0 $((${#hddtemp_disks[@]}-1))); do
-# echo "DIMENSION ${hddtemp_disks[i]} ${disk_names[i]} absolute 1 1"
+ for i in $(seq 0 $((${#hddtemp_disks[@]} - 1))); do
+ # echo "DIMENSION ${hddtemp_disks[i]} ${disk_names[i]} absolute 1 1"
echo "DIMENSION ${hddtemp_disks[$i]} '' absolute 1 1"
done
return 0
@@ -49,12 +49,12 @@ hddtemp_create() {
#hddtemp_last=0
#hddtemp_count=0
hddtemp_update() {
-# local all=( `nc $hddtemp_host $hddtemp_port | sed -e 's/||/\n/g;s/^|//' | cut -d '|' -f3` )
-# local all=( `nc $hddtemp_host $hddtemp_port | awk 'BEGIN { FS="|" };{i=4; while (i <= NF) {print $i+0;i+=5;};}'` )
+ # local all=( `nc $hddtemp_host $hddtemp_port | sed -e 's/||/\n/g;s/^|//' | cut -d '|' -f3` )
+ # local all=( `nc $hddtemp_host $hddtemp_port | awk 'BEGIN { FS="|" };{i=4; while (i <= NF) {print $i+0;i+=5;};}'` )
OLD_IFS=$IFS
set -f
# shellcheck disable=SC2207
- IFS="|" all=( $(nc $hddtemp_host $hddtemp_port 2>/dev/null) )
+ IFS="|" all=($(nc $hddtemp_host $hddtemp_port 2>/dev/null))
set +f
IFS=$OLD_IFS
@@ -66,9 +66,9 @@ hddtemp_update() {
# write the result of the work.
echo "BEGIN hddtemp.temperature $1"
end=${#hddtemp_disks[@]}
- for ((i=0; i<end; i++)); do
+ for ((i = 0; i < end; i++)); do
# temperature - this will turn SLP to zero
- t=$(( ${all[ $((i * 5 + 3)) ]} ))
+ t=$((all[$((i * 5 + 3))]))
echo "SET ${hddtemp_disks[$i]} = $t"
done
echo "END"
diff --git a/collectors/charts.d.plugin/libreswan/README.md b/collectors/charts.d.plugin/libreswan/README.md
index 41026cf7..18c64507 100644
--- a/collectors/charts.d.plugin/libreswan/README.md
+++ b/collectors/charts.d.plugin/libreswan/README.md
@@ -40,3 +40,5 @@ netdata ALL = (root) NOPASSWD: /sbin/ipsec whack --trafficstatus
Make sure the path `/sbin/ipsec` matches your setup (execute `which ipsec` to find the right path).
---
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fcharts.d.plugin%2Flibreswan%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/collectors/charts.d.plugin/libreswan/libreswan.chart.sh b/collectors/charts.d.plugin/libreswan/libreswan.chart.sh
index 6e29f847..1a8f90b1 100644
--- a/collectors/charts.d.plugin/libreswan/libreswan.chart.sh
+++ b/collectors/charts.d.plugin/libreswan/libreswan.chart.sh
@@ -39,8 +39,7 @@ declare -A libreswan_tunnel_charts=()
# run the ipsec command
libreswan_ipsec() {
- if [ ${libreswan_sudo} -ne 0 ]
- then
+ if [ ${libreswan_sudo} -ne 0 ]; then
sudo -n "${IPSEC_CMD}" "${@}"
return $?
else
@@ -61,15 +60,15 @@ libreswan_get() {
libreswan_connected_tunnels=()
# convert the ipsec command output to a shell script
- # and source it to get the values
- # shellcheck disable=SC1090
+ # and source it to get the values
+ # shellcheck disable=SC1090
source <(
{
- libreswan_ipsec whack --status;
- libreswan_ipsec whack --trafficstatus;
+ libreswan_ipsec whack --status
+ libreswan_ipsec whack --trafficstatus
} | sed -n \
- -e "s|[0-9]\+ #\([0-9]\+\): \"\(.*\)\".*IPsec SA established.*newest IPSEC.*|libreswan_connected_tunnels[\"\1\"]=\"\2\"|p" \
- -e "s|[0-9]\+ #\([0-9]\+\): \"\(.*\)\",.* add_time=\([0-9]\+\),.* inBytes=\([0-9]\+\),.* outBytes=\([0-9]\+\).*|libreswan_traffic_in[\"\1\"]=\"\4\"; libreswan_traffic_out[\"\1\"]=\"\5\"; libreswan_established_add_time[\"\1\"]=\"\3\";|p"
+ -e "s|[0-9]\+ #\([0-9]\+\): \"\(.*\)\".*IPsec SA established.*newest IPSEC.*|libreswan_connected_tunnels[\"\1\"]=\"\2\"|p" \
+ -e "s|[0-9]\+ #\([0-9]\+\): \"\(.*\)\",.* add_time=\([0-9]\+\),.* inBytes=\([0-9]\+\),.* outBytes=\([0-9]\+\).*|libreswan_traffic_in[\"\1\"]=\"\4\"; libreswan_traffic_out[\"\1\"]=\"\5\"; libreswan_established_add_time[\"\1\"]=\"\3\";|p"
) || return 1
# check we got some data
@@ -88,10 +87,9 @@ libreswan_check() {
# make sure it is libreswan
# shellcheck disable=SC2143
- if [ -z "$(ipsec --version | grep -i libreswan)" ]
- then
- error "ipsec command is not Libreswan. Disabling Libreswan plugin."
- return 1
+ if [ -z "$(ipsec --version | grep -i libreswan)" ]; then
+ error "ipsec command is not Libreswan. Disabling Libreswan plugin."
+ return 1
fi
# check that we can collect data
@@ -125,8 +123,7 @@ EOF
# _create is called once, to create the charts
libreswan_create() {
local n
- for n in "${!libreswan_connected_tunnels[@]}"
- do
+ for n in "${!libreswan_connected_tunnels[@]}"; do
libreswan_create_one "${n}"
done
return 0
@@ -143,10 +140,10 @@ libreswan_update_one() {
[ -z "${id}" ] && libreswan_create_one "${name}"
- uptime=$(( libreswan_now - libreswan_established_add_time[${n}] ))
+ uptime=$((libreswan_now - libreswan_established_add_time[${n}]))
[ ${uptime} -lt 0 ] && uptime=0
- # write the result of the work.
+ # write the result of the work.
cat <<VALUESEOF
BEGIN libreswan.${id}_net ${microseconds}
SET in = ${libreswan_traffic_in[${n}]}
@@ -167,8 +164,7 @@ libreswan_update() {
libreswan_now=$(date +%s)
local n
- for n in "${!libreswan_connected_tunnels[@]}"
- do
+ for n in "${!libreswan_connected_tunnels[@]}"; do
libreswan_update_one "${n}" "${@}"
done
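
`libreswan_get()` above turns `ipsec whack` output into shell assignments with `sed` and `source <(...)`. A standalone sketch of the pattern on a canned, abbreviated `--trafficstatus` line (GNU sed, as the plugin assumes; keyed by tunnel name here for brevity, where the plugin keys by SA number):

```
declare -A traffic_in=() traffic_out=()

sample='006 #2: "tunnel-a", type=ESP, add_time=1549000000, inBytes=1234, outBytes=5678'

# shellcheck disable=SC1090
source <(
    echo "$sample" | sed -n \
        -e 's|.*#\([0-9]\+\): "\(.*\)",.* inBytes=\([0-9]\+\),.* outBytes=\([0-9]\+\).*|traffic_in["\2"]="\3"; traffic_out["\2"]="\4"|p'
)

echo "tunnel-a in=${traffic_in[tunnel-a]} out=${traffic_out[tunnel-a]} bytes"
```
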
diff --git a/collectors/charts.d.plugin/load_average/README.md b/collectors/charts.d.plugin/load_average/README.md
index 39d3b818..ef84b5bd 100644
--- a/collectors/charts.d.plugin/load_average/README.md
+++ b/collectors/charts.d.plugin/load_average/README.md
@@ -1,2 +1,6 @@
+# load_average
+
> THIS MODULE IS OBSOLETE.
> THE NETDATA DAEMON COLLECTS LOAD AVERAGE BY ITSELF
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fcharts.d.plugin%2Fload_average%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/collectors/charts.d.plugin/load_average/load_average.chart.sh b/collectors/charts.d.plugin/load_average/load_average.chart.sh
index b30cb850..841e3d9f 100644
--- a/collectors/charts.d.plugin/load_average/load_average.chart.sh
+++ b/collectors/charts.d.plugin/load_average/load_average.chart.sh
@@ -21,8 +21,7 @@ load_average_check() {
# - 0 to enable the chart
# - 1 to disable the chart
- if [ ${load_average_update_every} -lt 5 ]
- then
+ if [ ${load_average_update_every} -lt 5 ]; then
# there is no meaning for shorter than 5 seconds
# the kernel changes this value every 5 seconds
load_average_update_every=5
@@ -34,7 +33,7 @@ load_average_check() {
load_average_create() {
# create a chart with 3 dimensions
-cat <<EOF
+ cat <<EOF
CHART system.load '' "System Load Average" "load" load system.load line $((load_priority + 1)) $load_average_update_every
DIMENSION load1 '1 min' absolute 1 100
DIMENSION load5 '5 mins' absolute 1 100
@@ -52,10 +51,10 @@ load_average_update() {
# here we parse the system average load
# it is decimal (with 2 decimal digits), so we remove the dot and
# at the definition we have divisor = 100, to have the graph show the right value
- loadavg="`cat /proc/loadavg | sed -e "s/\.//g"`"
- load1=`echo $loadavg | cut -d ' ' -f 1`
- load5=`echo $loadavg | cut -d ' ' -f 2`
- load15=`echo $loadavg | cut -d ' ' -f 3`
+ loadavg="$(cat /proc/loadavg | sed -e "s/\.//g")"
+ load1=$(echo $loadavg | cut -d ' ' -f 1)
+ load5=$(echo $loadavg | cut -d ' ' -f 2)
+ load15=$(echo $loadavg | cut -d ' ' -f 3)
# write the result of the work.
cat <<VALUESEOF
@@ -68,4 +67,3 @@ VALUESEOF
return 0
}
-
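
`load_average_update()` above stores load as a fixed-point integer: the dot is stripped, and the chart's divisor of 100 restores the two decimals. A Linux-only sketch of that trick:

```
read -r load1 load5 load15 _ </proc/loadavg
echo "load1  = ${load1/./}   (chart divisor 100 -> ${load1})"
echo "load5  = ${load5/./}"
echo "load15 = ${load15/./}"
```
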
diff --git a/collectors/charts.d.plugin/loopsleepms.sh.inc b/collectors/charts.d.plugin/loopsleepms.sh.inc
index bdc032b9..e44eff68 100644
--- a/collectors/charts.d.plugin/loopsleepms.sh.inc
+++ b/collectors/charts.d.plugin/loopsleepms.sh.inc
@@ -2,10 +2,9 @@
# SPDX-License-Identifier: GPL-3.0-or-later
LOOPSLEEP_DATE="$(which date 2>/dev/null || command -v date 2>/dev/null)"
-if [ -z "$LOOPSLEEP_DATE" ]
- then
- echo >&2 "$0: ERROR: Cannot find the command 'date' in the system path."
- exit 1
+if [ -z "$LOOPSLEEP_DATE" ]; then
+ echo >&2 "$0: ERROR: Cannot find the command 'date' in the system path."
+ exit 1
fi
# -----------------------------------------------------------------------------
@@ -14,14 +13,13 @@ fi
now_ms=
LOOPSLEEPMS_HIGHRES=1
test "$($LOOPSLEEP_DATE +%N)" = "%N" && LOOPSLEEPMS_HIGHRES=0
-test -z "$($LOOPSLEEP_DATE +%N)" && LOOPSLEEPMS_HIGHRES=0
+test -z "$($LOOPSLEEP_DATE +%N)" && LOOPSLEEPMS_HIGHRES=0
current_time_ms_from_date() {
- if [ $LOOPSLEEPMS_HIGHRES -eq 0 ]
- then
- now_ms="$($LOOPSLEEP_DATE +'%s')000"
- else
- now_ms="$(( $( $LOOPSLEEP_DATE +'%s * 1000 + %-N / 1000000' ) ))"
- fi
+ if [ $LOOPSLEEPMS_HIGHRES -eq 0 ]; then
+ now_ms="$($LOOPSLEEP_DATE +'%s')000"
+ else
+ now_ms="$(($($LOOPSLEEP_DATE +'%s * 1000 + %-N / 1000000')))"
+ fi
}
# -----------------------------------------------------------------------------
@@ -32,55 +30,49 @@ current_time_ms_from_uptime_started="${now_ms}"
current_time_ms_from_uptime_last="${now_ms}"
current_time_ms_from_uptime_first=0
current_time_ms_from_uptime() {
- local up rest arr=() n
-
- read up rest </proc/uptime
- if [ $? -ne 0 ]
- then
- echo >&2 "$0: Cannot read /proc/uptime - falling back to current_time_ms_from_date()."
- current_time_ms="current_time_ms_from_date"
- current_time_ms_from_date
- current_time_ms_accuracy=1
- return
- fi
-
- arr=(${up//./ })
-
- if [ ${#arr[1]} -lt 1 ]
- then
- n="${arr[0]}000"
- elif [ ${#arr[1]} -lt 2 ]
- then
- n="${arr[0]}${arr[1]}00"
- elif [ ${#arr[1]} -lt 3 ]
- then
- n="${arr[0]}${arr[1]}0"
- else
- n="${arr[0]}${arr[1]}"
- fi
-
- now_ms=$((current_time_ms_from_uptime_started - current_time_ms_from_uptime_first + n))
-
- if [ "${now_ms}" -lt "${current_time_ms_from_uptime_last}" ]
- then
- echo >&2 "$0: Cannot use current_time_ms_from_uptime() - new time ${now_ms} is older than the last ${current_time_ms_from_uptime_last} - falling back to current_time_ms_from_date()."
- current_time_ms="current_time_ms_from_date"
- current_time_ms_from_date
- current_time_ms_accuracy=1
- fi
-
- current_time_ms_from_uptime_last="${now_ms}"
+ local up rest arr=() n
+
+ read up rest </proc/uptime
+ if [ $? -ne 0 ]; then
+ echo >&2 "$0: Cannot read /proc/uptime - falling back to current_time_ms_from_date()."
+ current_time_ms="current_time_ms_from_date"
+ current_time_ms_from_date
+ current_time_ms_accuracy=1
+ return
+ fi
+
+ arr=(${up//./ })
+
+ if [ ${#arr[1]} -lt 1 ]; then
+ n="${arr[0]}000"
+ elif [ ${#arr[1]} -lt 2 ]; then
+ n="${arr[0]}${arr[1]}00"
+ elif [ ${#arr[1]} -lt 3 ]; then
+ n="${arr[0]}${arr[1]}0"
+ else
+ n="${arr[0]}${arr[1]}"
+ fi
+
+ now_ms=$((current_time_ms_from_uptime_started - current_time_ms_from_uptime_first + n))
+
+ if [ "${now_ms}" -lt "${current_time_ms_from_uptime_last}" ]; then
+ echo >&2 "$0: Cannot use current_time_ms_from_uptime() - new time ${now_ms} is older than the last ${current_time_ms_from_uptime_last} - falling back to current_time_ms_from_date()."
+ current_time_ms="current_time_ms_from_date"
+ current_time_ms_from_date
+ current_time_ms_accuracy=1
+ fi
+
+ current_time_ms_from_uptime_last="${now_ms}"
}
current_time_ms_from_uptime
current_time_ms_from_uptime_first="$((now_ms - current_time_ms_from_uptime_started))"
current_time_ms_from_uptime_last="${current_time_ms_from_uptime_first}"
current_time_ms="current_time_ms_from_uptime"
current_time_ms_accuracy=10
-if [ "${current_time_ms_from_uptime_first}" -eq 0 ]
- then
- echo >&2 "$0: Invalid setup for current_time_ms_from_uptime() - falling back to current_time_ms_from_date()."
- current_time_ms="current_time_ms_from_date"
- current_time_ms_accuracy=1
+if [ "${current_time_ms_from_uptime_first}" -eq 0 ]; then
+ echo >&2 "$0: Invalid setup for current_time_ms_from_uptime() - falling back to current_time_ms_from_date()."
+ current_time_ms="current_time_ms_from_date"
+ current_time_ms_accuracy=1
fi
# -----------------------------------------------------------------------------
@@ -94,55 +86,48 @@ mysleep_fifo="${NETDATA_CACHE_DIR-/tmp}/.netdata_bash_sleep_timer_fifo"
[ -p "${mysleep_fifo}" ] && mysleep="mysleep_read"
mysleep_read() {
- read -t "${1}" <>"${mysleep_fifo}"
- ret=$?
- if [ $ret -le 128 ]
- then
- echo >&2 "$0: Cannot use read for sleeping (return code ${ret})."
- mysleep="sleep"
- ${mysleep} "${1}"
- fi
+ read -t "${1}" <>"${mysleep_fifo}"
+ ret=$?
+ if [ $ret -le 128 ]; then
+ echo >&2 "$0: Cannot use read for sleeping (return code ${ret})."
+ mysleep="sleep"
+ ${mysleep} "${1}"
+ fi
}
# -----------------------------------------------------------------------------
# use bash loadable module for sleep
mysleep_builtin() {
- builtin sleep "${1}"
- ret=$?
- if [ $ret -ne 0 ]
- then
- echo >&2 "$0: Cannot use builtin sleep for sleeping (return code ${ret})."
- mysleep="sleep"
- ${mysleep} "${1}"
- fi
+ builtin sleep "${1}"
+ ret=$?
+ if [ $ret -ne 0 ]; then
+ echo >&2 "$0: Cannot use builtin sleep for sleeping (return code ${ret})."
+ mysleep="sleep"
+ ${mysleep} "${1}"
+ fi
}
-if [ -z "${mysleep}" -a "$((BASH_VERSINFO[0] +0))" -ge 3 -a "${NETDATA_BASH_LOADABLES}" != "DISABLE" ]
- then
- # enable modules only for bash version 3+
-
- for bash_modules_path in ${BASH_LOADABLES_PATH//:/ } "$(pkg-config bash --variable=loadablesdir 2>/dev/null)" "/usr/lib/bash" "/lib/bash" "/lib64/bash" "/usr/local/lib/bash" "/usr/local/lib64/bash"
- do
- [ -z "${bash_modules_path}" -o ! -d "${bash_modules_path}" ] && continue
-
- # check for sleep
- for bash_module_sleep in "sleep" "sleep.so"
- do
- if [ -f "${bash_modules_path}/${bash_module_sleep}" ]
- then
- if enable -f "${bash_modules_path}/${bash_module_sleep}" sleep 2>/dev/null
- then
- mysleep="mysleep_builtin"
- # echo >&2 "$0: Using bash loadable ${bash_modules_path}/${bash_module_sleep} for sleep"
- break
- fi
- fi
-
- done
-
- [ ! -z "${mysleep}" ] && break
- done
+if [ -z "${mysleep}" -a "$((BASH_VERSINFO[0] + 0))" -ge 3 -a "${NETDATA_BASH_LOADABLES}" != "DISABLE" ]; then
+ # enable modules only for bash version 3+
+
+ for bash_modules_path in ${BASH_LOADABLES_PATH//:/ } "$(pkg-config bash --variable=loadablesdir 2>/dev/null)" "/usr/lib/bash" "/lib/bash" "/lib64/bash" "/usr/local/lib/bash" "/usr/local/lib64/bash"; do
+ [ -z "${bash_modules_path}" -o ! -d "${bash_modules_path}" ] && continue
+
+ # check for sleep
+ for bash_module_sleep in "sleep" "sleep.so"; do
+ if [ -f "${bash_modules_path}/${bash_module_sleep}" ]; then
+ if enable -f "${bash_modules_path}/${bash_module_sleep}" sleep 2>/dev/null; then
+ mysleep="mysleep_builtin"
+ # echo >&2 "$0: Using bash loadable ${bash_modules_path}/${bash_module_sleep} for sleep"
+ break
+ fi
+ fi
+
+ done
+
+ [ ! -z "${mysleep}" ] && break
+ done
fi
# -----------------------------------------------------------------------------
@@ -150,7 +135,6 @@ fi
[ -z "${mysleep}" ] && mysleep="sleep"
-
# -----------------------------------------------------------------------------
# this function is used to sleep a fraction of a second
# it calculates the difference between every time is called
@@ -163,60 +147,58 @@ LOOPSLEEPMS_LASTSLEEP=0
LOOPSLEEPMS_LASTWORK=0
loopsleepms() {
- local tellwork=0 t="${1}" div s m now mstosleep
-
- if [ "${t}" = "tellwork" ]
- then
- tellwork=1
- shift
- t="${1}"
- fi
-
- # $t = the time in seconds to wait
-
- # if high resolution is not supported
- # just sleep the time requested, in seconds
- if [ ${LOOPSLEEPMS_HIGHRES} -eq 0 ]
- then
- sleep ${t}
- return
- fi
-
- # get the current time, in ms in ${now_ms}
- ${current_time_ms}
-
- # calculate ms since last run
- [ ${LOOPSLEEPMS_LASTRUN} -gt 0 ] && \
- LOOPSLEEPMS_LASTWORK=$((now_ms - LOOPSLEEPMS_LASTRUN - LOOPSLEEPMS_LASTSLEEP + current_time_ms_accuracy))
- # echo "# last loop's work took $LOOPSLEEPMS_LASTWORK ms"
-
- # remember this run
- LOOPSLEEPMS_LASTRUN=${now_ms}
-
- # calculate the next run
- LOOPSLEEPMS_NEXTRUN=$(( ( now_ms - ( now_ms % ( t * 1000 ) ) ) + ( t * 1000 ) ))
-
- # calculate ms to sleep
- mstosleep=$(( LOOPSLEEPMS_NEXTRUN - now_ms + current_time_ms_accuracy ))
- # echo "# mstosleep is $mstosleep ms"
-
- # if we are too slow, sleep some time
- test ${mstosleep} -lt 200 && mstosleep=200
-
- s=$(( mstosleep / 1000 ))
- m=$(( mstosleep - (s * 1000) ))
- [ "${m}" -lt 100 ] && m="0${m}"
- [ "${m}" -lt 10 ] && m="0${m}"
-
- test $tellwork -eq 1 && echo >&2 " >>> PERFORMANCE >>> WORK TOOK ${LOOPSLEEPMS_LASTWORK} ms ( $((LOOPSLEEPMS_LASTWORK * 100 / 1000)).$((LOOPSLEEPMS_LASTWORK % 10))% cpu ) >>> SLEEPING ${mstosleep} ms"
-
- # echo "# sleeping ${s}.${m}"
- # echo
- ${mysleep} ${s}.${m}
-
- # keep the values we need
- # for our next run
- LOOPSLEEPMS_LASTSLEEP=$mstosleep
+ local tellwork=0 t="${1}" div s m now mstosleep
+
+ if [ "${t}" = "tellwork" ]; then
+ tellwork=1
+ shift
+ t="${1}"
+ fi
+
+ # $t = the time in seconds to wait
+
+ # if high resolution is not supported
+ # just sleep the time requested, in seconds
+ if [ ${LOOPSLEEPMS_HIGHRES} -eq 0 ]; then
+ sleep ${t}
+ return
+ fi
+
+ # get the current time, in ms in ${now_ms}
+ ${current_time_ms}
+
+ # calculate ms since last run
+ [ ${LOOPSLEEPMS_LASTRUN} -gt 0 ] &&
+ LOOPSLEEPMS_LASTWORK=$((now_ms - LOOPSLEEPMS_LASTRUN - LOOPSLEEPMS_LASTSLEEP + current_time_ms_accuracy))
+ # echo "# last loop's work took $LOOPSLEEPMS_LASTWORK ms"
+
+ # remember this run
+ LOOPSLEEPMS_LASTRUN=${now_ms}
+
+ # calculate the next run
+ LOOPSLEEPMS_NEXTRUN=$(((now_ms - (now_ms % (t * 1000))) + (t * 1000)))
+
+ # calculate ms to sleep
+ mstosleep=$((LOOPSLEEPMS_NEXTRUN - now_ms + current_time_ms_accuracy))
+ # echo "# mstosleep is $mstosleep ms"
+
+ # if we are too slow, sleep some time
+ test ${mstosleep} -lt 200 && mstosleep=200
+
+ s=$((mstosleep / 1000))
+ m=$((mstosleep - (s * 1000)))
+ [ "${m}" -lt 100 ] && m="0${m}"
+ [ "${m}" -lt 10 ] && m="0${m}"
+
+ test $tellwork -eq 1 && echo >&2 " >>> PERFORMANCE >>> WORK TOOK ${LOOPSLEEPMS_LASTWORK} ms ( $((LOOPSLEEPMS_LASTWORK * 100 / 1000)).$((LOOPSLEEPMS_LASTWORK % 10))% cpu ) >>> SLEEPING ${mstosleep} ms"
+
+ # echo "# sleeping ${s}.${m}"
+ # echo
+ ${mysleep} ${s}.${m}
+
+ # keep the values we need
+ # for our next run
+ LOOPSLEEPMS_LASTSLEEP=$mstosleep
}
# test it
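
The `mysleep_read` fallback above sleeps by timing out a `read` on a fifo that never receives data, avoiding a fork of `/bin/sleep` on every iteration. A self-contained demo of the trick (illustrative fifo path; bash 4+ for the fractional `-t`):

```
fifo="/tmp/.demo_sleep_fifo.$$"
mkfifo "$fifo" || exit 1

mysleep_read() {
    # <> opens the fifo read/write so the open itself cannot block,
    # then read's timeout does the actual sleeping
    read -t "$1" <>"$fifo"
}

time mysleep_read 0.25
rm -f "$fifo"
```
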
diff --git a/collectors/charts.d.plugin/mem_apps/README.md b/collectors/charts.d.plugin/mem_apps/README.md
index cd8adf0a..a9513e9f 100644
--- a/collectors/charts.d.plugin/mem_apps/README.md
+++ b/collectors/charts.d.plugin/mem_apps/README.md
@@ -1,2 +1,6 @@
+# mem_apps
+
> THIS MODULE IS OBSOLETE.
-> USE APPS.PLUGIN.
+> USE [APPS.PLUGIN](../../apps.plugin).
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fcharts.d.plugin%2Fmem_apps%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/collectors/charts.d.plugin/mem_apps/mem_apps.chart.sh b/collectors/charts.d.plugin/mem_apps/mem_apps.chart.sh
index a13dc71f..b9b84a46 100644
--- a/collectors/charts.d.plugin/mem_apps/mem_apps.chart.sh
+++ b/collectors/charts.d.plugin/mem_apps/mem_apps.chart.sh
@@ -20,8 +20,7 @@ mem_apps_check() {
# - 0 to enable the chart
# - 1 to disable the chart
- if [ -z "$mem_apps_apps" ]
- then
+ if [ -z "$mem_apps_apps" ]; then
error "manual configuration required: please set mem_apps_apps='command1 command2 ...' in $confd/mem_apps_apps.conf"
return 1
fi
@@ -35,8 +34,7 @@ mem_apps_create() {
echo "CHART chartsd_apps.mem '' 'Apps Memory' MB apps apps.mem stacked 20000 $mem_apps_update_every"
local x=
- for x in $mem_apps_apps
- do
+ for x in $mem_apps_apps; do
echo "DIMENSION $x $x absolute 1 1024"
# this string is needed later in the update() function
@@ -52,9 +50,10 @@ mem_apps_update() {
# remember: KEEP IT SIMPLE AND SHORT
echo "BEGIN chartsd_apps.mem"
- ps -o comm,rss -C "$mem_apps_apps" |\
- grep -v "^COMMAND" |\
- ( sed -e "s/ \+/ /g" -e "s/ /+=/g";
+ ps -o comm,rss -C "$mem_apps_apps" |
+ grep -v "^COMMAND" |
+ (
+ sed -e "s/ \+/ /g" -e "s/ /+=/g"
echo "$mem_apps_bc_finalze"
) | bc
echo "END"
diff --git a/collectors/charts.d.plugin/mysql/README.md b/collectors/charts.d.plugin/mysql/README.md
index 6765b53a..e52449a4 100644
--- a/collectors/charts.d.plugin/mysql/README.md
+++ b/collectors/charts.d.plugin/mysql/README.md
@@ -1,8 +1,8 @@
-> THIS MODULE IS OBSOLETE.
-> USE THE PYTHON ONE - IT SUPPORTS MULTIPLE JOBS AND IT IS MORE EFFICIENT
-
# mysql
+> THIS MODULE IS OBSOLETE.
+> USE [THE PYTHON ONE](../../python.d.plugin/mysql) - IT SUPPORTS MULTIPLE JOBS AND IT IS MORE EFFICIENT
+
The plugin will monitor one or more mysql servers.
It will produce the following charts:
@@ -79,3 +79,5 @@ If no configuration is given, the plugin will attempt to connect to mysql server
---
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fcharts.d.plugin%2Fmysql%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/collectors/charts.d.plugin/mysql/mysql.chart.sh b/collectors/charts.d.plugin/mysql/mysql.chart.sh
index 37e8e2a7..e1207dc9 100644
--- a/collectors/charts.d.plugin/mysql/mysql.chart.sh
+++ b/collectors/charts.d.plugin/mysql/mysql.chart.sh
@@ -27,19 +27,19 @@ mysql_get() {
#arr=($(run "${@}" -e "SHOW GLOBAL STATUS WHERE value REGEXP '^[0-9]';" | egrep "^(Bytes|Slow_|Que|Handl|Table|Selec|Sort_|Creat|Conne|Abort|Binlo|Threa|Innod|Qcach|Key_|Open)" ))
#arr=($(run "${@}" -N -e "SHOW GLOBAL STATUS;" | egrep "^(Bytes|Slow_|Que|Handl|Table|Selec|Sort_|Creat|Conne|Abort|Binlo|Threa|Innod|Qcach|Key_|Open)[^ ]+\s[0-9]" ))
# shellcheck disable=SC2207
- arr=($(run "${@}" -N -e "SHOW GLOBAL STATUS;" | grep -E "^(Bytes|Slow_|Que|Handl|Table|Selec|Sort_|Creat|Conne|Abort|Binlo|Threa|Innod|Qcach|Key_|Open)[^[:space:]]+[[:space:]]+[0-9]+" ))
+ arr=($(run "${@}" -N -e "SHOW GLOBAL STATUS;" | grep -E "^(Bytes|Slow_|Que|Handl|Table|Selec|Sort_|Creat|Conne|Abort|Binlo|Threa|Innod|Qcach|Key_|Open)[^[:space:]]+[[:space:]]+[0-9]+"))
IFS="${oIFS}"
[ "${#arr[@]}" -lt 3 ] && return 1
local end=${#arr[@]}
- for ((i=2;i<end;i+=2)); do
- mysql_data["${arr[$i]}"]=${arr[$i+1]}
+ for ((i = 2; i < end; i += 2)); do
+ mysql_data["${arr[$i]}"]=${arr[i + 1]}
done
[ -z "${mysql_data[Connections]}" ] && return 1
mysql_data[Thread_cache_misses]=0
- [ $(( mysql_data[Connections] + 1 - 1 )) -gt 0 ] && mysql_data[Thread_cache_misses]=$(( mysql_data[Threads_created] * 10000 / mysql_data[Connections] ))
+ [ $((mysql_data[Connections] + 1 - 1)) -gt 0 ] && mysql_data[Thread_cache_misses]=$((mysql_data[Threads_created] * 10000 / mysql_data[Connections]))
return 0
}
@@ -51,23 +51,20 @@ mysql_check() {
local x m mysql_cmd tryroot=0 unconfigured=0
- if [ "${1}" = "tryroot" ]
- then
+ if [ "${1}" = "tryroot" ]; then
tryroot=1
shift
fi
- # shellcheck disable=SC2230
+ # shellcheck disable=SC2230
[ -z "${mysql_cmd}" ] && mysql_cmd="$(which mysql 2>/dev/null || command -v mysql 2>/dev/null)"
- if [ ${#mysql_opts[@]} -eq 0 ]
- then
+ if [ ${#mysql_opts[@]} -eq 0 ]; then
unconfigured=1
mysql_cmds[local]="$mysql_cmd"
- if [ $tryroot -eq 1 ]
- then
+ if [ $tryroot -eq 1 ]; then
# the user has not configured us for mysql access
# if the root user is passwordless in mysql, we can
# attempt to connect to mysql as root
@@ -78,19 +75,16 @@ mysql_check() {
fi
# check once if the url works
- for m in "${!mysql_opts[@]}"
- do
+ for m in "${!mysql_opts[@]}"; do
[ -z "${mysql_cmds[$m]}" ] && mysql_cmds[$m]="$mysql_cmd"
- if [ -z "${mysql_cmds[$m]}" ]
- then
+ if [ -z "${mysql_cmds[$m]}" ]; then
# shellcheck disable=SC2154
error "cannot get mysql command for '${m}'. Please set mysql_cmds[$m]='/path/to/mysql', in $confd/mysql.conf"
fi
mysql_get "${mysql_cmds[$m]}" ${mysql_opts[$m]}
# shellcheck disable=SC2181
- if [ ! $? -eq 0 ]
- then
+ if [ ! $? -eq 0 ]; then
error "cannot get global status for '$m'. Please set mysql_opts[$m]='options' to whatever needed to get connected to the mysql server, in $confd/mysql.conf"
unset "mysql_cmds[$m]"
unset "mysql_opts[$m]"
@@ -98,13 +92,11 @@ mysql_check() {
continue
fi
- mysql_ids[$m]="$( fixid "$m" )"
+ mysql_ids[$m]="$(fixid "$m")"
done
- if [ ${#mysql_opts[@]} -eq 0 ]
- then
- if [ ${unconfigured} -eq 1 ] && [ ${tryroot} -eq 0 ]
- then
+ if [ ${#mysql_opts[@]} -eq 0 ]; then
+ if [ ${unconfigured} -eq 1 ] && [ ${tryroot} -eq 0 ]; then
mysql_check tryroot "${@}"
return $?
else
@@ -120,8 +112,7 @@ mysql_create() {
local x
# create the charts
- for x in "${mysql_ids[@]}"
- do
+ for x in "${mysql_ids[@]}"; do
cat <<EOF
CHART mysql_$x.net '' "mysql Bandwidth" "kilobits/s" bandwidth mysql.net area $((mysql_priority + 1)) $mysql_update_every
DIMENSION Bytes_received in incremental 8 1024
@@ -230,7 +221,7 @@ DIMENSION Innodb_buffer_pool_pages_flushed flushed incremental -1 1
DIMENSION Innodb_buffer_pool_pages_misc misc absolute -1 1
DIMENSION Innodb_buffer_pool_pages_total total absolute 1 1
-CHART mysql_$x.innodb_buffer_pool_bytes '' "mysql InnoDB Buffer Pool Bytes" "MB" innodb mysql.innodb_buffer_pool_bytes area $((mysql_priority + 21)) $mysql_update_every
+CHART mysql_$x.innodb_buffer_pool_bytes '' "mysql InnoDB Buffer Pool Bytes" "MiB" innodb mysql.innodb_buffer_pool_bytes area $((mysql_priority + 21)) $mysql_update_every
DIMENSION Innodb_buffer_pool_bytes_data data absolute 1 $((1024 * 1024))
DIMENSION Innodb_buffer_pool_bytes_dirty dirty absolute -1 $((1024 * 1024))
@@ -256,7 +247,7 @@ DIMENSION Qcache_not_cached 'not cached' incremental -1 1
CHART mysql_$x.qcache '' "mysql QCache Queries in Cache" "queries" qcache mysql.qcache line $((mysql_priority + 26)) $mysql_update_every
DIMENSION Qcache_queries_in_cache queries absolute 1 1
-CHART mysql_$x.qcache_freemem '' "mysql QCache Free Memory" "MB" qcache mysql.qcache_freemem area $((mysql_priority + 27)) $mysql_update_every
+CHART mysql_$x.qcache_freemem '' "mysql QCache Free Memory" "MiB" qcache mysql.qcache_freemem area $((mysql_priority + 27)) $mysql_update_every
DIMENSION Qcache_free_memory free absolute 1 $((1024 * 1024))
CHART mysql_$x.qcache_memblocks '' "mysql QCache Memory Blocks" "blocks" qcache mysql.qcache_memblocks line $((mysql_priority + 28)) $mysql_update_every
@@ -283,18 +274,16 @@ CHART mysql_$x.files_rate '' "mysql Opened Files Rate" "files/s" files mysql.fil
DIMENSION Opened_files files incremental 1 1
EOF
- if [ ! -z "${mysql_data[Binlog_stmt_cache_disk_use]}" ]
- then
- cat <<EOF
+ if [ ! -z "${mysql_data[Binlog_stmt_cache_disk_use]}" ]; then
+ cat <<EOF
CHART mysql_$x.binlog_stmt_cache '' "mysql Binlog Statement Cache" "statements/s" binlog mysql.binlog_stmt_cache line $((mysql_priority + 50)) $mysql_update_every
DIMENSION Binlog_stmt_cache_disk_use disk incremental 1 1
DIMENSION Binlog_stmt_cache_use all incremental 1 1
EOF
- fi
+ fi
- if [ ! -z "${mysql_data[Connection_errors_accept]}" ]
- then
- cat <<EOF
+ if [ ! -z "${mysql_data[Connection_errors_accept]}" ]; then
+ cat <<EOF
CHART mysql_$x.connection_errors '' "mysql Connection Errors" "connections/s" connections mysql.connection_errors line $((mysql_priority + 51)) $mysql_update_every
DIMENSION Connection_errors_accept accept incremental 1 1
DIMENSION Connection_errors_internal internal incremental 1 1
@@ -303,13 +292,12 @@ DIMENSION Connection_errors_peer_addr peer_addr incremental 1 1
DIMENSION Connection_errors_select select incremental 1 1
DIMENSION Connection_errors_tcpwrap tcpwrap incremental 1 1
EOF
- fi
+ fi
done
return 0
}
-
mysql_update() {
# the first argument to this function is the microseconds since last update
	# pass this parameter to the BEGIN statement (see below).
@@ -319,14 +307,12 @@ mysql_update() {
# remember: KEEP IT SIMPLE AND SHORT
local m x
- for m in "${!mysql_ids[@]}"
- do
+ for m in "${!mysql_ids[@]}"; do
x="${mysql_ids[$m]}"
mysql_get "${mysql_cmds[$m]}" ${mysql_opts[$m]}
# shellcheck disable=SC2181
- if [ $? -ne 0 ]
- then
+ if [ $? -ne 0 ]; then
unset "mysql_ids[$m]"
unset "mysql_opts[$m]"
unset "mysql_cmds[$m]"
@@ -497,8 +483,7 @@ SET Opened_files = ${mysql_data[Opened_files]}
END
VALUESEOF
- if [ ! -z "${mysql_data[Binlog_stmt_cache_disk_use]}" ]
- then
+ if [ ! -z "${mysql_data[Binlog_stmt_cache_disk_use]}" ]; then
cat <<VALUESEOF
BEGIN mysql_$x.binlog_stmt_cache $1
SET Binlog_stmt_cache_disk_use = ${mysql_data[Binlog_stmt_cache_disk_use]}
@@ -507,8 +492,7 @@ END
VALUESEOF
fi
- if [ ! -z "${mysql_data[Connection_errors_accept]}" ]
- then
+ if [ ! -z "${mysql_data[Connection_errors_accept]}" ]; then
cat <<VALUESEOF
BEGIN mysql_$x.connection_errors $1
SET Connection_errors_accept = ${mysql_data[Connection_errors_accept]}
@@ -525,4 +509,3 @@ VALUESEOF
[ ${#mysql_ids[@]} -eq 0 ] && error "no mysql servers left active." && return 1
return 0
}
-
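
`mysql_get()` above flattens `SHOW GLOBAL STATUS` output into an array of alternating names and values, then builds an associative array from it. The same idea on canned output, no server needed (the derived thread-cache-miss metric is scaled by 10000 so a chart divisor of 100 shows a percentage):

```
declare -A mysql_data=()

sample="Bytes_received 1024
Bytes_sent 2048
Connections 10
Threads_created 2"

# shellcheck disable=SC2206
arr=($sample)                     # flatten: name value name value ...
for ((i = 0; i < ${#arr[@]}; i += 2)); do
    mysql_data["${arr[$i]}"]=${arr[i + 1]}
done

echo "Connections: ${mysql_data[Connections]}"
echo "Thread cache misses: $((mysql_data[Threads_created] * 10000 / mysql_data[Connections]))"
```
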
diff --git a/collectors/charts.d.plugin/nginx/README.md b/collectors/charts.d.plugin/nginx/README.md
index d82951aa..42a4f812 100644
--- a/collectors/charts.d.plugin/nginx/README.md
+++ b/collectors/charts.d.plugin/nginx/README.md
@@ -1,2 +1,6 @@
+# nginx
+
> THIS MODULE IS OBSOLETE.
-> USE THE PYTHON ONE - IT SUPPORTS MULTIPLE JOBS AND IT IS MORE EFFICIENT
+> USE [THE PYTHON ONE](../../python.d.plugin/nginx) - IT SUPPORTS MULTIPLE JOBS AND IT IS MORE EFFICIENT
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fcharts.d.plugin%2Fnginx%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/collectors/charts.d.plugin/nginx/nginx.chart.sh b/collectors/charts.d.plugin/nginx/nginx.chart.sh
index 14dda083..812de2cb 100644
--- a/collectors/charts.d.plugin/nginx/nginx.chart.sh
+++ b/collectors/charts.d.plugin/nginx/nginx.chart.sh
@@ -32,16 +32,15 @@ nginx_get() {
# shellcheck disable=SC2181
if [ $? -ne 0 ] || [ "${#nginx_response[@]}" -eq 0 ]; then return 1; fi
- if [ "${nginx_response[0]}" != "Active" ] ||\
- [ "${nginx_response[1]}" != "connections:" ] ||\
- [ "${nginx_response[3]}" != "server" ] ||\
- [ "${nginx_response[4]}" != "accepts" ] ||\
- [ "${nginx_response[5]}" != "handled" ] ||\
- [ "${nginx_response[6]}" != "requests" ] ||\
- [ "${nginx_response[10]}" != "Reading:" ] ||\
- [ "${nginx_response[12]}" != "Writing:" ] ||\
- [ "${nginx_response[14]}" != "Waiting:" ]
- then
+ if [ "${nginx_response[0]}" != "Active" ] ||
+ [ "${nginx_response[1]}" != "connections:" ] ||
+ [ "${nginx_response[3]}" != "server" ] ||
+ [ "${nginx_response[4]}" != "accepts" ] ||
+ [ "${nginx_response[5]}" != "handled" ] ||
+ [ "${nginx_response[6]}" != "requests" ] ||
+ [ "${nginx_response[10]}" != "Reading:" ] ||
+ [ "${nginx_response[12]}" != "Writing:" ] ||
+ [ "${nginx_response[14]}" != "Waiting:" ]; then
error "Invalid response from nginx server: ${nginx_response[*]}"
return 1
fi
@@ -54,14 +53,13 @@ nginx_get() {
nginx_writing="${nginx_response[13]}"
nginx_waiting="${nginx_response[15]}"
- if [ -z "${nginx_active_connections}" ] ||\
- [ -z "${nginx_accepts}" ] ||\
- [ -z "${nginx_handled}" ] ||\
- [ -z "${nginx_requests}" ] ||\
- [ -z "${nginx_reading}" ] ||\
- [ -z "${nginx_writing}" ] ||\
- [ -z "${nginx_waiting}" ]
- then
+ if [ -z "${nginx_active_connections}" ] ||
+ [ -z "${nginx_accepts}" ] ||
+ [ -z "${nginx_handled}" ] ||
+ [ -z "${nginx_requests}" ] ||
+ [ -z "${nginx_reading}" ] ||
+ [ -z "${nginx_writing}" ] ||
+ [ -z "${nginx_waiting}" ]; then
error "empty values got from nginx server: ${nginx_response[*]}"
return 1
fi
@@ -74,8 +72,7 @@ nginx_check() {
nginx_get
# shellcheck disable=2181
- if [ $? -ne 0 ]
- then
+ if [ $? -ne 0 ]; then
# shellcheck disable=SC2154
error "cannot find stub_status on URL '${nginx_url}'. Please set nginx_url='http://nginx.server/stub_status' in $confd/nginx.conf"
return 1
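
`nginx_get()` above parses `stub_status` purely by word position, validating the fixed labels first. The same indexing on the canned payload format, without curl or a live `${nginx_url}`:

```
response='Active connections: 291
server accepts handled requests
 16630948 16630948 31070465
Reading: 6 Writing: 179 Waiting: 106'

# shellcheck disable=SC2206
nginx_response=($response)        # whitespace split, newlines included

[ "${nginx_response[0]}" = "Active" ] || { echo "unexpected payload"; exit 1; }

echo "active:   ${nginx_response[2]}"
echo "accepts:  ${nginx_response[7]}"
echo "handled:  ${nginx_response[8]}"
echo "requests: ${nginx_response[9]}"
echo "reading:  ${nginx_response[11]}"
echo "writing:  ${nginx_response[13]}"
echo "waiting:  ${nginx_response[15]}"
```
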
diff --git a/collectors/charts.d.plugin/nut/README.md b/collectors/charts.d.plugin/nut/README.md
index 71906f55..3e169936 100644
--- a/collectors/charts.d.plugin/nut/README.md
+++ b/collectors/charts.d.plugin/nut/README.md
@@ -57,3 +57,5 @@ nut_update_every=2
```
---
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fcharts.d.plugin%2Fnut%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/collectors/charts.d.plugin/nut/nut.chart.sh b/collectors/charts.d.plugin/nut/nut.chart.sh
index 7e252f32..933d3561 100644
--- a/collectors/charts.d.plugin/nut/nut.chart.sh
+++ b/collectors/charts.d.plugin/nut/nut.chart.sh
@@ -34,8 +34,7 @@ nut_get_all() {
nut_get() {
run -t $nut_timeout upsc "$1"
- if [ "${nut_clients_chart}" -eq "1" ]
- then
+ if [ "${nut_clients_chart}" -eq "1" ]; then
printf "ups.connected_clients: "
run -t $nut_timeout upsc -c "$1" | wc -l
fi
@@ -51,27 +50,23 @@ nut_check() {
require_cmd upsc || return 1
- [ -z "$nut_ups" ] && nut_ups="$( nut_get_all )"
+ [ -z "$nut_ups" ] && nut_ups="$(nut_get_all)"
- for x in $nut_ups
- do
+ for x in $nut_ups; do
nut_get "$x" >/dev/null
# shellcheck disable=SC2181
- if [ $? -eq 0 ]
- then
- if [ ! -z "${nut_names[${x}]}" ]
- then
- nut_ids[$x]="$( fixid "${nut_names[${x}]}" )"
+ if [ $? -eq 0 ]; then
+ if [ ! -z "${nut_names[${x}]}" ]; then
+ nut_ids[$x]="$(fixid "${nut_names[${x}]}")"
else
- nut_ids[$x]="$( fixid "$x" )"
+ nut_ids[$x]="$(fixid "$x")"
fi
continue
fi
error "cannot get information for NUT UPS '$x'."
done
- if [ ${#nut_ids[@]} -eq 0 ]
- then
+ if [ ${#nut_ids[@]} -eq 0 ]; then
# shellcheck disable=SC2154
error "Cannot find UPSes - please set nut_ups='ups_name' in $confd/nut.conf"
return 1
@@ -84,8 +79,7 @@ nut_create() {
# create the charts
local x
- for x in "${nut_ids[@]}"
- do
+ for x in "${nut_ids[@]}"; do
cat <<EOF
CHART nut_$x.charge '' "UPS Charge" "percentage" ups nut.charge area $((nut_priority + 1)) $nut_update_every
DIMENSION battery_charge charge absolute 1 100
@@ -121,20 +115,18 @@ CHART nut_$x.temp '' "UPS Temperature" "temperature" ups nut.temperature line $(
DIMENSION temp temp absolute 1 100
EOF
- if [ "${nut_clients_chart}" = "1" ]
- then
- cat <<EOF2
+ if [ "${nut_clients_chart}" = "1" ]; then
+ cat <<EOF2
CHART nut_$x.clients '' "UPS Connected Clients" "clients" ups nut.clients area $((nut_priority + 9)) $nut_update_every
DIMENSION clients '' absolute 1 1
EOF2
- fi
+ fi
done
return 0
}
-
nut_update() {
# the first argument to this function is the microseconds since last update
	# pass this parameter to the BEGIN statement (see below).
@@ -144,8 +136,7 @@ nut_update() {
# remember: KEEP IT SIMPLE AND SHORT
local i x
- for i in "${!nut_ids[@]}"
- do
+ for i in "${!nut_ids[@]}"; do
x="${nut_ids[$i]}"
nut_get "$i" | awk "
BEGIN {
diff --git a/collectors/charts.d.plugin/opensips/README.md b/collectors/charts.d.plugin/opensips/README.md
index e69de29b..cb056da8 100644
--- a/collectors/charts.d.plugin/opensips/README.md
+++ b/collectors/charts.d.plugin/opensips/README.md
@@ -0,0 +1,7 @@
+# OpenSIPS
+
+*Under construction*
+
+Collects OpenSIPS metrics.
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fcharts.d.plugin%2Fopensips%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/collectors/charts.d.plugin/opensips/opensips.chart.sh b/collectors/charts.d.plugin/opensips/opensips.chart.sh
index c227bd4f..b42462d6 100644
--- a/collectors/charts.d.plugin/opensips/opensips.chart.sh
+++ b/collectors/charts.d.plugin/opensips/opensips.chart.sh
@@ -14,8 +14,8 @@ opensips_timeout=2
opensips_priority=80000
opensips_get_stats() {
- run -t $opensips_timeout "$opensips_cmd" $opensips_opts |\
- grep "^\(core\|dialog\|net\|registrar\|shmem\|siptrace\|sl\|tm\|uri\|usrloc\):[a-zA-Z0-9_-]\+[[:space:]]*[=:]\+[[:space:]]*[0-9]\+[[:space:]]*$" |\
+ run -t $opensips_timeout "$opensips_cmd" $opensips_opts |
+ grep "^\(core\|dialog\|net\|registrar\|shmem\|siptrace\|sl\|tm\|uri\|usrloc\):[a-zA-Z0-9_-]\+[[:space:]]*[=:]\+[[:space:]]*[0-9]\+[[:space:]]*$" |
sed \
-e "s|[[:space:]]*[=:]\+[[:space:]]*\([0-9]\+\)[[:space:]]*$|=\1|g" \
-e "s|[[:space:]:-]\+|_|g" \
@@ -29,8 +29,7 @@ opensips_get_stats() {
opensips_check() {
# if the user did not provide an opensips_cmd
# try to find it in the system
- if [ -z "$opensips_cmd" ]
- then
+ if [ -z "$opensips_cmd" ]; then
require_cmd opensipsctl || return 1
fi
@@ -38,8 +37,7 @@ opensips_check() {
local x
x="$(opensips_get_stats | grep "^opensips_core_")"
# shellcheck disable=SC2181
- if [ ! $? -eq 0 ] || [ -z "$x" ]
- then
+ if [ ! $? -eq 0 ] || [ -z "$x" ]; then
error "cannot get global status. Please set opensips_opts='options' whatever needed to get connected to opensips server, in $confd/opensips.conf"
return 1
fi
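
`opensips_get_stats()` above normalises `stat:name = value` lines into eval-able shell assignments. One canned line pushed through the same grep/sed chain:

```
echo 'core:rcv_requests = 100' |
    grep "^core:[a-zA-Z0-9_-]\+[[:space:]]*[=:]\+[[:space:]]*[0-9]\+[[:space:]]*$" |
    sed -e "s|[[:space:]]*[=:]\+[[:space:]]*\([0-9]\+\)[[:space:]]*$|=\1|g" \
        -e "s|[[:space:]:-]\+|_|g" \
        -e "s|^|opensips_|g"
# prints: opensips_core_rcv_requests=100  (ready to eval into a variable)
```
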
diff --git a/collectors/charts.d.plugin/phpfpm/README.md b/collectors/charts.d.plugin/phpfpm/README.md
index d82951aa..36462ba9 100644
--- a/collectors/charts.d.plugin/phpfpm/README.md
+++ b/collectors/charts.d.plugin/phpfpm/README.md
@@ -1,2 +1,6 @@
+# phpfpm
+
> THIS MODULE IS OBSOLETE.
-> USE THE PYTHON ONE - IT SUPPORTS MULTIPLE JOBS AND IT IS MORE EFFICIENT
+> USE [THE PYTHON ONE](../../python.d.plugin/phpfpm) - IT SUPPORTS MULTIPLE JOBS AND IT IS MORE EFFICIENT
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fcharts.d.plugin%2Fphpfpm%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/collectors/charts.d.plugin/phpfpm/phpfpm.chart.sh b/collectors/charts.d.plugin/phpfpm/phpfpm.chart.sh
index 1af7910b..b1edb237 100644
--- a/collectors/charts.d.plugin/phpfpm/phpfpm.chart.sh
+++ b/collectors/charts.d.plugin/phpfpm/phpfpm.chart.sh
@@ -44,17 +44,7 @@ phpfpm_get() {
return 1
fi
- if [[ "${phpfpm_response[0]}" != "pool:" \
- || "${phpfpm_response[2]}" != "process" \
- || "${phpfpm_response[5]}" != "start" \
- || "${phpfpm_response[12]}" != "accepted" \
- || "${phpfpm_response[15]}" != "listen" \
- || "${phpfpm_response[16]}" != "queue:" \
- || "${phpfpm_response[26]}" != "idle" \
- || "${phpfpm_response[29]}" != "active" \
- || "${phpfpm_response[32]}" != "total" \
- ]]
- then
+ if [[ ${phpfpm_response[0]} != "pool:" || ${phpfpm_response[2]} != "process" || ${phpfpm_response[5]} != "start" || ${phpfpm_response[12]} != "accepted" || ${phpfpm_response[15]} != "listen" || ${phpfpm_response[16]} != "queue:" || ${phpfpm_response[26]} != "idle" || ${phpfpm_response[29]} != "active" || ${phpfpm_response[32]} != "total" ]]; then
error "invalid response from phpfpm status server: ${phpfpm_response[*]}"
return 1
fi
@@ -71,27 +61,13 @@ phpfpm_get() {
phpfpm_total_processes="${phpfpm_response[34]}"
phpfpm_max_active_processes="${phpfpm_response[38]}"
phpfpm_max_children_reached="${phpfpm_response[42]}"
- if [ "${phpfpm_response[43]}" == "slow" ]
- then
- phpfpm_slow_requests="${phpfpm_response[45]}"
+ if [ "${phpfpm_response[43]}" == "slow" ]; then
+ phpfpm_slow_requests="${phpfpm_response[45]}"
else
- phpfpm_slow_requests="-1"
+ phpfpm_slow_requests="-1"
fi
- if [[ -z "${phpfpm_pool}" \
- || -z "${phpfpm_start_time}" \
- || -z "${phpfpm_start_since}" \
- || -z "${phpfpm_accepted_conn}" \
- || -z "${phpfpm_listen_queue}" \
- || -z "${phpfpm_max_listen_queue}" \
- || -z "${phpfpm_listen_queue_len}" \
- || -z "${phpfpm_idle_processes}" \
- || -z "${phpfpm_active_processes}" \
- || -z "${phpfpm_total_processes}" \
- || -z "${phpfpm_max_active_processes}" \
- || -z "${phpfpm_max_children_reached}" \
- ]]
- then
+ if [[ -z ${phpfpm_pool} || -z ${phpfpm_start_time} || -z ${phpfpm_start_since} || -z ${phpfpm_accepted_conn} || -z ${phpfpm_listen_queue} || -z ${phpfpm_max_listen_queue} || -z ${phpfpm_listen_queue_len} || -z ${phpfpm_idle_processes} || -z ${phpfpm_active_processes} || -z ${phpfpm_total_processes} || -z ${phpfpm_max_active_processes} || -z ${phpfpm_max_children_reached} ]]; then
error "empty values got from phpfpm status server: ${phpfpm_response[*]}"
return 1
fi
@@ -106,8 +82,7 @@ phpfpm_check() {
fi
local m
- for m in "${!phpfpm_urls[@]}"
- do
+ for m in "${!phpfpm_urls[@]}"; do
phpfpm_get "${phpfpm_curl_opts[$m]}" "${phpfpm_urls[$m]}"
# shellcheck disable=SC2181
if [ $? -ne 0 ]; then
@@ -133,8 +108,7 @@ phpfpm_check() {
# _create is called once, to create the charts
phpfpm_create() {
local m
- for m in "${!phpfpm_urls[@]}"
- do
+ for m in "${!phpfpm_urls[@]}"; do
cat <<EOF
CHART phpfpm_$m.connections '' "PHP-FPM Active Connections" "connections" phpfpm phpfpm.connections line $((phpfpm_priority + 1)) $phpfpm_update_every
DIMENSION active '' absolute 1 1
@@ -147,8 +121,7 @@ DIMENSION requests '' incremental 1 1
CHART phpfpm_$m.performance '' "PHP-FPM Performance" "status" phpfpm phpfpm.performance line $((phpfpm_priority + 3)) $phpfpm_update_every
DIMENSION reached 'max children reached' absolute 1 1
EOF
- if [ $((phpfpm_slow_requests)) -ne -1 ]
- then
+ if [ $((phpfpm_slow_requests)) -ne -1 ]; then
echo "DIMENSION slow 'slow requests' absolute 1 1"
fi
done
@@ -166,8 +139,7 @@ phpfpm_update() {
# remember: KEEP IT SIMPLE AND SHORT
local m
- for m in "${!phpfpm_urls[@]}"
- do
+ for m in "${!phpfpm_urls[@]}"; do
phpfpm_get "${phpfpm_curl_opts[$m]}" "${phpfpm_urls[$m]}"
# shellcheck disable=SC2181
if [ $? -ne 0 ]; then
@@ -187,8 +159,7 @@ END
BEGIN phpfpm_$m.performance $1
SET reached = $((phpfpm_max_children_reached))
EOF
- if [ $((phpfpm_slow_requests)) -ne -1 ]
- then
+ if [ $((phpfpm_slow_requests)) -ne -1 ]; then
echo "SET slow = $((phpfpm_slow_requests))"
fi
echo "END"
diff --git a/collectors/charts.d.plugin/postfix/README.md b/collectors/charts.d.plugin/postfix/README.md
index 5fc265d5..e0dc6330 100644
--- a/collectors/charts.d.plugin/postfix/README.md
+++ b/collectors/charts.d.plugin/postfix/README.md
@@ -1,8 +1,8 @@
-> THIS MODULE IS OBSOLETE.
-> USE THE PYTHON ONE - IT SUPPORTS MULTIPLE JOBS AND IT IS MORE EFFICIENT
-
# postfix
+> THIS MODULE IS OBSOLETE.
+> USE [THE PYTHON ONE](../../python.d.plugin/postfix) - IT SUPPORTS MULTIPLE JOBS AND IT IS MORE EFFICIENT
+
The plugin will collect the postfix queue size.
It will create two charts:
@@ -24,3 +24,5 @@ postfix_update_every=15
```
---
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fcharts.d.plugin%2Fpostfix%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/collectors/charts.d.plugin/postfix/postfix.chart.sh b/collectors/charts.d.plugin/postfix/postfix.chart.sh
index 8cb938ce..ff59db9f 100644
--- a/collectors/charts.d.plugin/postfix/postfix.chart.sh
+++ b/collectors/charts.d.plugin/postfix/postfix.chart.sh
@@ -22,14 +22,12 @@ postfix_check() {
# - 1 to disable the chart
# try to find the postqueue executable
- if [ -z "$postfix_postqueue" ] || [ ! -x "$postfix_postqueue" ]
- then
+ if [ -z "$postfix_postqueue" ] || [ ! -x "$postfix_postqueue" ]; then
# shellcheck disable=SC2230
postfix_postqueue="$(which postqueue 2>/dev/null || command -v postqueue 2>/dev/null)"
fi
- if [ -z "$postfix_postqueue" ] || [ ! -x "$postfix_postqueue" ]
- then
+ if [ -z "$postfix_postqueue" ] || [ ! -x "$postfix_postqueue" ]; then
# shellcheck disable=SC2154
error "cannot find postqueue. Please set 'postfix_postqueue=/path/to/postqueue' in $confd/postfix.conf"
return 1
@@ -39,7 +37,7 @@ postfix_check() {
}
postfix_create() {
-cat <<EOF
+ cat <<EOF
CHART postfix_local.qemails '' "Postfix Queue Emails" "emails" queue postfix.queued.emails line $((postfix_priority + 1)) $postfix_update_every
DIMENSION emails '' absolute 1 1
CHART postfix_local.qsize '' "Postfix Queue Emails Size" "emails size in KB" queue postfix.queued.size area $((postfix_priority + 2)) $postfix_update_every
@@ -70,9 +68,9 @@ postfix_update() {
postfix_q_emails=0
postfix_q_size=0
- eval "$(run "$postfix_postqueue" -p |\
- grep "^--" |\
- sed -e "s/-- \([0-9]\+\) Kbytes in \([0-9]\+\) Requests.$/local postfix_q_size=\1\nlocal postfix_q_emails=\2/g" |\
+ eval "$(run "$postfix_postqueue" -p |
+ grep "^--" |
+ sed -e "s/-- \([0-9]\+\) Kbytes in \([0-9]\+\) Requests.$/local postfix_q_size=\1\nlocal postfix_q_emails=\2/g" |
grep -E "^local postfix_q_(emails|size)=[0-9]+$")"
# write the result of the work.
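
`postfix_update()` above converts the `postqueue -p` summary line into variable assignments with `sed`, then whitelists them with `grep` before the `eval`. A standalone sketch on a canned summary (GNU sed; `local` dropped since this runs outside a function):

```
summary='-- 3056 Kbytes in 42 Requests.'

eval "$(echo "$summary" |
    grep "^--" |
    sed -e 's/-- \([0-9]\+\) Kbytes in \([0-9]\+\) Requests.$/postfix_q_size=\1\npostfix_q_emails=\2/' |
    grep -E '^postfix_q_(emails|size)=[0-9]+$')"   # whitelist before eval

echo "queued emails: ${postfix_q_emails}, size: ${postfix_q_size} KB"
```
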
diff --git a/collectors/charts.d.plugin/sensors/README.md b/collectors/charts.d.plugin/sensors/README.md
index ddc3650d..4f3e46d6 100644
--- a/collectors/charts.d.plugin/sensors/README.md
+++ b/collectors/charts.d.plugin/sensors/README.md
@@ -1,9 +1,10 @@
+# sensors
+
> THIS MODULE IS OBSOLETE.
-> USE THE PYTHON ONE - IT SUPPORTS MULTIPLE JOBS AND IT IS MORE EFFICIENT
+> USE [THE PYTHON ONE](../../python.d.plugin/sensors) - IT SUPPORTS MULTIPLE JOBS AND IT IS MORE EFFICIENT
> Unlike the python one, this module can collect temperature on RPi.
-# sensors
The plugin will provide charts for all configured system sensors
@@ -50,3 +51,5 @@ sensors_excluded=()
```
---
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fcharts.d.plugin%2Fsensors%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/collectors/charts.d.plugin/sensors/sensors.chart.sh b/collectors/charts.d.plugin/sensors/sensors.chart.sh
index 54368f1e..b9218777 100644
--- a/collectors/charts.d.plugin/sensors/sensors.chart.sh
+++ b/collectors/charts.d.plugin/sensors/sensors.chart.sh
@@ -38,8 +38,7 @@ sensors_find_all_files() {
sensors_find_all_dirs() {
# shellcheck disable=SC2162
- sensors_find_all_files "$1" | while read
- do
+ sensors_find_all_files "$1" | while read; do
dirname "$REPLY"
done | sort -u
}
@@ -51,7 +50,7 @@ sensors_check() {
# - 0 to enable the chart
# - 1 to disable the chart
- [ -z "$( sensors_find_all_files "$sensors_sys_dir" )" ] && error "no sensors found in '$sensors_sys_dir'." && return 1
+ [ -z "$(sensors_find_all_files "$sensors_sys_dir")" ] && error "no sensors found in '$sensors_sys_dir'." && return 1
return 0
}
@@ -60,15 +59,14 @@ sensors_check_files() {
# also remove not needed sensors
local f v excluded
- for f in "$@"
- do
+ for f in "$@"; do
[ ! -f "$f" ] && continue
for ex in "${sensors_excluded[@]}"; do
[[ $f =~ .*$ex$ ]] && excluded='1' && break
done
- [ "$excluded" != "1" ] && v="$( cat "$f" )" || v=0
- v=$(( v + 1 - 1 ))
+ [ "$excluded" != "1" ] && v="$(cat "$f")" || v=0
+ v=$((v + 1 - 1))
[ $v -ne 0 ] && echo "$f" && continue
excluded=
@@ -81,15 +79,14 @@ sensors_check_temp_type() {
# disabled sensors have the value 0
local f t v
- for f in "$@"
- do
+ for f in "$@"; do
# shellcheck disable=SC2001
- t=$( echo "$f" | sed "s|_input$|_type|g" )
+ t=$(echo "$f" | sed "s|_input$|_type|g")
[ "$f" = "$t" ] && echo "$f" && continue
[ ! -f "$t" ] && echo "$f" && continue
- v="$( cat "$t" )"
- v=$(( v + 1 - 1 ))
+ v="$(cat "$t")"
+ v=$((v + 1 - 1))
[ $v -ne 0 ] && echo "$f" && continue
error "$f is disabled"
@@ -105,120 +102,119 @@ sensors_create() {
# - the highest speed we can achieve -
[ $sensors_source_update -eq 1 ] && echo >"$TMP_DIR/sensors.sh" "sensors_update() {"
- for path in $( sensors_find_all_dirs "$sensors_sys_dir" | sort -u )
- do
- dir=$( basename "$path" )
+ for path in $(sensors_find_all_dirs "$sensors_sys_dir" | sort -u); do
+ dir=$(basename "$path")
device=
subsystem=
id=
type=
name=
- [ -h "$path/device" ] && device=$( readlink -f "$path/device" )
- [ ! -z "$device" ] && device=$( basename "$device" )
+ [ -h "$path/device" ] && device=$(readlink -f "$path/device")
+ [ ! -z "$device" ] && device=$(basename "$device")
[ -z "$device" ] && device="$dir"
- [ -h "$path/subsystem" ] && subsystem=$( readlink -f "$path/subsystem" )
- [ ! -z "$subsystem" ] && subsystem=$( basename "$subsystem" )
+ [ -h "$path/subsystem" ] && subsystem=$(readlink -f "$path/subsystem")
+ [ ! -z "$subsystem" ] && subsystem=$(basename "$subsystem")
[ -z "$subsystem" ] && subsystem="$dir"
- [ -f "$path/name" ] && name=$( cat "$path/name" )
+ [ -f "$path/name" ] && name=$(cat "$path/name")
[ -z "$name" ] && name="$dir"
- [ -f "$path/type" ] && type=$( cat "$path/type" )
+ [ -f "$path/type" ] && type=$(cat "$path/type")
[ -z "$type" ] && type="$dir"
- id="$( fixid "$device.$subsystem.$dir" )"
+ id="$(fixid "$device.$subsystem.$dir")"
debug "path='$path', dir='$dir', device='$device', subsystem='$subsystem', id='$id', name='$name'"
- for mode in temperature voltage fans power current energy humidity
- do
+ for mode in temperature voltage fans power current energy humidity; do
files=
multiplier=1
divisor=1
algorithm="absolute"
case $mode in
- temperature)
- files="$( ls "$path"/temp*_input 2>/dev/null; ls "$path/temp" 2>/dev/null )"
- files="$( sensors_check_files "$files" )"
- files="$( sensors_check_temp_type "$files" )"
- [ -z "$files" ] && continue
- echo "CHART sensors.temp_$id '' '$name Temperature' 'Celsius' 'temperature' 'sensors.temp' line $((sensors_priority + 1)) $sensors_update_every"
- echo >>"$TMP_DIR/sensors.sh" "echo \"BEGIN sensors.temp_$id \$1\""
- divisor=1000
- ;;
-
- voltage)
- files="$( ls "$path"/in*_input 2>/dev/null )"
- files="$( sensors_check_files "$files" )"
- [ -z "$files" ] && continue
- echo "CHART sensors.volt_$id '' '$name Voltage' 'Volts' 'voltage' 'sensors.volt' line $((sensors_priority + 2)) $sensors_update_every"
- echo >>"$TMP_DIR/sensors.sh" "echo \"BEGIN sensors.volt_$id \$1\""
- divisor=1000
- ;;
-
- current)
- files="$( ls "$path"/curr*_input 2>/dev/null )"
- files="$( sensors_check_files "$files" )"
- [ -z "$files" ] && continue
- echo "CHART sensors.curr_$id '' '$name Current' 'Ampere' 'current' 'sensors.curr' line $((sensors_priority + 3)) $sensors_update_every"
- echo >>"$TMP_DIR/sensors.sh" "echo \"BEGIN sensors.curr_$id \$1\""
- divisor=1000
- ;;
-
- power)
- files="$( ls "$path"/power*_input 2>/dev/null )"
- files="$( sensors_check_files "$files" )"
- [ -z "$files" ] && continue
- echo "CHART sensors.power_$id '' '$name Power' 'Watt' 'power' 'sensors.power' line $((sensors_priority + 4)) $sensors_update_every"
- echo >>"$TMP_DIR/sensors.sh" "echo \"BEGIN sensors.power_$id \$1\""
- divisor=1000000
- ;;
-
- fans)
- files="$( ls "$path"/fan*_input 2>/dev/null )"
- files="$( sensors_check_files "$files" )"
- [ -z "$files" ] && continue
- echo "CHART sensors.fan_$id '' '$name Fans Speed' 'Rotations / Minute' 'fans' 'sensors.fans' line $((sensors_priority + 5)) $sensors_update_every"
- echo >>"$TMP_DIR/sensors.sh" "echo \"BEGIN sensors.fan_$id \$1\""
- ;;
-
- energy)
- files="$( ls "$path"/energy*_input 2>/dev/null )"
- files="$( sensors_check_files "$files" )"
- [ -z "$files" ] && continue
- echo "CHART sensors.energy_$id '' '$name Energy' 'Joule' 'energy' 'sensors.energy' areastack $((sensors_priority + 6)) $sensors_update_every"
- echo >>"$TMP_DIR/sensors.sh" "echo \"BEGIN sensors.energy_$id \$1\""
- algorithm="incremental"
- divisor=1000000
- ;;
-
- humidity)
- files="$( ls "$path"/humidity*_input 2>/dev/null )"
- files="$( sensors_check_files "$files" )"
- [ -z "$files" ] && continue
- echo "CHART sensors.humidity_$id '' '$name Humidity' 'Percent' 'humidity' 'sensors.humidity' line $((sensors_priority + 7)) $sensors_update_every"
- echo >>"$TMP_DIR/sensors.sh" "echo \"BEGIN sensors.humidity_$id \$1\""
- divisor=1000
- ;;
-
- *)
- continue
- ;;
+ temperature)
+ files="$(
+ ls "$path"/temp*_input 2>/dev/null
+ ls "$path/temp" 2>/dev/null
+ )"
+ files="$(sensors_check_files "$files")"
+ files="$(sensors_check_temp_type "$files")"
+ [ -z "$files" ] && continue
+ echo "CHART sensors.temp_$id '' '$name Temperature' 'Celsius' 'temperature' 'sensors.temp' line $((sensors_priority + 1)) $sensors_update_every"
+ echo >>"$TMP_DIR/sensors.sh" "echo \"BEGIN sensors.temp_$id \$1\""
+ divisor=1000
+ ;;
+
+ voltage)
+ files="$(ls "$path"/in*_input 2>/dev/null)"
+ files="$(sensors_check_files "$files")"
+ [ -z "$files" ] && continue
+ echo "CHART sensors.volt_$id '' '$name Voltage' 'Volts' 'voltage' 'sensors.volt' line $((sensors_priority + 2)) $sensors_update_every"
+ echo >>"$TMP_DIR/sensors.sh" "echo \"BEGIN sensors.volt_$id \$1\""
+ divisor=1000
+ ;;
+
+ current)
+ files="$(ls "$path"/curr*_input 2>/dev/null)"
+ files="$(sensors_check_files "$files")"
+ [ -z "$files" ] && continue
+ echo "CHART sensors.curr_$id '' '$name Current' 'Ampere' 'current' 'sensors.curr' line $((sensors_priority + 3)) $sensors_update_every"
+ echo >>"$TMP_DIR/sensors.sh" "echo \"BEGIN sensors.curr_$id \$1\""
+ divisor=1000
+ ;;
+
+ power)
+ files="$(ls "$path"/power*_input 2>/dev/null)"
+ files="$(sensors_check_files "$files")"
+ [ -z "$files" ] && continue
+ echo "CHART sensors.power_$id '' '$name Power' 'Watt' 'power' 'sensors.power' line $((sensors_priority + 4)) $sensors_update_every"
+ echo >>"$TMP_DIR/sensors.sh" "echo \"BEGIN sensors.power_$id \$1\""
+ divisor=1000000
+ ;;
+
+ fans)
+ files="$(ls "$path"/fan*_input 2>/dev/null)"
+ files="$(sensors_check_files "$files")"
+ [ -z "$files" ] && continue
+ echo "CHART sensors.fan_$id '' '$name Fans Speed' 'Rotations / Minute' 'fans' 'sensors.fans' line $((sensors_priority + 5)) $sensors_update_every"
+ echo >>"$TMP_DIR/sensors.sh" "echo \"BEGIN sensors.fan_$id \$1\""
+ ;;
+
+ energy)
+ files="$(ls "$path"/energy*_input 2>/dev/null)"
+ files="$(sensors_check_files "$files")"
+ [ -z "$files" ] && continue
+ echo "CHART sensors.energy_$id '' '$name Energy' 'Joule' 'energy' 'sensors.energy' areastack $((sensors_priority + 6)) $sensors_update_every"
+ echo >>"$TMP_DIR/sensors.sh" "echo \"BEGIN sensors.energy_$id \$1\""
+ algorithm="incremental"
+ divisor=1000000
+ ;;
+
+ humidity)
+ files="$(ls "$path"/humidity*_input 2>/dev/null)"
+ files="$(sensors_check_files "$files")"
+ [ -z "$files" ] && continue
+ echo "CHART sensors.humidity_$id '' '$name Humidity' 'Percent' 'humidity' 'sensors.humidity' line $((sensors_priority + 7)) $sensors_update_every"
+ echo >>"$TMP_DIR/sensors.sh" "echo \"BEGIN sensors.humidity_$id \$1\""
+ divisor=1000
+ ;;
+
+ *)
+ continue
+ ;;
esac
- for x in $files
- do
+ for x in $files; do
file="$x"
- fid="$( fixid "$file" )"
- lfile="$( basename "$file" | sed "s|_input$|_label|g" )"
- labelname="$( basename "$file" | sed "s|_input$||g" )"
+ fid="$(fixid "$file")"
+ lfile="$(basename "$file" | sed "s|_input$|_label|g")"
+ labelname="$(basename "$file" | sed "s|_input$||g")"
- if [ ! "$path/$lfile" = "$file" ] && [ -f "$path/$lfile" ]
- then
- labelname="$( cat "$path/$lfile" )"
+ if [ ! "$path/$lfile" = "$file" ] && [ -f "$path/$lfile" ]; then
+ labelname="$(cat "$path/$lfile")"
fi
echo "DIMENSION $fid '$labelname' $algorithm $multiplier $divisor"
@@ -252,4 +248,3 @@ sensors_update() {
return 0
}
-
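The temperature, voltage and current cases above register their dimensions with divisor 1000 (power and energy with 1000000) because the kernel hwmon sysfs interface reports raw integers in milli- or micro-units. A hedged sketch of the conversion, using a hypothetical sensor path and an invented fallback value:

```
raw="$(cat /sys/class/hwmon/hwmon0/temp1_input 2>/dev/null)"
raw="${raw:-42500}"   # invented sample value if the hypothetical path is absent
printf 'temperature: %d.%03d Celsius\n' "$((raw / 1000))" "$((raw % 1000))"
# prints: temperature: 42.500 Celsius
```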
diff --git a/collectors/charts.d.plugin/squid/README.md b/collectors/charts.d.plugin/squid/README.md
index 0934ccfc..cfb61790 100644
--- a/collectors/charts.d.plugin/squid/README.md
+++ b/collectors/charts.d.plugin/squid/README.md
@@ -1,9 +1,8 @@
-> THIS MODULE IS OBSOLETE.
-> USE THE PYTHON ONE - IT SUPPORTS MULTIPLE JOBS AND IT IS MORE EFFICIENT
-
-
# squid
+> THIS MODULE IS OBSOLETE.
+> USE [THE PYTHON ONE](../../python.d.plugin/squid) - IT SUPPORTS MULTIPLE JOBS AND IT IS MORE EFFICIENT
+
The plugin will monitor a squid server.
It will produce 4 charts:
@@ -64,3 +63,5 @@ squid_update_every=5
```
---
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fcharts.d.plugin%2Fsquid%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/collectors/charts.d.plugin/squid/squid.chart.sh b/collectors/charts.d.plugin/squid/squid.chart.sh
index cf5d1d78..ebddb32c 100644
--- a/collectors/charts.d.plugin/squid/squid.chart.sh
+++ b/collectors/charts.d.plugin/squid/squid.chart.sh
@@ -25,13 +25,10 @@ squid_get_stats() {
squid_autodetect() {
local host="127.0.0.1" port url x
- for port in 3128 8080
- do
- for url in "cache_object://$host:$port/counters" "/squid-internal-mgr/counters"
- do
+ for port in 3128 8080; do
+ for url in "cache_object://$host:$port/counters" "/squid-internal-mgr/counters"; do
x=$(squid_get_stats_internal "$host" "$port" "$url" | grep client_http.requests)
- if [ ! -z "$x" ]
- then
+ if [ ! -z "$x" ]; then
squid_host="$host"
squid_port="$port"
squid_url="$url"
@@ -50,8 +47,7 @@ squid_check() {
require_cmd sed || return 1
require_cmd egrep || return 1
- if [ -z "$squid_host" ] || [ -z "$squid_port" ] || [ -z "$squid_url" ]
- then
+ if [ -z "$squid_host" ] || [ -z "$squid_port" ] || [ -z "$squid_url" ]; then
squid_autodetect || return 1
fi
@@ -59,8 +55,7 @@ squid_check() {
local x
x="$(squid_get_stats | grep client_http.requests)"
# shellcheck disable=SC2181
- if [ ! $? -eq 0 ] || [ -z "$x" ]
- then
+ if [ ! $? -eq 0 ] || [ -z "$x" ]; then
error "cannot fetch URL '$squid_url' by connecting to $squid_host:$squid_port. Please set squid_url='url' and squid_host='host' and squid_port='port' in $confd/squid.conf"
return 1
fi
@@ -93,7 +88,6 @@ EOF
return 0
}
-
squid_update() {
# the first argument to this function is the microseconds since last update
# pass this parameter to the BEGIN statement (see below).
@@ -114,8 +108,8 @@ squid_update() {
# even if something goes wrong, no other code can be executed
# shellcheck disable=SC1117
- eval "$(squid_get_stats |\
- sed -e "s/ \+/ /g" -e "s/\./_/g" -e "s/^\([a-z0-9_]\+\) *= *\([0-9]\+\)$/local squid_\1=\2/g" |\
+ eval "$(squid_get_stats |
+ sed -e "s/ \+/ /g" -e "s/\./_/g" -e "s/^\([a-z0-9_]\+\) *= *\([0-9]\+\)$/local squid_\1=\2/g" |
grep -E "^local squid_(client_http|server_all)_[a-z0-9_]+=[0-9]+$")"
# write the result of the work.
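As in the postfix module, the sed pipeline above rewrites each counter line into a `local` shell assignment before `eval` runs it. A hedged example with an invented value:

```
echo "client_http.requests = 12345" |
    sed -e "s/ \+/ /g" -e "s/\./_/g" -e "s/^\([a-z0-9_]\+\) *= *\([0-9]\+\)$/local squid_\1=\2/g" |
    grep -E "^local squid_(client_http|server_all)_[a-z0-9_]+=[0-9]+$"
# prints: local squid_client_http_requests=12345
```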
diff --git a/collectors/charts.d.plugin/tomcat/README.md b/collectors/charts.d.plugin/tomcat/README.md
index d82951aa..84337860 100644
--- a/collectors/charts.d.plugin/tomcat/README.md
+++ b/collectors/charts.d.plugin/tomcat/README.md
@@ -1,2 +1,6 @@
+# tomcat
+
> THIS MODULE IS OBSOLETE.
-> USE THE PYTHON ONE - IT SUPPORTS MULTIPLE JOBS AND IT IS MORE EFFICIENT
+> USE [THE PYTHON ONE](../../python.d.plugin/tomcat) - IT SUPPORTS MULTIPLE JOBS AND IT IS MORE EFFICIENT
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fcharts.d.plugin%2Ftomcat%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/collectors/charts.d.plugin/tomcat/tomcat.chart.sh b/collectors/charts.d.plugin/tomcat/tomcat.chart.sh
index 294487b8..9ca75e63 100644
--- a/collectors/charts.d.plugin/tomcat/tomcat.chart.sh
+++ b/collectors/charts.d.plugin/tomcat/tomcat.chart.sh
@@ -32,24 +32,23 @@ tomcat_priority=60000
# will be in the proper units
tomcat_decimal_detail=1000000
-# used by volume chart to convert bytes to KB
-tomcat_decimal_KB_detail=1000
+# used by volume chart to convert bytes to kB
+tomcat_decimal_kB_detail=1000
tomcat_check() {
require_cmd xmlstarlet || return 1
-
# check if url, username and password are set
if [ -z "${tomcat_url}" ]; then
- error "tomcat url is unset or set to the empty string"
+ error "tomcat url is unset or set to the empty string"
return 1
fi
if [ -z "${tomcat_user}" ]; then
# check backwards compatibility
# shellcheck disable=SC2154
if [ -z "${tomcatUser}" ]; then
- error "tomcat user is unset or set to the empty string"
+ error "tomcat user is unset or set to the empty string"
return 1
else
tomcat_user="${tomcatUser}"
@@ -59,7 +58,7 @@ tomcat_check() {
# check backwards compatibility
# shellcheck disable=SC2154
if [ -z "${tomcatPassword}" ]; then
- error "tomcat password is unset or set to the empty string"
+ error "tomcat password is unset or set to the empty string"
return 1
else
tomcat_password="${tomcatPassword}"
@@ -69,8 +68,7 @@ tomcat_check() {
# check if we can get to tomcat's status page
tomcat_get
# shellcheck disable=2181
- if [ $? -ne 0 ]
- then
+ if [ $? -ne 0 ]; then
error "cannot get to status page on URL '${tomcat_url}'. Please make sure tomcat url, username and password are correct."
return 1
fi
@@ -84,8 +82,12 @@ tomcat_check() {
tomcat_get() {
# collect tomcat values
- tomcat_port="$(IFS=/ read -ra a <<< "$tomcat_url"; hostport=${a[2]}; echo "${hostport#*:}")"
- mapfile -t lines < <(run curl -u "$tomcat_user":"$tomcat_password" -Ss ${tomcat_curl_opts} "$tomcat_url" |\
+ tomcat_port="$(
+ IFS=/ read -ra a <<<"$tomcat_url"
+ hostport=${a[2]}
+ echo "${hostport#*:}"
+ )"
+ mapfile -t lines < <(run curl -u "$tomcat_user":"$tomcat_password" -Ss ${tomcat_curl_opts} "$tomcat_url" |
run xmlstarlet sel \
-t -m "/status/jvm/memory" -v @free \
-n -m "/status/connector[@name='\"http-bio-$tomcat_port\"']/threadInfo" -v @currentThreadCount \
@@ -107,8 +109,8 @@ tomcat_create() {
cat <<EOF
CHART tomcat.accesses '' "tomcat requests" "requests/s" statistics tomcat.accesses area $((tomcat_priority + 8)) $tomcat_update_every
DIMENSION accesses '' incremental
-CHART tomcat.volume '' "tomcat volume" "KB/s" volume tomcat.volume area $((tomcat_priority + 5)) $tomcat_update_every
-DIMENSION volume '' incremental divisor ${tomcat_decimal_KB_detail}
+CHART tomcat.volume '' "tomcat volume" "kB/s" volume tomcat.volume area $((tomcat_priority + 5)) $tomcat_update_every
+DIMENSION volume '' incremental divisor ${tomcat_decimal_kB_detail}
CHART tomcat.threads '' "tomcat threads" "current threads" statistics tomcat.threads line $((tomcat_priority + 6)) $tomcat_update_every
DIMENSION current '' absolute 1
DIMENSION busy '' absolute 1
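The `tomcat_port` extraction in `tomcat_get()` above splits the URL on `/` and keeps everything after the first colon of the host:port field. A hedged sketch with an invented URL:

```
tomcat_url="http://localhost:8080/manager/status?XML=true"
IFS=/ read -ra a <<<"$tomcat_url"
hostport=${a[2]}        # "localhost:8080"
echo "${hostport#*:}"   # prints: 8080
# note: a URL without an explicit port would echo the bare host name instead
```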
diff --git a/collectors/checks.plugin/Makefile.in b/collectors/checks.plugin/Makefile.in
deleted file mode 100644
index faadbe58..00000000
--- a/collectors/checks.plugin/Makefile.in
+++ /dev/null
@@ -1,464 +0,0 @@
-# Makefile.in generated by automake 1.14.1 from Makefile.am.
-# @configure_input@
-
-# Copyright (C) 1994-2013 Free Software Foundation, Inc.
-
-# This Makefile.in is free software; the Free Software Foundation
-# gives unlimited permission to copy and/or distribute it,
-# with or without modifications, as long as this notice is preserved.
-
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
-# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
-# PARTICULAR PURPOSE.
-
-@SET_MAKE@
-
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-VPATH = @srcdir@
-am__is_gnu_make = test -n '$(MAKEFILE_LIST)' && test -n '$(MAKELEVEL)'
-am__make_running_with_option = \
- case $${target_option-} in \
- ?) ;; \
- *) echo "am__make_running_with_option: internal error: invalid" \
- "target option '$${target_option-}' specified" >&2; \
- exit 1;; \
- esac; \
- has_opt=no; \
- sane_makeflags=$$MAKEFLAGS; \
- if $(am__is_gnu_make); then \
- sane_makeflags=$$MFLAGS; \
- else \
- case $$MAKEFLAGS in \
- *\\[\ \ ]*) \
- bs=\\; \
- sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \
- | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \
- esac; \
- fi; \
- skip_next=no; \
- strip_trailopt () \
- { \
- flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \
- }; \
- for flg in $$sane_makeflags; do \
- test $$skip_next = yes && { skip_next=no; continue; }; \
- case $$flg in \
- *=*|--*) continue;; \
- -*I) strip_trailopt 'I'; skip_next=yes;; \
- -*I?*) strip_trailopt 'I';; \
- -*O) strip_trailopt 'O'; skip_next=yes;; \
- -*O?*) strip_trailopt 'O';; \
- -*l) strip_trailopt 'l'; skip_next=yes;; \
- -*l?*) strip_trailopt 'l';; \
- -[dEDm]) skip_next=yes;; \
- -[JT]) skip_next=yes;; \
- esac; \
- case $$flg in \
- *$$target_option*) has_opt=yes; break;; \
- esac; \
- done; \
- test $$has_opt = yes
-am__make_dryrun = (target_option=n; $(am__make_running_with_option))
-am__make_keepgoing = (target_option=k; $(am__make_running_with_option))
-pkgdatadir = $(datadir)/@PACKAGE@
-pkgincludedir = $(includedir)/@PACKAGE@
-pkglibdir = $(libdir)/@PACKAGE@
-pkglibexecdir = $(libexecdir)/@PACKAGE@
-am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd
-install_sh_DATA = $(install_sh) -c -m 644
-install_sh_PROGRAM = $(install_sh) -c
-install_sh_SCRIPT = $(install_sh) -c
-INSTALL_HEADER = $(INSTALL_DATA)
-transform = $(program_transform_name)
-NORMAL_INSTALL = :
-PRE_INSTALL = :
-POST_INSTALL = :
-NORMAL_UNINSTALL = :
-PRE_UNINSTALL = :
-POST_UNINSTALL = :
-build_triplet = @build@
-host_triplet = @host@
-subdir = collectors/checks.plugin
-DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/Makefile.am \
- $(dist_noinst_DATA)
-ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
-am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \
- $(top_srcdir)/build/m4/ax_c__generic.m4 \
- $(top_srcdir)/build/m4/ax_c_lto.m4 \
- $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \
- $(top_srcdir)/build/m4/ax_c_mallopt.m4 \
- $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \
- $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \
- $(top_srcdir)/build/m4/ax_pthread.m4 \
- $(top_srcdir)/build/m4/jemalloc.m4 \
- $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac
-am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
- $(ACLOCAL_M4)
-mkinstalldirs = $(install_sh) -d
-CONFIG_HEADER = $(top_builddir)/config.h
-CONFIG_CLEAN_FILES =
-CONFIG_CLEAN_VPATH_FILES =
-AM_V_P = $(am__v_P_@AM_V@)
-am__v_P_ = $(am__v_P_@AM_DEFAULT_V@)
-am__v_P_0 = false
-am__v_P_1 = :
-AM_V_GEN = $(am__v_GEN_@AM_V@)
-am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@)
-am__v_GEN_0 = @echo " GEN " $@;
-am__v_GEN_1 =
-AM_V_at = $(am__v_at_@AM_V@)
-am__v_at_ = $(am__v_at_@AM_DEFAULT_V@)
-am__v_at_0 = @
-am__v_at_1 =
-SOURCES =
-DIST_SOURCES =
-am__can_run_installinfo = \
- case $$AM_UPDATE_INFO_DIR in \
- n|no|NO) false;; \
- *) (install-info --version) >/dev/null 2>&1;; \
- esac
-DATA = $(dist_noinst_DATA)
-am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP)
-DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
-ACLOCAL = @ACLOCAL@
-AMTAR = @AMTAR@
-AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@
-AUTOCONF = @AUTOCONF@
-AUTOHEADER = @AUTOHEADER@
-AUTOMAKE = @AUTOMAKE@
-AWK = @AWK@
-CC = @CC@
-CCDEPMODE = @CCDEPMODE@
-CFLAGS = @CFLAGS@
-CPP = @CPP@
-CPPFLAGS = @CPPFLAGS@
-CYGPATH_W = @CYGPATH_W@
-DEFS = @DEFS@
-DEPDIR = @DEPDIR@
-ECHO_C = @ECHO_C@
-ECHO_N = @ECHO_N@
-ECHO_T = @ECHO_T@
-EGREP = @EGREP@
-EXEEXT = @EXEEXT@
-GREP = @GREP@
-INSTALL = @INSTALL@
-INSTALL_DATA = @INSTALL_DATA@
-INSTALL_PROGRAM = @INSTALL_PROGRAM@
-INSTALL_SCRIPT = @INSTALL_SCRIPT@
-INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
-IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@
-IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@
-LDFLAGS = @LDFLAGS@
-LIBCAP_CFLAGS = @LIBCAP_CFLAGS@
-LIBCAP_LIBS = @LIBCAP_LIBS@
-LIBMNL_CFLAGS = @LIBMNL_CFLAGS@
-LIBMNL_LIBS = @LIBMNL_LIBS@
-LIBOBJS = @LIBOBJS@
-LIBS = @LIBS@
-LTLIBOBJS = @LTLIBOBJS@
-MAINT = @MAINT@
-MAKEINFO = @MAKEINFO@
-MATH_CFLAGS = @MATH_CFLAGS@
-MATH_LIBS = @MATH_LIBS@
-MKDIR_P = @MKDIR_P@
-NFACCT_CFLAGS = @NFACCT_CFLAGS@
-NFACCT_LIBS = @NFACCT_LIBS@
-OBJEXT = @OBJEXT@
-OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@
-OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@
-OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@
-OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@
-OPTIONAL_MATH_CLFAGS = @OPTIONAL_MATH_CLFAGS@
-OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@
-OPTIONAL_NFACCT_CLFAGS = @OPTIONAL_NFACCT_CLFAGS@
-OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@
-OPTIONAL_UUID_CLFAGS = @OPTIONAL_UUID_CLFAGS@
-OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@
-OPTIONAL_ZLIB_CLFAGS = @OPTIONAL_ZLIB_CLFAGS@
-OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@
-PACKAGE = @PACKAGE@
-PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@
-PACKAGE_NAME = @PACKAGE_NAME@
-PACKAGE_RPM_RELEASE = @PACKAGE_RPM_RELEASE@
-PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@
-PACKAGE_STRING = @PACKAGE_STRING@
-PACKAGE_TARNAME = @PACKAGE_TARNAME@
-PACKAGE_URL = @PACKAGE_URL@
-PACKAGE_VERSION = @PACKAGE_VERSION@
-PATH_SEPARATOR = @PATH_SEPARATOR@
-PKG_CONFIG = @PKG_CONFIG@
-PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@
-PKG_CONFIG_PATH = @PKG_CONFIG_PATH@
-PTHREAD_CC = @PTHREAD_CC@
-PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
-PTHREAD_LIBS = @PTHREAD_LIBS@
-SET_MAKE = @SET_MAKE@
-SHELL = @SHELL@
-SSE_CANDIDATE = @SSE_CANDIDATE@
-STRIP = @STRIP@
-UUID_CFLAGS = @UUID_CFLAGS@
-UUID_LIBS = @UUID_LIBS@
-VERSION = @VERSION@
-ZLIB_CFLAGS = @ZLIB_CFLAGS@
-ZLIB_LIBS = @ZLIB_LIBS@
-abs_builddir = @abs_builddir@
-abs_srcdir = @abs_srcdir@
-abs_top_builddir = @abs_top_builddir@
-abs_top_srcdir = @abs_top_srcdir@
-ac_ct_CC = @ac_ct_CC@
-am__include = @am__include@
-am__leading_dot = @am__leading_dot@
-am__quote = @am__quote@
-am__tar = @am__tar@
-am__untar = @am__untar@
-ax_pthread_config = @ax_pthread_config@
-bindir = @bindir@
-build = @build@
-build_alias = @build_alias@
-build_cpu = @build_cpu@
-build_os = @build_os@
-build_target = @build_target@
-build_vendor = @build_vendor@
-builddir = @builddir@
-cachedir = @cachedir@
-chartsdir = @chartsdir@
-configdir = @configdir@
-datadir = @datadir@
-datarootdir = @datarootdir@
-docdir = @docdir@
-dvidir = @dvidir@
-exec_prefix = @exec_prefix@
-has_jemalloc = @has_jemalloc@
-has_tcmalloc = @has_tcmalloc@
-host = @host@
-host_alias = @host_alias@
-host_cpu = @host_cpu@
-host_os = @host_os@
-host_vendor = @host_vendor@
-htmldir = @htmldir@
-includedir = @includedir@
-infodir = @infodir@
-install_sh = @install_sh@
-libconfigdir = @libconfigdir@
-libdir = @libdir@
-libexecdir = @libexecdir@
-localedir = @localedir@
-localstatedir = @localstatedir@
-logdir = @logdir@
-mandir = @mandir@
-mkdir_p = @mkdir_p@
-nodedir = @nodedir@
-oldincludedir = @oldincludedir@
-pdfdir = @pdfdir@
-pluginsdir = @pluginsdir@
-prefix = @prefix@
-program_transform_name = @program_transform_name@
-psdir = @psdir@
-pythondir = @pythondir@
-registrydir = @registrydir@
-sbindir = @sbindir@
-sharedstatedir = @sharedstatedir@
-srcdir = @srcdir@
-sysconfdir = @sysconfdir@
-target_alias = @target_alias@
-top_build_prefix = @top_build_prefix@
-top_builddir = @top_builddir@
-top_srcdir = @top_srcdir@
-varlibdir = @varlibdir@
-webdir = @webdir@
-AUTOMAKE_OPTIONS = subdir-objects
-MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
-dist_noinst_DATA = \
- README.md \
- $(NULL)
-
-all: all-am
-
-.SUFFIXES:
-$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps)
- @for dep in $?; do \
- case '$(am__configure_deps)' in \
- *$$dep*) \
- ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \
- && { if test -f $@; then exit 0; else break; fi; }; \
- exit 1;; \
- esac; \
- done; \
- echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu collectors/checks.plugin/Makefile'; \
- $(am__cd) $(top_srcdir) && \
- $(AUTOMAKE) --gnu collectors/checks.plugin/Makefile
-.PRECIOUS: Makefile
-Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
- @case '$?' in \
- *config.status*) \
- cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \
- *) \
- echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \
- cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \
- esac;
-
-$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES)
- cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
-
-$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps)
- cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
-$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps)
- cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
-$(am__aclocal_m4_deps):
-tags TAGS:
-
-ctags CTAGS:
-
-cscope cscopelist:
-
-
-distdir: $(DISTFILES)
- @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
- topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
- list='$(DISTFILES)'; \
- dist_files=`for file in $$list; do echo $$file; done | \
- sed -e "s|^$$srcdirstrip/||;t" \
- -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \
- case $$dist_files in \
- */*) $(MKDIR_P) `echo "$$dist_files" | \
- sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \
- sort -u` ;; \
- esac; \
- for file in $$dist_files; do \
- if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \
- if test -d $$d/$$file; then \
- dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \
- if test -d "$(distdir)/$$file"; then \
- find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
- fi; \
- if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \
- cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \
- find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
- fi; \
- cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \
- else \
- test -f "$(distdir)/$$file" \
- || cp -p $$d/$$file "$(distdir)/$$file" \
- || exit 1; \
- fi; \
- done
-check-am: all-am
-check: check-am
-all-am: Makefile $(DATA)
-installdirs:
-install: install-am
-install-exec: install-exec-am
-install-data: install-data-am
-uninstall: uninstall-am
-
-install-am: all-am
- @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
-
-installcheck: installcheck-am
-install-strip:
- if test -z '$(STRIP)'; then \
- $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
- install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
- install; \
- else \
- $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
- install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
- "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \
- fi
-mostlyclean-generic:
-
-clean-generic:
-
-distclean-generic:
- -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
- -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES)
-
-maintainer-clean-generic:
- @echo "This command is intended for maintainers to use"
- @echo "it deletes files that may require special tools to rebuild."
- -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES)
-clean: clean-am
-
-clean-am: clean-generic mostlyclean-am
-
-distclean: distclean-am
- -rm -f Makefile
-distclean-am: clean-am distclean-generic
-
-dvi: dvi-am
-
-dvi-am:
-
-html: html-am
-
-html-am:
-
-info: info-am
-
-info-am:
-
-install-data-am:
-
-install-dvi: install-dvi-am
-
-install-dvi-am:
-
-install-exec-am:
-
-install-html: install-html-am
-
-install-html-am:
-
-install-info: install-info-am
-
-install-info-am:
-
-install-man:
-
-install-pdf: install-pdf-am
-
-install-pdf-am:
-
-install-ps: install-ps-am
-
-install-ps-am:
-
-installcheck-am:
-
-maintainer-clean: maintainer-clean-am
- -rm -f Makefile
-maintainer-clean-am: distclean-am maintainer-clean-generic
-
-mostlyclean: mostlyclean-am
-
-mostlyclean-am: mostlyclean-generic
-
-pdf: pdf-am
-
-pdf-am:
-
-ps: ps-am
-
-ps-am:
-
-uninstall-am:
-
-.MAKE: install-am install-strip
-
-.PHONY: all all-am check check-am clean clean-generic cscopelist-am \
- ctags-am distclean distclean-generic distdir dvi dvi-am html \
- html-am info info-am install install-am install-data \
- install-data-am install-dvi install-dvi-am install-exec \
- install-exec-am install-html install-html-am install-info \
- install-info-am install-man install-pdf install-pdf-am \
- install-ps install-ps-am install-strip installcheck \
- installcheck-am installdirs maintainer-clean \
- maintainer-clean-generic mostlyclean mostlyclean-generic pdf \
- pdf-am ps ps-am tags-am uninstall uninstall-am
-
-
-# Tell versions [3.59,3.63) of GNU make to not export all variables.
-# Otherwise a system limit (for SysV at least) may be exceeded.
-.NOEXPORT:
diff --git a/collectors/checks.plugin/README.md b/collectors/checks.plugin/README.md
index 503b96ad..461e3ba8 100644
--- a/collectors/checks.plugin/README.md
+++ b/collectors/checks.plugin/README.md
@@ -1,3 +1,5 @@
-# Netdata internal checks
+# checks.plugin
A debugging plugin (disabled by default).
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fchecks.plugin%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/collectors/cups.plugin/Makefile.am b/collectors/cups.plugin/Makefile.am
new file mode 100644
index 00000000..ca4d4ddd
--- /dev/null
+++ b/collectors/cups.plugin/Makefile.am
@@ -0,0 +1,9 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+AUTOMAKE_OPTIONS = subdir-objects
+
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+
+dist_noinst_DATA = \
+ README.md \
+ $(NULL)
diff --git a/collectors/cups.plugin/README.md b/collectors/cups.plugin/README.md
new file mode 100644
index 00000000..7baf8855
--- /dev/null
+++ b/collectors/cups.plugin/README.md
@@ -0,0 +1,49 @@
+# cups.plugin
+
+`cups.plugin` collects Common Unix Printing System (CUPS) metrics.
+
+## Prerequisites
+
+This plugin requires a running local CUPS daemon (`cupsd`) and needs no configuration.
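+
+As a quick check (a hedged example; the plugin does not require it), the standard CUPS client tools can confirm that the daemon is reachable:
+
+```
+lpstat -r   # prints "scheduler is running" when cupsd is up
+```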
+
+## Charts
+
+`cups.plugin` provides one common section `overview` and one section per destination.
+
+> Destinations in CUPS represent individual printers or classes (collections or pools) of printers (https://www.cups.org/doc/cupspm.html#working-with-destinations)
+
+The common `overview` section provides these charts:
+
+1. **destinations by state**
+ * idle
+ * printing
+ * stopped
+
+2. **destinations by options**
+ * total
+ * accepting jobs
+ * shared
+
+3. **total job number by status**
+ * pending
+ * processing
+ * held
+
+4. **total job size by status**
+ * pending
+ * processing
+ * held
+
+For each destination the plugin provides these charts:
+
+1. **job number by status**
+ * pending
+ * held
+ * processing
+
+2. **job size by status**
+ * pending
+ * held
+ * processing
+
+At the moment, only the pending, processing, and held job states are reported, because there is no scalable way to collect stopped, canceled, aborted, and completed jobs.
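+
+As a hedged illustration of the plugin's output (all values below are invented), it speaks the netdata external plugin protocol on standard output; the overview state chart, for example, is emitted as:
+
+```
+CHART cups.dest_state '' 'Destinations by state' dests overview dests stacked 100000 1
+DIMENSION idle '' absolute 1 1
+DIMENSION printing '' absolute 1 1
+DIMENSION stopped '' absolute 1 1
+BEGIN cups.dest_state
+SET idle = 1
+SET printing = 0
+SET stopped = 0
+END
+```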
diff --git a/collectors/cups.plugin/cups_plugin.c b/collectors/cups.plugin/cups_plugin.c
new file mode 100644
index 00000000..7fbba2c4
--- /dev/null
+++ b/collectors/cups.plugin/cups_plugin.c
@@ -0,0 +1,449 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+/*
+ * netdata cups.plugin
+ * (C) Copyright 2017-2018 Simon Nagl <simon.nagl@gmx.de>
+ * Released under GPL v3+
+ */
+
+#include "../../libnetdata/libnetdata.h"
+#include <limits.h>
+
+// callback required by fatal()
+void netdata_cleanup_and_exit(int ret) {
+ exit(ret);
+}
+
+void send_statistics(const char *action, const char *action_result, const char *action_data) {
+ (void) action;
+ (void) action_result;
+ (void) action_data;
+ return;
+}
+
+// callbacks required by popen()
+void signals_block(void) {};
+void signals_unblock(void) {};
+void signals_reset(void) {};
+
+// callback required by eval()
+int health_variable_lookup(const char *variable, uint32_t hash, struct rrdcalc *rc, calculated_number *result) {
+ (void)variable;
+ (void)hash;
+ (void)rc;
+ (void)result;
+ return 0;
+};
+
+// required by get_system_cpus()
+char *netdata_configured_host_prefix = "";
+
+// Variables
+
+static int debug = 0;
+
+static int netdata_update_every = 1;
+static int netdata_priority = 100004;
+
+
+#ifdef HAVE_CUPS
+#include <cups/cups.h>
+
+http_t *http; // connection to the cups daemon
+
+/*
+ * Used to aggregate job metrics for a destination (and all destinations).
+ */
+struct job_metrics {
+ int is_collected; // flag if this was collected in the current cycle
+
+ int num_pending;
+ int num_processing;
+ int num_held;
+
+    int size_pending; // in kilobytes
+    int size_processing; // in kilobytes
+    int size_held; // in kilobytes
+};
+DICTIONARY *dict_dest_job_metrics = NULL;
+struct job_metrics global_job_metrics;
+
+int num_dest_total;
+int num_dest_accepting_jobs;
+int num_dest_shared;
+
+int num_dest_idle;
+int num_dest_printing;
+int num_dest_stopped;
+
+void print_help() {
+ fprintf(stderr,
+ "\n"
+ "netdata cups.plugin %s\n"
+ "\n"
+ "Copyright (C) 2017-2018 Simon Nagl <simon.nagl@gmx.de>\n"
+ "Released under GNU General Public License v3+.\n"
+ "All rights reserved.\n"
+ "\n"
+ "This program is a data collector plugin for netdata.\n"
+ "\n"
+ "SYNOPSIS: cups.plugin [-d][-h][-v] COLLECTION_FREQUENCY\n"
+ "\n"
+ "Options:"
+ "\n"
+ " COLLECTION_FREQUENCY data collection frequency in seconds\n"
+ "\n"
+ " -d enable verbose output\n"
+ " default: disabled\n"
+ "\n"
+ " -v print version and exit\n"
+ "\n"
+ " -h print this message and exit\n"
+ "\n",
+ VERSION);
+}
+
+void parse_command_line(int argc, char **argv) {
+ int i;
+ int freq = 0;
+ int update_every_found = 0;
+ for (i = 1; i < argc; i++) {
+ if (isdigit(*argv[i]) && !update_every_found) {
+ int n = str2i(argv[i]);
+ if (n > 0 && n < 86400) {
+                freq = n;
+                update_every_found = 1; // accept only the first numeric argument
+                continue;
+ }
+ } else if (strcmp("-v", argv[i]) == 0) {
+ printf("cups.plugin %s\n", VERSION);
+ exit(0);
+ } else if (strcmp("-d", argv[i]) == 0) {
+ debug = 1;
+ continue;
+ } else if (strcmp("-h", argv[i]) == 0) {
+ print_help();
+ exit(0);
+ }
+
+ print_help();
+ exit(1);
+ }
+
+ if (freq >= netdata_update_every) {
+ netdata_update_every = freq;
+ } else if (freq) {
+ error("update frequency %d seconds is too small for CUPS. Using %d.", freq, netdata_update_every);
+ }
+}
+
+int reset_job_metrics(void *entry, void *data) {
+ (void)data;
+
+ struct job_metrics *jm = (struct job_metrics *)entry;
+
+ jm->is_collected = 0;
+ jm->num_held = 0;
+ jm->num_pending = 0;
+ jm->num_processing = 0;
+ jm->size_held = 0;
+ jm->size_pending = 0;
+ jm->size_processing = 0;
+
+ return 0;
+}
+
+struct job_metrics *get_job_metrics(char *dest) {
+ struct job_metrics *jm = dictionary_get(dict_dest_job_metrics, dest);
+
+ if (unlikely(!jm)) {
+ struct job_metrics new_job_metrics;
+ reset_job_metrics(&new_job_metrics, NULL);
+ jm = dictionary_set(dict_dest_job_metrics, dest, &new_job_metrics, sizeof(struct job_metrics));
+
+ printf("CHART cups.job_num_%s '' 'Active job number of destination %s' jobs '%s' job_num stacked %i %i\n", dest, dest, dest, netdata_priority++, netdata_update_every);
+ printf("DIMENSION pending '' absolute 1 1\n");
+ printf("DIMENSION held '' absolute 1 1\n");
+ printf("DIMENSION processing '' absolute 1 1\n");
+
+ printf("CHART cups.job_size_%s '' 'Active job size of destination %s' KB '%s' job_size stacked %i %i\n", dest, dest, dest, netdata_priority++, netdata_update_every);
+ printf("DIMENSION pending '' absolute 1 1\n");
+ printf("DIMENSION held '' absolute 1 1\n");
+ printf("DIMENSION processing '' absolute 1 1\n");
+ };
+ return jm;
+}
+
+int collect_job_metrics(char *name, void *entry, void *data) {
+ (void)data;
+
+ struct job_metrics *jm = (struct job_metrics *)entry;
+
+ if (jm->is_collected) {
+ printf(
+ "BEGIN cups.job_num_%s\n"
+ "SET pending = %d\n"
+ "SET held = %d\n"
+ "SET processing = %d\n"
+ "END\n",
+ name, jm->num_pending, jm->num_held, jm->num_processing);
+ printf(
+ "BEGIN cups.job_size_%s\n"
+ "SET pending = %d\n"
+ "SET held = %d\n"
+ "SET processing = %d\n"
+ "END\n",
+ name, jm->size_pending, jm->size_held, jm->size_processing);
+ } else {
+ printf("CHART cups.job_num_%s '' 'Active job number of destination %s' jobs '%s' job_num stacked 1 %i 'obsolete'\n", name, name, name, netdata_update_every);
+ printf("DIMENSION pending '' absolute 1 1\n");
+ printf("DIMENSION held '' absolute 1 1\n");
+ printf("DIMENSION processing '' absolute 1 1\n");
+
+ printf("CHART cups.job_size_%s '' 'Active job size of destination %s' KB '%s' job_size stacked 1 %i 'obsolete'\n", name, name, name, netdata_update_every);
+ printf("DIMENSION pending '' absolute 1 1\n");
+ printf("DIMENSION held '' absolute 1 1\n");
+ printf("DIMENSION processing '' absolute 1 1\n");
+ dictionary_del(dict_dest_job_metrics, name);
+ }
+
+ return 0;
+}
+
+void reset_metrics() {
+ num_dest_total = 0;
+ num_dest_accepting_jobs = 0;
+ num_dest_shared = 0;
+
+ num_dest_idle = 0;
+ num_dest_printing = 0;
+ num_dest_stopped = 0;
+
+ reset_job_metrics(&global_job_metrics, NULL);
+ dictionary_get_all(dict_dest_job_metrics, reset_job_metrics, NULL);
+}
+
+int main(int argc, char **argv) {
+
+ // ------------------------------------------------------------------------
+ // initialization of netdata plugin
+
+ program_name = "cups.plugin";
+
+ // disable syslog
+ error_log_syslog = 0;
+
+ // set errors flood protection to 100 logs per hour
+ error_log_errors_per_period = 100;
+ error_log_throttle_period = 3600;
+
+ parse_command_line(argc, argv);
+
+ errno = 0;
+
+ dict_dest_job_metrics = dictionary_create(DICTIONARY_FLAG_SINGLE_THREADED);
+
+ // ------------------------------------------------------------------------
+ // the main loop
+
+ if (debug)
+ fprintf(stderr, "starting data collection\n");
+
+ time_t started_t = now_monotonic_sec();
+ size_t iteration = 0;
+ usec_t step = netdata_update_every * USEC_PER_SEC;
+
+ heartbeat_t hb;
+ heartbeat_init(&hb);
+ for (iteration = 0; 1; iteration++)
+ {
+ heartbeat_next(&hb, step);
+
+ if (unlikely(netdata_exit))
+ {
+ break;
+ }
+
+ reset_metrics();
+
+ cups_dest_t *dests;
+ num_dest_total = cupsGetDests2(http, &dests);
+
+ if(unlikely(num_dest_total == 0)) {
+ // reconnect to cups to check if the server is down.
+ httpClose(http);
+ http = httpConnect2(cupsServer(), ippPort(), NULL, AF_UNSPEC, cupsEncryption(), 0, netdata_update_every * 1000, NULL);
+ if(http == NULL) {
+ error("cups daemon is not running. Exiting!");
+ exit(1);
+ }
+ }
+
+ cups_dest_t *curr_dest = dests;
+ int counter = 0;
+ while (counter < num_dest_total) {
+ if (counter != 0) {
+ curr_dest++;
+ }
+ counter++;
+
+ const char *printer_uri_supported = cupsGetOption("printer-uri-supported", curr_dest->num_options, curr_dest->options);
+ if (!printer_uri_supported) {
+ if(debug)
+ fprintf(stderr, "destination %s discovered, but not yet setup as a local printer", curr_dest->name);
+ continue;
+ }
+
+ const char *printer_is_accepting_jobs = cupsGetOption("printer-is-accepting-jobs", curr_dest->num_options, curr_dest->options);
+ if (printer_is_accepting_jobs && !strcmp(printer_is_accepting_jobs, "true")) {
+ num_dest_accepting_jobs++;
+ }
+
+ const char *printer_is_shared = cupsGetOption("printer-is-shared", curr_dest->num_options, curr_dest->options);
+ if (printer_is_shared && !strcmp(printer_is_shared, "true")) {
+ num_dest_shared++;
+ }
+
+            // IPP printer-state values: 3 = idle, 4 = processing, 5 = stopped
+            int printer_state = cupsGetIntegerOption("printer-state", curr_dest->num_options, curr_dest->options);
+ switch (printer_state) {
+ case 3:
+ num_dest_idle++;
+ break;
+ case 4:
+ num_dest_printing++;
+ break;
+ case 5:
+ num_dest_stopped++;
+ break;
+ case INT_MIN:
+ if(debug)
+ fprintf(stderr, "printer state is missing for destination %s", curr_dest->name);
+ break;
+ default:
+ error("Unknown printer state (%d) found.", printer_state);
+ break;
+ }
+
+ /*
+ * flag job metrics to print values.
+         * This is needed to also report destinations with zero active jobs.
+ */
+ struct job_metrics *jm = get_job_metrics(curr_dest->name);
+ jm->is_collected = 1;
+ }
+ cupsFreeDests(num_dest_total, dests);
+
+ if (unlikely(netdata_exit))
+ break;
+
+ cups_job_t *jobs, *curr_job;
+ int num_jobs = cupsGetJobs2(http, &jobs, NULL, 0, CUPS_WHICHJOBS_ACTIVE);
+ int i;
+ for (i = num_jobs, curr_job = jobs; i > 0; i--, curr_job++) {
+ struct job_metrics *jm = get_job_metrics(curr_job->dest);
+ jm->is_collected = 1;
+
+ switch (curr_job->state) {
+ case IPP_JOB_PENDING:
+ jm->num_pending++;
+ jm->size_pending += curr_job->size;
+ global_job_metrics.num_pending++;
+ global_job_metrics.size_pending += curr_job->size;
+ break;
+ case IPP_JOB_HELD:
+ jm->num_held++;
+ jm->size_held += curr_job->size;
+ global_job_metrics.num_held++;
+ global_job_metrics.size_held += curr_job->size;
+ break;
+ case IPP_JOB_PROCESSING:
+ jm->num_processing++;
+ jm->size_processing += curr_job->size;
+ global_job_metrics.num_processing++;
+ global_job_metrics.size_processing += curr_job->size;
+ break;
+ default:
+ error("Unsupported job state (%u) found.", curr_job->state);
+ break;
+ }
+ }
+ cupsFreeJobs(num_jobs, jobs);
+
+ dictionary_get_all_name_value(dict_dest_job_metrics, collect_job_metrics, NULL);
+
+ static int cups_printer_by_option_created = 0;
+ if (unlikely(!cups_printer_by_option_created))
+ {
+ cups_printer_by_option_created = 1;
+ printf("CHART cups.dest_state '' 'Destinations by state' dests overview dests stacked 100000 %i\n", netdata_update_every);
+ printf("DIMENSION idle '' absolute 1 1\n");
+ printf("DIMENSION printing '' absolute 1 1\n");
+ printf("DIMENSION stopped '' absolute 1 1\n");
+
+ printf("CHART cups.dest_option '' 'Destinations by option' dests overview dests line 100001 %i\n", netdata_update_every);
+ printf("DIMENSION total '' absolute 1 1\n");
+ printf("DIMENSION acceptingjobs '' absolute 1 1\n");
+ printf("DIMENSION shared '' absolute 1 1\n");
+
+ printf("CHART cups.job_num '' 'Total active job number' jobs overview job_num stacked 100002 %i\n", netdata_update_every);
+ printf("DIMENSION pending '' absolute 1 1\n");
+ printf("DIMENSION held '' absolute 1 1\n");
+ printf("DIMENSION processing '' absolute 1 1\n");
+
+ printf("CHART cups.job_size '' 'Total active job size' KB overview job_size stacked 100003 %i\n", netdata_update_every);
+ printf("DIMENSION pending '' absolute 1 1\n");
+ printf("DIMENSION held '' absolute 1 1\n");
+ printf("DIMENSION processing '' absolute 1 1\n");
+ }
+
+ printf(
+ "BEGIN cups.dest_state\n"
+ "SET idle = %d\n"
+ "SET printing = %d\n"
+ "SET stopped = %d\n"
+ "END\n",
+ num_dest_idle, num_dest_printing, num_dest_stopped);
+ printf(
+ "BEGIN cups.dest_option\n"
+ "SET total = %d\n"
+ "SET acceptingjobs = %d\n"
+ "SET shared = %d\n"
+ "END\n",
+ num_dest_total, num_dest_accepting_jobs, num_dest_shared);
+ printf(
+ "BEGIN cups.job_num\n"
+ "SET pending = %d\n"
+ "SET held = %d\n"
+ "SET processing = %d\n"
+ "END\n",
+ global_job_metrics.num_pending, global_job_metrics.num_held, global_job_metrics.num_processing);
+ printf(
+ "BEGIN cups.job_size\n"
+ "SET pending = %d\n"
+ "SET held = %d\n"
+ "SET processing = %d\n"
+ "END\n",
+ global_job_metrics.size_pending, global_job_metrics.size_held, global_job_metrics.size_processing);
+
+ fflush(stdout);
+
+ if (unlikely(netdata_exit))
+ break;
+
+        // restart check: exit after 14400 seconds so that the netdata
+        // daemon can restart the plugin
+        if (now_monotonic_sec() - started_t > 14400)
+ break;
+ }
+
+ httpClose(http);
+ info("CUPS process exiting");
+}
+
+#else // !HAVE_CUPS
+
+int main(int argc, char **argv)
+{
+    (void)argc;
+    (void)argv;
+
+    fatal("cups.plugin is not compiled.");
+}
+
+#endif // !HAVE_CUPS
diff --git a/collectors/diskspace.plugin/Makefile.in b/collectors/diskspace.plugin/Makefile.in
deleted file mode 100644
index ceebc545..00000000
--- a/collectors/diskspace.plugin/Makefile.in
+++ /dev/null
@@ -1,464 +0,0 @@
-# Makefile.in generated by automake 1.14.1 from Makefile.am.
-# @configure_input@
-
-# Copyright (C) 1994-2013 Free Software Foundation, Inc.
-
-# This Makefile.in is free software; the Free Software Foundation
-# gives unlimited permission to copy and/or distribute it,
-# with or without modifications, as long as this notice is preserved.
-
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
-# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
-# PARTICULAR PURPOSE.
-
-@SET_MAKE@
-
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-VPATH = @srcdir@
-am__is_gnu_make = test -n '$(MAKEFILE_LIST)' && test -n '$(MAKELEVEL)'
-am__make_running_with_option = \
- case $${target_option-} in \
- ?) ;; \
- *) echo "am__make_running_with_option: internal error: invalid" \
- "target option '$${target_option-}' specified" >&2; \
- exit 1;; \
- esac; \
- has_opt=no; \
- sane_makeflags=$$MAKEFLAGS; \
- if $(am__is_gnu_make); then \
- sane_makeflags=$$MFLAGS; \
- else \
- case $$MAKEFLAGS in \
- *\\[\ \ ]*) \
- bs=\\; \
- sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \
- | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \
- esac; \
- fi; \
- skip_next=no; \
- strip_trailopt () \
- { \
- flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \
- }; \
- for flg in $$sane_makeflags; do \
- test $$skip_next = yes && { skip_next=no; continue; }; \
- case $$flg in \
- *=*|--*) continue;; \
- -*I) strip_trailopt 'I'; skip_next=yes;; \
- -*I?*) strip_trailopt 'I';; \
- -*O) strip_trailopt 'O'; skip_next=yes;; \
- -*O?*) strip_trailopt 'O';; \
- -*l) strip_trailopt 'l'; skip_next=yes;; \
- -*l?*) strip_trailopt 'l';; \
- -[dEDm]) skip_next=yes;; \
- -[JT]) skip_next=yes;; \
- esac; \
- case $$flg in \
- *$$target_option*) has_opt=yes; break;; \
- esac; \
- done; \
- test $$has_opt = yes
-am__make_dryrun = (target_option=n; $(am__make_running_with_option))
-am__make_keepgoing = (target_option=k; $(am__make_running_with_option))
-pkgdatadir = $(datadir)/@PACKAGE@
-pkgincludedir = $(includedir)/@PACKAGE@
-pkglibdir = $(libdir)/@PACKAGE@
-pkglibexecdir = $(libexecdir)/@PACKAGE@
-am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd
-install_sh_DATA = $(install_sh) -c -m 644
-install_sh_PROGRAM = $(install_sh) -c
-install_sh_SCRIPT = $(install_sh) -c
-INSTALL_HEADER = $(INSTALL_DATA)
-transform = $(program_transform_name)
-NORMAL_INSTALL = :
-PRE_INSTALL = :
-POST_INSTALL = :
-NORMAL_UNINSTALL = :
-PRE_UNINSTALL = :
-POST_UNINSTALL = :
-build_triplet = @build@
-host_triplet = @host@
-subdir = collectors/diskspace.plugin
-DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/Makefile.am \
- $(dist_noinst_DATA)
-ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
-am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \
- $(top_srcdir)/build/m4/ax_c__generic.m4 \
- $(top_srcdir)/build/m4/ax_c_lto.m4 \
- $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \
- $(top_srcdir)/build/m4/ax_c_mallopt.m4 \
- $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \
- $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \
- $(top_srcdir)/build/m4/ax_pthread.m4 \
- $(top_srcdir)/build/m4/jemalloc.m4 \
- $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac
-am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
- $(ACLOCAL_M4)
-mkinstalldirs = $(install_sh) -d
-CONFIG_HEADER = $(top_builddir)/config.h
-CONFIG_CLEAN_FILES =
-CONFIG_CLEAN_VPATH_FILES =
-AM_V_P = $(am__v_P_@AM_V@)
-am__v_P_ = $(am__v_P_@AM_DEFAULT_V@)
-am__v_P_0 = false
-am__v_P_1 = :
-AM_V_GEN = $(am__v_GEN_@AM_V@)
-am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@)
-am__v_GEN_0 = @echo " GEN " $@;
-am__v_GEN_1 =
-AM_V_at = $(am__v_at_@AM_V@)
-am__v_at_ = $(am__v_at_@AM_DEFAULT_V@)
-am__v_at_0 = @
-am__v_at_1 =
-SOURCES =
-DIST_SOURCES =
-am__can_run_installinfo = \
- case $$AM_UPDATE_INFO_DIR in \
- n|no|NO) false;; \
- *) (install-info --version) >/dev/null 2>&1;; \
- esac
-DATA = $(dist_noinst_DATA)
-am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP)
-DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
-ACLOCAL = @ACLOCAL@
-AMTAR = @AMTAR@
-AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@
-AUTOCONF = @AUTOCONF@
-AUTOHEADER = @AUTOHEADER@
-AUTOMAKE = @AUTOMAKE@
-AWK = @AWK@
-CC = @CC@
-CCDEPMODE = @CCDEPMODE@
-CFLAGS = @CFLAGS@
-CPP = @CPP@
-CPPFLAGS = @CPPFLAGS@
-CYGPATH_W = @CYGPATH_W@
-DEFS = @DEFS@
-DEPDIR = @DEPDIR@
-ECHO_C = @ECHO_C@
-ECHO_N = @ECHO_N@
-ECHO_T = @ECHO_T@
-EGREP = @EGREP@
-EXEEXT = @EXEEXT@
-GREP = @GREP@
-INSTALL = @INSTALL@
-INSTALL_DATA = @INSTALL_DATA@
-INSTALL_PROGRAM = @INSTALL_PROGRAM@
-INSTALL_SCRIPT = @INSTALL_SCRIPT@
-INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
-IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@
-IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@
-LDFLAGS = @LDFLAGS@
-LIBCAP_CFLAGS = @LIBCAP_CFLAGS@
-LIBCAP_LIBS = @LIBCAP_LIBS@
-LIBMNL_CFLAGS = @LIBMNL_CFLAGS@
-LIBMNL_LIBS = @LIBMNL_LIBS@
-LIBOBJS = @LIBOBJS@
-LIBS = @LIBS@
-LTLIBOBJS = @LTLIBOBJS@
-MAINT = @MAINT@
-MAKEINFO = @MAKEINFO@
-MATH_CFLAGS = @MATH_CFLAGS@
-MATH_LIBS = @MATH_LIBS@
-MKDIR_P = @MKDIR_P@
-NFACCT_CFLAGS = @NFACCT_CFLAGS@
-NFACCT_LIBS = @NFACCT_LIBS@
-OBJEXT = @OBJEXT@
-OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@
-OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@
-OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@
-OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@
-OPTIONAL_MATH_CLFAGS = @OPTIONAL_MATH_CLFAGS@
-OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@
-OPTIONAL_NFACCT_CLFAGS = @OPTIONAL_NFACCT_CLFAGS@
-OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@
-OPTIONAL_UUID_CLFAGS = @OPTIONAL_UUID_CLFAGS@
-OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@
-OPTIONAL_ZLIB_CLFAGS = @OPTIONAL_ZLIB_CLFAGS@
-OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@
-PACKAGE = @PACKAGE@
-PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@
-PACKAGE_NAME = @PACKAGE_NAME@
-PACKAGE_RPM_RELEASE = @PACKAGE_RPM_RELEASE@
-PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@
-PACKAGE_STRING = @PACKAGE_STRING@
-PACKAGE_TARNAME = @PACKAGE_TARNAME@
-PACKAGE_URL = @PACKAGE_URL@
-PACKAGE_VERSION = @PACKAGE_VERSION@
-PATH_SEPARATOR = @PATH_SEPARATOR@
-PKG_CONFIG = @PKG_CONFIG@
-PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@
-PKG_CONFIG_PATH = @PKG_CONFIG_PATH@
-PTHREAD_CC = @PTHREAD_CC@
-PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
-PTHREAD_LIBS = @PTHREAD_LIBS@
-SET_MAKE = @SET_MAKE@
-SHELL = @SHELL@
-SSE_CANDIDATE = @SSE_CANDIDATE@
-STRIP = @STRIP@
-UUID_CFLAGS = @UUID_CFLAGS@
-UUID_LIBS = @UUID_LIBS@
-VERSION = @VERSION@
-ZLIB_CFLAGS = @ZLIB_CFLAGS@
-ZLIB_LIBS = @ZLIB_LIBS@
-abs_builddir = @abs_builddir@
-abs_srcdir = @abs_srcdir@
-abs_top_builddir = @abs_top_builddir@
-abs_top_srcdir = @abs_top_srcdir@
-ac_ct_CC = @ac_ct_CC@
-am__include = @am__include@
-am__leading_dot = @am__leading_dot@
-am__quote = @am__quote@
-am__tar = @am__tar@
-am__untar = @am__untar@
-ax_pthread_config = @ax_pthread_config@
-bindir = @bindir@
-build = @build@
-build_alias = @build_alias@
-build_cpu = @build_cpu@
-build_os = @build_os@
-build_target = @build_target@
-build_vendor = @build_vendor@
-builddir = @builddir@
-cachedir = @cachedir@
-chartsdir = @chartsdir@
-configdir = @configdir@
-datadir = @datadir@
-datarootdir = @datarootdir@
-docdir = @docdir@
-dvidir = @dvidir@
-exec_prefix = @exec_prefix@
-has_jemalloc = @has_jemalloc@
-has_tcmalloc = @has_tcmalloc@
-host = @host@
-host_alias = @host_alias@
-host_cpu = @host_cpu@
-host_os = @host_os@
-host_vendor = @host_vendor@
-htmldir = @htmldir@
-includedir = @includedir@
-infodir = @infodir@
-install_sh = @install_sh@
-libconfigdir = @libconfigdir@
-libdir = @libdir@
-libexecdir = @libexecdir@
-localedir = @localedir@
-localstatedir = @localstatedir@
-logdir = @logdir@
-mandir = @mandir@
-mkdir_p = @mkdir_p@
-nodedir = @nodedir@
-oldincludedir = @oldincludedir@
-pdfdir = @pdfdir@
-pluginsdir = @pluginsdir@
-prefix = @prefix@
-program_transform_name = @program_transform_name@
-psdir = @psdir@
-pythondir = @pythondir@
-registrydir = @registrydir@
-sbindir = @sbindir@
-sharedstatedir = @sharedstatedir@
-srcdir = @srcdir@
-sysconfdir = @sysconfdir@
-target_alias = @target_alias@
-top_build_prefix = @top_build_prefix@
-top_builddir = @top_builddir@
-top_srcdir = @top_srcdir@
-varlibdir = @varlibdir@
-webdir = @webdir@
-AUTOMAKE_OPTIONS = subdir-objects
-MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
-dist_noinst_DATA = \
- README.md \
- $(NULL)
-
-all: all-am
-
-.SUFFIXES:
-$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps)
- @for dep in $?; do \
- case '$(am__configure_deps)' in \
- *$$dep*) \
- ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \
- && { if test -f $@; then exit 0; else break; fi; }; \
- exit 1;; \
- esac; \
- done; \
- echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu collectors/diskspace.plugin/Makefile'; \
- $(am__cd) $(top_srcdir) && \
- $(AUTOMAKE) --gnu collectors/diskspace.plugin/Makefile
-.PRECIOUS: Makefile
-Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
- @case '$?' in \
- *config.status*) \
- cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \
- *) \
- echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \
- cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \
- esac;
-
-$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES)
- cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
-
-$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps)
- cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
-$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps)
- cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
-$(am__aclocal_m4_deps):
-tags TAGS:
-
-ctags CTAGS:
-
-cscope cscopelist:
-
-
-distdir: $(DISTFILES)
- @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
- topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
- list='$(DISTFILES)'; \
- dist_files=`for file in $$list; do echo $$file; done | \
- sed -e "s|^$$srcdirstrip/||;t" \
- -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \
- case $$dist_files in \
- */*) $(MKDIR_P) `echo "$$dist_files" | \
- sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \
- sort -u` ;; \
- esac; \
- for file in $$dist_files; do \
- if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \
- if test -d $$d/$$file; then \
- dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \
- if test -d "$(distdir)/$$file"; then \
- find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
- fi; \
- if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \
- cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \
- find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
- fi; \
- cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \
- else \
- test -f "$(distdir)/$$file" \
- || cp -p $$d/$$file "$(distdir)/$$file" \
- || exit 1; \
- fi; \
- done
-check-am: all-am
-check: check-am
-all-am: Makefile $(DATA)
-installdirs:
-install: install-am
-install-exec: install-exec-am
-install-data: install-data-am
-uninstall: uninstall-am
-
-install-am: all-am
- @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
-
-installcheck: installcheck-am
-install-strip:
- if test -z '$(STRIP)'; then \
- $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
- install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
- install; \
- else \
- $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
- install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
- "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \
- fi
-mostlyclean-generic:
-
-clean-generic:
-
-distclean-generic:
- -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
- -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES)
-
-maintainer-clean-generic:
- @echo "This command is intended for maintainers to use"
- @echo "it deletes files that may require special tools to rebuild."
- -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES)
-clean: clean-am
-
-clean-am: clean-generic mostlyclean-am
-
-distclean: distclean-am
- -rm -f Makefile
-distclean-am: clean-am distclean-generic
-
-dvi: dvi-am
-
-dvi-am:
-
-html: html-am
-
-html-am:
-
-info: info-am
-
-info-am:
-
-install-data-am:
-
-install-dvi: install-dvi-am
-
-install-dvi-am:
-
-install-exec-am:
-
-install-html: install-html-am
-
-install-html-am:
-
-install-info: install-info-am
-
-install-info-am:
-
-install-man:
-
-install-pdf: install-pdf-am
-
-install-pdf-am:
-
-install-ps: install-ps-am
-
-install-ps-am:
-
-installcheck-am:
-
-maintainer-clean: maintainer-clean-am
- -rm -f Makefile
-maintainer-clean-am: distclean-am maintainer-clean-generic
-
-mostlyclean: mostlyclean-am
-
-mostlyclean-am: mostlyclean-generic
-
-pdf: pdf-am
-
-pdf-am:
-
-ps: ps-am
-
-ps-am:
-
-uninstall-am:
-
-.MAKE: install-am install-strip
-
-.PHONY: all all-am check check-am clean clean-generic cscopelist-am \
- ctags-am distclean distclean-generic distdir dvi dvi-am html \
- html-am info info-am install install-am install-data \
- install-data-am install-dvi install-dvi-am install-exec \
- install-exec-am install-html install-html-am install-info \
- install-info-am install-man install-pdf install-pdf-am \
- install-ps install-ps-am install-strip installcheck \
- installcheck-am installdirs maintainer-clean \
- maintainer-clean-generic mostlyclean mostlyclean-generic pdf \
- pdf-am ps ps-am tags-am uninstall uninstall-am
-
-
-# Tell versions [3.59,3.63) of GNU make to not export all variables.
-# Otherwise a system limit (for SysV at least) may be exceeded.
-.NOEXPORT:
diff --git a/collectors/diskspace.plugin/README.md b/collectors/diskspace.plugin/README.md
index f7d0e7b4..d743312c 100644
--- a/collectors/diskspace.plugin/README.md
+++ b/collectors/diskspace.plugin/README.md
@@ -2,5 +2,34 @@
This plugin monitors the disk space usage of mounted disks, under Linux.
+Two charts are available for every mount:
+ - Disk Space Usage
+ - Disk Files (inodes) Usage
+
+## configuration
+
+Simple patterns can be used to exclude mounts from the shown statistics, based on path or filesystem. By default, read-only mounts are not displayed; to display them, set `yes` for the chart instead of `auto`.
+
+```
+[plugin:proc:diskspace]
+ # remove charts of unmounted disks = yes
+ # update every = 1
+ # check for new mount points every = 15
+ # exclude space metrics on paths = /proc/* /sys/* /var/run/user/* /run/user/* /snap/* /var/lib/docker/*
+ # exclude space metrics on filesystems = *gvfs *gluster* *s3fs *ipfs *davfs2 *httpfs *sshfs *gdfs *moosefs fusectl
+ # space usage for all disks = auto
+ # inodes usage for all disks = auto
+```
+
+Charts can be enabled or disabled for each mount separately:
+
+```
+[plugin:proc:diskspace:/]
+ # space usage = auto
+ # inodes usage = auto
+```
+
> for disks performance monitoring, see the `proc` plugin, [here](../proc.plugin/#monitoring-disks)
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fdiskspace.plugin%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
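A rough, illustrative approximation of how a space-separated pattern list such as `/proc/* /sys/*` from the config above can be tested against a mount point, using fnmatch(3); netdata's real matcher is its own simple_pattern code, so this is only a sketch, and `mount_excluded` is a hypothetical name:

```
/* Illustrative only: glob each space-separated pattern against the
 * mount point; the first match excludes the mount from the charts. */
#include <fnmatch.h>
#include <stdbool.h>
#include <string.h>

static bool mount_excluded(const char *mount_point, const char *patterns) {
    char buf[1024];
    strncpy(buf, patterns, sizeof(buf) - 1);
    buf[sizeof(buf) - 1] = '\0';

    char *save = NULL;
    for (char *p = strtok_r(buf, " ", &save); p; p = strtok_r(NULL, " ", &save))
        if (fnmatch(p, mount_point, 0) == 0)    /* glob-style match */
            return true;                        /* excluded from charts */
    return false;
}
```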
diff --git a/collectors/diskspace.plugin/plugin_diskspace.c b/collectors/diskspace.plugin/plugin_diskspace.c
index dca7c907..77b87b09 100644
--- a/collectors/diskspace.plugin/plugin_diskspace.c
+++ b/collectors/diskspace.plugin/plugin_diskspace.c
@@ -190,7 +190,7 @@ static inline void do_disk_space_stats(struct mountinfo *mi, int update_every) {
if(unlikely(m->do_space == CONFIG_BOOLEAN_NO && m->do_inodes == CONFIG_BOOLEAN_NO))
return;
- if(unlikely(mi->flags & MOUNTINFO_READONLY && !m->collected))
+ if(unlikely(mi->flags & MOUNTINFO_READONLY && !m->collected && m->do_space != CONFIG_BOOLEAN_YES && m->do_inodes != CONFIG_BOOLEAN_YES))
return;
struct statvfs buff_statvfs;
@@ -263,7 +263,7 @@ static inline void do_disk_space_stats(struct mountinfo *mi, int update_every) {
, family
, "disk.space"
, title
- , "GB"
+ , "GiB"
, PLUGIN_DISKSPACE_NAME
, NULL
, NETDATA_CHART_PRIO_DISKSPACE_SPACE
@@ -303,7 +303,7 @@ static inline void do_disk_space_stats(struct mountinfo *mi, int update_every) {
, family
, "disk.inodes"
, title
- , "Inodes"
+ , "inodes"
, PLUGIN_DISKSPACE_NAME
, NULL
, NETDATA_CHART_PRIO_DISKSPACE_INODES
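A minimal sketch of the decision the first hunk above encodes: a read-only mount that has never produced data is skipped only while both charts are still on `auto`; an explicit `yes` for either chart now forces collection. The type and constant names below are illustrative stand-ins for netdata's `CONFIG_BOOLEAN_*` tri-state values:

```
#include <stdbool.h>

typedef enum { CFG_NO, CFG_YES, CFG_AUTO } cfg_tristate;

/* Mirrors the patched condition: skip only if read-only, never collected,
 * and neither chart is explicitly enabled. */
static bool skip_mount(bool read_only, bool collected_before,
                       cfg_tristate do_space, cfg_tristate do_inodes) {
    return read_only
        && !collected_before
        && do_space  != CFG_YES
        && do_inodes != CFG_YES;
}
```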
diff --git a/collectors/fping.plugin/Makefile.in b/collectors/fping.plugin/Makefile.in
deleted file mode 100644
index 67b9699b..00000000
--- a/collectors/fping.plugin/Makefile.in
+++ /dev/null
@@ -1,591 +0,0 @@
-# Makefile.in generated by automake 1.14.1 from Makefile.am.
-# @configure_input@
-
-# Copyright (C) 1994-2013 Free Software Foundation, Inc.
-
-# This Makefile.in is free software; the Free Software Foundation
-# gives unlimited permission to copy and/or distribute it,
-# with or without modifications, as long as this notice is preserved.
-
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
-# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
-# PARTICULAR PURPOSE.
-
-@SET_MAKE@
-
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-
-VPATH = @srcdir@
-am__is_gnu_make = test -n '$(MAKEFILE_LIST)' && test -n '$(MAKELEVEL)'
-am__make_running_with_option = \
- case $${target_option-} in \
- ?) ;; \
- *) echo "am__make_running_with_option: internal error: invalid" \
- "target option '$${target_option-}' specified" >&2; \
- exit 1;; \
- esac; \
- has_opt=no; \
- sane_makeflags=$$MAKEFLAGS; \
- if $(am__is_gnu_make); then \
- sane_makeflags=$$MFLAGS; \
- else \
- case $$MAKEFLAGS in \
- *\\[\ \ ]*) \
- bs=\\; \
- sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \
- | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \
- esac; \
- fi; \
- skip_next=no; \
- strip_trailopt () \
- { \
- flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \
- }; \
- for flg in $$sane_makeflags; do \
- test $$skip_next = yes && { skip_next=no; continue; }; \
- case $$flg in \
- *=*|--*) continue;; \
- -*I) strip_trailopt 'I'; skip_next=yes;; \
- -*I?*) strip_trailopt 'I';; \
- -*O) strip_trailopt 'O'; skip_next=yes;; \
- -*O?*) strip_trailopt 'O';; \
- -*l) strip_trailopt 'l'; skip_next=yes;; \
- -*l?*) strip_trailopt 'l';; \
- -[dEDm]) skip_next=yes;; \
- -[JT]) skip_next=yes;; \
- esac; \
- case $$flg in \
- *$$target_option*) has_opt=yes; break;; \
- esac; \
- done; \
- test $$has_opt = yes
-am__make_dryrun = (target_option=n; $(am__make_running_with_option))
-am__make_keepgoing = (target_option=k; $(am__make_running_with_option))
-pkgdatadir = $(datadir)/@PACKAGE@
-pkgincludedir = $(includedir)/@PACKAGE@
-pkglibdir = $(libdir)/@PACKAGE@
-pkglibexecdir = $(libexecdir)/@PACKAGE@
-am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd
-install_sh_DATA = $(install_sh) -c -m 644
-install_sh_PROGRAM = $(install_sh) -c
-install_sh_SCRIPT = $(install_sh) -c
-INSTALL_HEADER = $(INSTALL_DATA)
-transform = $(program_transform_name)
-NORMAL_INSTALL = :
-PRE_INSTALL = :
-POST_INSTALL = :
-NORMAL_UNINSTALL = :
-PRE_UNINSTALL = :
-POST_UNINSTALL = :
-build_triplet = @build@
-host_triplet = @host@
-DIST_COMMON = $(top_srcdir)/build/subst.inc $(srcdir)/Makefile.in \
- $(srcdir)/Makefile.am $(dist_plugins_SCRIPTS) \
- $(dist_libconfig_DATA) $(dist_noinst_DATA)
-subdir = collectors/fping.plugin
-ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
-am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \
- $(top_srcdir)/build/m4/ax_c__generic.m4 \
- $(top_srcdir)/build/m4/ax_c_lto.m4 \
- $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \
- $(top_srcdir)/build/m4/ax_c_mallopt.m4 \
- $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \
- $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \
- $(top_srcdir)/build/m4/ax_pthread.m4 \
- $(top_srcdir)/build/m4/jemalloc.m4 \
- $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac
-am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
- $(ACLOCAL_M4)
-mkinstalldirs = $(install_sh) -d
-CONFIG_HEADER = $(top_builddir)/config.h
-CONFIG_CLEAN_FILES =
-CONFIG_CLEAN_VPATH_FILES =
-am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`;
-am__vpath_adj = case $$p in \
- $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \
- *) f=$$p;; \
- esac;
-am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`;
-am__install_max = 40
-am__nobase_strip_setup = \
- srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'`
-am__nobase_strip = \
- for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||"
-am__nobase_list = $(am__nobase_strip_setup); \
- for p in $$list; do echo "$$p $$p"; done | \
- sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \
- $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \
- if (++n[$$2] == $(am__install_max)) \
- { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \
- END { for (dir in files) print dir, files[dir] }'
-am__base_list = \
- sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \
- sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g'
-am__uninstall_files_from_dir = { \
- test -z "$$files" \
- || { test ! -d "$$dir" && test ! -f "$$dir" && test ! -r "$$dir"; } \
- || { echo " ( cd '$$dir' && rm -f" $$files ")"; \
- $(am__cd) "$$dir" && rm -f $$files; }; \
- }
-am__installdirs = "$(DESTDIR)$(pluginsdir)" \
- "$(DESTDIR)$(libconfigdir)"
-SCRIPTS = $(dist_plugins_SCRIPTS)
-AM_V_P = $(am__v_P_@AM_V@)
-am__v_P_ = $(am__v_P_@AM_DEFAULT_V@)
-am__v_P_0 = false
-am__v_P_1 = :
-AM_V_GEN = $(am__v_GEN_@AM_V@)
-am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@)
-am__v_GEN_0 = @echo " GEN " $@;
-am__v_GEN_1 =
-AM_V_at = $(am__v_at_@AM_V@)
-am__v_at_ = $(am__v_at_@AM_DEFAULT_V@)
-am__v_at_0 = @
-am__v_at_1 =
-SOURCES =
-DIST_SOURCES =
-am__can_run_installinfo = \
- case $$AM_UPDATE_INFO_DIR in \
- n|no|NO) false;; \
- *) (install-info --version) >/dev/null 2>&1;; \
- esac
-DATA = $(dist_libconfig_DATA) $(dist_noinst_DATA)
-am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP)
-DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
-ACLOCAL = @ACLOCAL@
-AMTAR = @AMTAR@
-AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@
-AUTOCONF = @AUTOCONF@
-AUTOHEADER = @AUTOHEADER@
-AUTOMAKE = @AUTOMAKE@
-AWK = @AWK@
-CC = @CC@
-CCDEPMODE = @CCDEPMODE@
-CFLAGS = @CFLAGS@
-CPP = @CPP@
-CPPFLAGS = @CPPFLAGS@
-CYGPATH_W = @CYGPATH_W@
-DEFS = @DEFS@
-DEPDIR = @DEPDIR@
-ECHO_C = @ECHO_C@
-ECHO_N = @ECHO_N@
-ECHO_T = @ECHO_T@
-EGREP = @EGREP@
-EXEEXT = @EXEEXT@
-GREP = @GREP@
-INSTALL = @INSTALL@
-INSTALL_DATA = @INSTALL_DATA@
-INSTALL_PROGRAM = @INSTALL_PROGRAM@
-INSTALL_SCRIPT = @INSTALL_SCRIPT@
-INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
-IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@
-IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@
-LDFLAGS = @LDFLAGS@
-LIBCAP_CFLAGS = @LIBCAP_CFLAGS@
-LIBCAP_LIBS = @LIBCAP_LIBS@
-LIBMNL_CFLAGS = @LIBMNL_CFLAGS@
-LIBMNL_LIBS = @LIBMNL_LIBS@
-LIBOBJS = @LIBOBJS@
-LIBS = @LIBS@
-LTLIBOBJS = @LTLIBOBJS@
-MAINT = @MAINT@
-MAKEINFO = @MAKEINFO@
-MATH_CFLAGS = @MATH_CFLAGS@
-MATH_LIBS = @MATH_LIBS@
-MKDIR_P = @MKDIR_P@
-NFACCT_CFLAGS = @NFACCT_CFLAGS@
-NFACCT_LIBS = @NFACCT_LIBS@
-OBJEXT = @OBJEXT@
-OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@
-OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@
-OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@
-OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@
-OPTIONAL_MATH_CLFAGS = @OPTIONAL_MATH_CLFAGS@
-OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@
-OPTIONAL_NFACCT_CLFAGS = @OPTIONAL_NFACCT_CLFAGS@
-OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@
-OPTIONAL_UUID_CLFAGS = @OPTIONAL_UUID_CLFAGS@
-OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@
-OPTIONAL_ZLIB_CLFAGS = @OPTIONAL_ZLIB_CLFAGS@
-OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@
-PACKAGE = @PACKAGE@
-PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@
-PACKAGE_NAME = @PACKAGE_NAME@
-PACKAGE_RPM_RELEASE = @PACKAGE_RPM_RELEASE@
-PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@
-PACKAGE_STRING = @PACKAGE_STRING@
-PACKAGE_TARNAME = @PACKAGE_TARNAME@
-PACKAGE_URL = @PACKAGE_URL@
-PACKAGE_VERSION = @PACKAGE_VERSION@
-PATH_SEPARATOR = @PATH_SEPARATOR@
-PKG_CONFIG = @PKG_CONFIG@
-PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@
-PKG_CONFIG_PATH = @PKG_CONFIG_PATH@
-PTHREAD_CC = @PTHREAD_CC@
-PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
-PTHREAD_LIBS = @PTHREAD_LIBS@
-SET_MAKE = @SET_MAKE@
-SHELL = @SHELL@
-SSE_CANDIDATE = @SSE_CANDIDATE@
-STRIP = @STRIP@
-UUID_CFLAGS = @UUID_CFLAGS@
-UUID_LIBS = @UUID_LIBS@
-VERSION = @VERSION@
-ZLIB_CFLAGS = @ZLIB_CFLAGS@
-ZLIB_LIBS = @ZLIB_LIBS@
-abs_builddir = @abs_builddir@
-abs_srcdir = @abs_srcdir@
-abs_top_builddir = @abs_top_builddir@
-abs_top_srcdir = @abs_top_srcdir@
-ac_ct_CC = @ac_ct_CC@
-am__include = @am__include@
-am__leading_dot = @am__leading_dot@
-am__quote = @am__quote@
-am__tar = @am__tar@
-am__untar = @am__untar@
-ax_pthread_config = @ax_pthread_config@
-bindir = @bindir@
-build = @build@
-build_alias = @build_alias@
-build_cpu = @build_cpu@
-build_os = @build_os@
-build_target = @build_target@
-build_vendor = @build_vendor@
-builddir = @builddir@
-cachedir = @cachedir@
-chartsdir = @chartsdir@
-configdir = @configdir@
-datadir = @datadir@
-datarootdir = @datarootdir@
-docdir = @docdir@
-dvidir = @dvidir@
-exec_prefix = @exec_prefix@
-has_jemalloc = @has_jemalloc@
-has_tcmalloc = @has_tcmalloc@
-host = @host@
-host_alias = @host_alias@
-host_cpu = @host_cpu@
-host_os = @host_os@
-host_vendor = @host_vendor@
-htmldir = @htmldir@
-includedir = @includedir@
-infodir = @infodir@
-install_sh = @install_sh@
-libconfigdir = @libconfigdir@
-libdir = @libdir@
-libexecdir = @libexecdir@
-localedir = @localedir@
-localstatedir = @localstatedir@
-logdir = @logdir@
-mandir = @mandir@
-mkdir_p = @mkdir_p@
-nodedir = @nodedir@
-oldincludedir = @oldincludedir@
-pdfdir = @pdfdir@
-pluginsdir = @pluginsdir@
-prefix = @prefix@
-program_transform_name = @program_transform_name@
-psdir = @psdir@
-pythondir = @pythondir@
-registrydir = @registrydir@
-sbindir = @sbindir@
-sharedstatedir = @sharedstatedir@
-srcdir = @srcdir@
-sysconfdir = @sysconfdir@
-target_alias = @target_alias@
-top_build_prefix = @top_build_prefix@
-top_builddir = @top_builddir@
-top_srcdir = @top_srcdir@
-varlibdir = @varlibdir@
-webdir = @webdir@
-AUTOMAKE_OPTIONS = subdir-objects
-MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
-CLEANFILES = \
- fping.plugin \
- $(NULL)
-
-SUFFIXES = .in
-dist_plugins_SCRIPTS = \
- fping.plugin \
- $(NULL)
-
-dist_noinst_DATA = \
- fping.plugin.in \
- README.md \
- $(NULL)
-
-dist_libconfig_DATA = \
- fping.conf \
- $(NULL)
-
-all: all-am
-
-.SUFFIXES:
-.SUFFIXES: .in
-$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(top_srcdir)/build/subst.inc $(am__configure_deps)
- @for dep in $?; do \
- case '$(am__configure_deps)' in \
- *$$dep*) \
- ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \
- && { if test -f $@; then exit 0; else break; fi; }; \
- exit 1;; \
- esac; \
- done; \
- echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu collectors/fping.plugin/Makefile'; \
- $(am__cd) $(top_srcdir) && \
- $(AUTOMAKE) --gnu collectors/fping.plugin/Makefile
-.PRECIOUS: Makefile
-Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
- @case '$?' in \
- *config.status*) \
- cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \
- *) \
- echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \
- cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \
- esac;
-$(top_srcdir)/build/subst.inc:
-
-$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES)
- cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
-
-$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps)
- cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
-$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps)
- cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
-$(am__aclocal_m4_deps):
-install-dist_pluginsSCRIPTS: $(dist_plugins_SCRIPTS)
- @$(NORMAL_INSTALL)
- @list='$(dist_plugins_SCRIPTS)'; test -n "$(pluginsdir)" || list=; \
- if test -n "$$list"; then \
- echo " $(MKDIR_P) '$(DESTDIR)$(pluginsdir)'"; \
- $(MKDIR_P) "$(DESTDIR)$(pluginsdir)" || exit 1; \
- fi; \
- for p in $$list; do \
- if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
- if test -f "$$d$$p"; then echo "$$d$$p"; echo "$$p"; else :; fi; \
- done | \
- sed -e 'p;s,.*/,,;n' \
- -e 'h;s|.*|.|' \
- -e 'p;x;s,.*/,,;$(transform)' | sed 'N;N;N;s,\n, ,g' | \
- $(AWK) 'BEGIN { files["."] = ""; dirs["."] = 1; } \
- { d=$$3; if (dirs[d] != 1) { print "d", d; dirs[d] = 1 } \
- if ($$2 == $$4) { files[d] = files[d] " " $$1; \
- if (++n[d] == $(am__install_max)) { \
- print "f", d, files[d]; n[d] = 0; files[d] = "" } } \
- else { print "f", d "/" $$4, $$1 } } \
- END { for (d in files) print "f", d, files[d] }' | \
- while read type dir files; do \
- if test "$$dir" = .; then dir=; else dir=/$$dir; fi; \
- test -z "$$files" || { \
- echo " $(INSTALL_SCRIPT) $$files '$(DESTDIR)$(pluginsdir)$$dir'"; \
- $(INSTALL_SCRIPT) $$files "$(DESTDIR)$(pluginsdir)$$dir" || exit $$?; \
- } \
- ; done
-
-uninstall-dist_pluginsSCRIPTS:
- @$(NORMAL_UNINSTALL)
- @list='$(dist_plugins_SCRIPTS)'; test -n "$(pluginsdir)" || exit 0; \
- files=`for p in $$list; do echo "$$p"; done | \
- sed -e 's,.*/,,;$(transform)'`; \
- dir='$(DESTDIR)$(pluginsdir)'; $(am__uninstall_files_from_dir)
-install-dist_libconfigDATA: $(dist_libconfig_DATA)
- @$(NORMAL_INSTALL)
- @list='$(dist_libconfig_DATA)'; test -n "$(libconfigdir)" || list=; \
- if test -n "$$list"; then \
- echo " $(MKDIR_P) '$(DESTDIR)$(libconfigdir)'"; \
- $(MKDIR_P) "$(DESTDIR)$(libconfigdir)" || exit 1; \
- fi; \
- for p in $$list; do \
- if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
- echo "$$d$$p"; \
- done | $(am__base_list) | \
- while read files; do \
- echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(libconfigdir)'"; \
- $(INSTALL_DATA) $$files "$(DESTDIR)$(libconfigdir)" || exit $$?; \
- done
-
-uninstall-dist_libconfigDATA:
- @$(NORMAL_UNINSTALL)
- @list='$(dist_libconfig_DATA)'; test -n "$(libconfigdir)" || list=; \
- files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
- dir='$(DESTDIR)$(libconfigdir)'; $(am__uninstall_files_from_dir)
-tags TAGS:
-
-ctags CTAGS:
-
-cscope cscopelist:
-
-
-distdir: $(DISTFILES)
- @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
- topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
- list='$(DISTFILES)'; \
- dist_files=`for file in $$list; do echo $$file; done | \
- sed -e "s|^$$srcdirstrip/||;t" \
- -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \
- case $$dist_files in \
- */*) $(MKDIR_P) `echo "$$dist_files" | \
- sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \
- sort -u` ;; \
- esac; \
- for file in $$dist_files; do \
- if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \
- if test -d $$d/$$file; then \
- dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \
- if test -d "$(distdir)/$$file"; then \
- find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
- fi; \
- if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \
- cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \
- find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
- fi; \
- cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \
- else \
- test -f "$(distdir)/$$file" \
- || cp -p $$d/$$file "$(distdir)/$$file" \
- || exit 1; \
- fi; \
- done
-check-am: all-am
-check: check-am
-all-am: Makefile $(SCRIPTS) $(DATA)
-installdirs:
- for dir in "$(DESTDIR)$(pluginsdir)" "$(DESTDIR)$(libconfigdir)"; do \
- test -z "$$dir" || $(MKDIR_P) "$$dir"; \
- done
-install: install-am
-install-exec: install-exec-am
-install-data: install-data-am
-uninstall: uninstall-am
-
-install-am: all-am
- @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
-
-installcheck: installcheck-am
-install-strip:
- if test -z '$(STRIP)'; then \
- $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
- install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
- install; \
- else \
- $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
- install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
- "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \
- fi
-mostlyclean-generic:
-
-clean-generic:
- -test -z "$(CLEANFILES)" || rm -f $(CLEANFILES)
-
-distclean-generic:
- -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
- -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES)
-
-maintainer-clean-generic:
- @echo "This command is intended for maintainers to use"
- @echo "it deletes files that may require special tools to rebuild."
- -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES)
-clean: clean-am
-
-clean-am: clean-generic mostlyclean-am
-
-distclean: distclean-am
- -rm -f Makefile
-distclean-am: clean-am distclean-generic
-
-dvi: dvi-am
-
-dvi-am:
-
-html: html-am
-
-html-am:
-
-info: info-am
-
-info-am:
-
-install-data-am: install-dist_libconfigDATA \
- install-dist_pluginsSCRIPTS
-
-install-dvi: install-dvi-am
-
-install-dvi-am:
-
-install-exec-am:
-
-install-html: install-html-am
-
-install-html-am:
-
-install-info: install-info-am
-
-install-info-am:
-
-install-man:
-
-install-pdf: install-pdf-am
-
-install-pdf-am:
-
-install-ps: install-ps-am
-
-install-ps-am:
-
-installcheck-am:
-
-maintainer-clean: maintainer-clean-am
- -rm -f Makefile
-maintainer-clean-am: distclean-am maintainer-clean-generic
-
-mostlyclean: mostlyclean-am
-
-mostlyclean-am: mostlyclean-generic
-
-pdf: pdf-am
-
-pdf-am:
-
-ps: ps-am
-
-ps-am:
-
-uninstall-am: uninstall-dist_libconfigDATA \
- uninstall-dist_pluginsSCRIPTS
-
-.MAKE: install-am install-strip
-
-.PHONY: all all-am check check-am clean clean-generic cscopelist-am \
- ctags-am distclean distclean-generic distdir dvi dvi-am html \
- html-am info info-am install install-am install-data \
- install-data-am install-dist_libconfigDATA \
- install-dist_pluginsSCRIPTS install-dvi install-dvi-am \
- install-exec install-exec-am install-html install-html-am \
- install-info install-info-am install-man install-pdf \
- install-pdf-am install-ps install-ps-am install-strip \
- installcheck installcheck-am installdirs maintainer-clean \
- maintainer-clean-generic mostlyclean mostlyclean-generic pdf \
- pdf-am ps ps-am tags-am uninstall uninstall-am \
- uninstall-dist_libconfigDATA uninstall-dist_pluginsSCRIPTS
-
-.in:
- if sed \
- -e 's#[@]localstatedir_POST@#$(localstatedir)#g' \
- -e 's#[@]sbindir_POST@#$(sbindir)#g' \
- -e 's#[@]sysconfdir_POST@#$(sysconfdir)#g' \
- -e 's#[@]pythondir_POST@#$(pythondir)#g' \
- -e 's#[@]configdir_POST@#$(configdir)#g' \
- -e 's#[@]libconfigdir_POST@#$(libconfigdir)#g' \
- -e 's#[@]cachedir_POST@#$(cachedir)#g' \
- $< > $@.tmp; then \
- mv "$@.tmp" "$@"; \
- else \
- rm -f "$@.tmp"; \
- false; \
- fi
-
-# Tell versions [3.59,3.63) of GNU make to not export all variables.
-# Otherwise a system limit (for SysV at least) may be exceeded.
-.NOEXPORT:
diff --git a/collectors/fping.plugin/README.md b/collectors/fping.plugin/README.md
index a83b7912..d5f83fdf 100644
--- a/collectors/fping.plugin/README.md
+++ b/collectors/fping.plugin/README.md
@@ -94,3 +94,5 @@ That's it. netdata will detect the new plugin and start it.
You can name the new plugin any name you like.
Just make sure the plugin and the configuration file have the same name.
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Ffping.plugin%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/collectors/fping.plugin/fping.plugin b/collectors/fping.plugin/fping.plugin
deleted file mode 100644
index cf8f17e9..00000000
--- a/collectors/fping.plugin/fping.plugin
+++ /dev/null
@@ -1,200 +0,0 @@
-#!/usr/bin/env bash
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-# netdata
-# real-time performance and health monitoring, done right!
-# (C) 2017 Costa Tsaousis <costa@tsaousis.gr>
-# GPL v3+
-#
-# This plugin requires a latest version of fping.
-# You can compile it from source, by running me with option: install
-
-export PATH="${PATH}:/sbin:/usr/sbin:/usr/local/sbin"
-export LC_ALL=C
-
-if [ "${1}" = "install" ]
- then
- [ "${UID}" != 0 ] && echo >&2 "Please run me as root. This will install a single binary file: /usr/local/bin/fping." && exit 1
-
- run() {
- printf >&2 " > "
- printf >&2 "%q " "${@}"
- printf >&2 "\n"
- "${@}" || exit 1
- }
-
- download() {
- local curl="$(which curl 2>/dev/null || command -v curl 2>/dev/null)"
- [ ! -z "${curl}" ] && run curl -s -L "${1}" && return 0
-
- local wget="$(which wget 2>/dev/null || command -v wget 2>/dev/null)"
- [ ! -z "${wget}" ] && run wget -q -O - "${1}" && return 0
-
- echo >&2 "Cannot find 'curl' or 'wget' in this system." && exit 1
- }
-
- [ ! -d /usr/src ] && run mkdir -p /usr/src
- [ ! -d /usr/local/bin ] && run mkdir -p /usr/local/bin
-
- run cd /usr/src
-
- if [ -d fping-4.0 ]
- then
- run rm -rf fping-4.0 || exit 1
- fi
-
- download 'https://github.com/schweikert/fping/releases/download/v4.0/fping-4.0.tar.gz' | run tar -zxvpf -
- [ $? -ne 0 ] && exit 1
- run cd fping-4.0 || exit 1
-
- run ./configure --prefix=/usr/local
- run make clean
- run make
- if [ -f /usr/local/bin/fping ]
- then
- run mv -f /usr/local/bin/fping /usr/local/bin/fping.old
- fi
- run mv src/fping /usr/local/bin/fping
- run chown root:root /usr/local/bin/fping
- run chmod 4755 /usr/local/bin/fping
- echo >&2
- echo >&2 "All done, you have a compatible fping now at /usr/local/bin/fping."
- echo >&2
-
- fping="$(which fping 2>/dev/null || command -v fping 2>/dev/null)"
- if [ "${fping}" != "/usr/local/bin/fping" ]
- then
- echo >&2 "You have another fping installed at: ${fping}."
- echo >&2 "Please set:"
- echo >&2
- echo >&2 " fping=\"/usr/local/bin/fping\""
- echo >&2
- echo >&2 "at /etc/netdata/fping.conf"
- echo >&2
- fi
- exit 0
-fi
-
-# -----------------------------------------------------------------------------
-
-PROGRAM_NAME="$(basename "${0}")"
-
-logdate() {
- date "+%Y-%m-%d %H:%M:%S"
-}
-
-log() {
- local status="${1}"
- shift
-
- echo >&2 "$(logdate): ${PROGRAM_NAME}: ${status}: ${*}"
-
-}
-
-warning() {
- log WARNING "${@}"
-}
-
-error() {
- log ERROR "${@}"
-}
-
-info() {
- log INFO "${@}"
-}
-
-fatal() {
- log FATAL "${@}"
- echo "DISABLE"
- exit 1
-}
-
-debug=0
-debug() {
- [ $debug -eq 1 ] && log DEBUG "${@}"
-}
-
-# -----------------------------------------------------------------------------
-
-# store in ${plugin} the name we run under
-# this allows us to copy/link fping.plugin under a different name
-# to have multiple fping plugins running with different settings
-plugin="${PROGRAM_NAME/.plugin/}"
-
-
-# -----------------------------------------------------------------------------
-
-# the frequency to send info to netdata
-# passed by netdata as the first parameter
-update_every="${1-1}"
-
-# the netdata configuration directory
-# passed by netdata as an environment variable
-[ -z "${NETDATA_USER_CONFIG_DIR}" ] && NETDATA_USER_CONFIG_DIR="/usr/local/etc/netdata"
-[ -z "${NETDATA_STOCK_CONFIG_DIR}" ] && NETDATA_STOCK_CONFIG_DIR="/usr/local/lib/netdata/conf.d"
-
-# -----------------------------------------------------------------------------
-# configuration options
-# can be overwritten at /etc/netdata/fping.conf
-
-# the fping binary to use
-# we need one that can output netdata friendly info (supporting: -N)
-# if you have multiple versions, put here the full filename of the right one
-fping="$( which fping 2>/dev/null || command -v fping 2>/dev/null )"
-
-# a space separated list of hosts to fping
-# we suggest to put names here and the IPs of these names in /etc/hosts
-hosts=""
-
-# the time in milliseconds (1 sec = 1000 ms)
-# to ping the hosts - by default 5 pings per host per iteration
-ping_every="$((update_every * 1000 / 5))"
-
-# fping options
-fping_opts="-R -b 56 -i 1 -r 0 -t 5000"
-
-# -----------------------------------------------------------------------------
-# load the configuration files
-
-for CONFIG in "${NETDATA_STOCK_CONFIG_DIR}/${plugin}.conf" "${NETDATA_USER_CONFIG_DIR}/${plugin}.conf"
-do
- if [ -f "${CONFIG}" ]
- then
- info "Loading config file '${CONFIG}'..."
- source "${CONFIG}"
- [ $? -ne 0 ] && error "Failed to load config file '${CONFIG}'."
- else
- warning "Cannot find file '${CONFIG}'."
- fi
-done
-
-if [ -z "${hosts}" ]
-then
- fatal "no hosts configured - nothing to do."
-fi
-
-if [ -z "${fping}" ]
-then
- fatal "fping command is not found. Please set its full path in '${NETDATA_USER_CONFIG_DIR}/${plugin}.conf'"
-fi
-
-if [ ! -x "${fping}" ]
-then
- fatal "fping command '${fping}' is not executable - cannot proceed."
-fi
-
-if [ ${ping_every} -lt 20 ]
- then
- warning "ping every was set to ${ping_every} but 20 is the minimum for non-root users. Setting it to 20 ms."
- ping_every=20
-fi
-
-# the fping options we will use
-options=( -N -l -Q ${update_every} -p ${ping_every} ${fping_opts} ${hosts} )
-
-# execute fping
-info "starting fping: ${fping} ${options[*]}"
-exec "${fping}" "${options[@]}"
-
-# if we cannot execute fping, stop
-fatal "command '${fping} ${options[*]}' failed to be executed (returned code $?)."
diff --git a/collectors/freebsd.plugin/Makefile.in b/collectors/freebsd.plugin/Makefile.in
deleted file mode 100644
index d3332677..00000000
--- a/collectors/freebsd.plugin/Makefile.in
+++ /dev/null
@@ -1,464 +0,0 @@
-# Makefile.in generated by automake 1.14.1 from Makefile.am.
-# @configure_input@
-
-# Copyright (C) 1994-2013 Free Software Foundation, Inc.
-
-# This Makefile.in is free software; the Free Software Foundation
-# gives unlimited permission to copy and/or distribute it,
-# with or without modifications, as long as this notice is preserved.
-
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
-# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
-# PARTICULAR PURPOSE.
-
-@SET_MAKE@
-
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-VPATH = @srcdir@
-am__is_gnu_make = test -n '$(MAKEFILE_LIST)' && test -n '$(MAKELEVEL)'
-am__make_running_with_option = \
- case $${target_option-} in \
- ?) ;; \
- *) echo "am__make_running_with_option: internal error: invalid" \
- "target option '$${target_option-}' specified" >&2; \
- exit 1;; \
- esac; \
- has_opt=no; \
- sane_makeflags=$$MAKEFLAGS; \
- if $(am__is_gnu_make); then \
- sane_makeflags=$$MFLAGS; \
- else \
- case $$MAKEFLAGS in \
- *\\[\ \ ]*) \
- bs=\\; \
- sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \
- | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \
- esac; \
- fi; \
- skip_next=no; \
- strip_trailopt () \
- { \
- flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \
- }; \
- for flg in $$sane_makeflags; do \
- test $$skip_next = yes && { skip_next=no; continue; }; \
- case $$flg in \
- *=*|--*) continue;; \
- -*I) strip_trailopt 'I'; skip_next=yes;; \
- -*I?*) strip_trailopt 'I';; \
- -*O) strip_trailopt 'O'; skip_next=yes;; \
- -*O?*) strip_trailopt 'O';; \
- -*l) strip_trailopt 'l'; skip_next=yes;; \
- -*l?*) strip_trailopt 'l';; \
- -[dEDm]) skip_next=yes;; \
- -[JT]) skip_next=yes;; \
- esac; \
- case $$flg in \
- *$$target_option*) has_opt=yes; break;; \
- esac; \
- done; \
- test $$has_opt = yes
-am__make_dryrun = (target_option=n; $(am__make_running_with_option))
-am__make_keepgoing = (target_option=k; $(am__make_running_with_option))
-pkgdatadir = $(datadir)/@PACKAGE@
-pkgincludedir = $(includedir)/@PACKAGE@
-pkglibdir = $(libdir)/@PACKAGE@
-pkglibexecdir = $(libexecdir)/@PACKAGE@
-am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd
-install_sh_DATA = $(install_sh) -c -m 644
-install_sh_PROGRAM = $(install_sh) -c
-install_sh_SCRIPT = $(install_sh) -c
-INSTALL_HEADER = $(INSTALL_DATA)
-transform = $(program_transform_name)
-NORMAL_INSTALL = :
-PRE_INSTALL = :
-POST_INSTALL = :
-NORMAL_UNINSTALL = :
-PRE_UNINSTALL = :
-POST_UNINSTALL = :
-build_triplet = @build@
-host_triplet = @host@
-subdir = collectors/freebsd.plugin
-DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/Makefile.am \
- $(dist_noinst_DATA)
-ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
-am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \
- $(top_srcdir)/build/m4/ax_c__generic.m4 \
- $(top_srcdir)/build/m4/ax_c_lto.m4 \
- $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \
- $(top_srcdir)/build/m4/ax_c_mallopt.m4 \
- $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \
- $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \
- $(top_srcdir)/build/m4/ax_pthread.m4 \
- $(top_srcdir)/build/m4/jemalloc.m4 \
- $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac
-am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
- $(ACLOCAL_M4)
-mkinstalldirs = $(install_sh) -d
-CONFIG_HEADER = $(top_builddir)/config.h
-CONFIG_CLEAN_FILES =
-CONFIG_CLEAN_VPATH_FILES =
-AM_V_P = $(am__v_P_@AM_V@)
-am__v_P_ = $(am__v_P_@AM_DEFAULT_V@)
-am__v_P_0 = false
-am__v_P_1 = :
-AM_V_GEN = $(am__v_GEN_@AM_V@)
-am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@)
-am__v_GEN_0 = @echo " GEN " $@;
-am__v_GEN_1 =
-AM_V_at = $(am__v_at_@AM_V@)
-am__v_at_ = $(am__v_at_@AM_DEFAULT_V@)
-am__v_at_0 = @
-am__v_at_1 =
-SOURCES =
-DIST_SOURCES =
-am__can_run_installinfo = \
- case $$AM_UPDATE_INFO_DIR in \
- n|no|NO) false;; \
- *) (install-info --version) >/dev/null 2>&1;; \
- esac
-DATA = $(dist_noinst_DATA)
-am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP)
-DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
-ACLOCAL = @ACLOCAL@
-AMTAR = @AMTAR@
-AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@
-AUTOCONF = @AUTOCONF@
-AUTOHEADER = @AUTOHEADER@
-AUTOMAKE = @AUTOMAKE@
-AWK = @AWK@
-CC = @CC@
-CCDEPMODE = @CCDEPMODE@
-CFLAGS = @CFLAGS@
-CPP = @CPP@
-CPPFLAGS = @CPPFLAGS@
-CYGPATH_W = @CYGPATH_W@
-DEFS = @DEFS@
-DEPDIR = @DEPDIR@
-ECHO_C = @ECHO_C@
-ECHO_N = @ECHO_N@
-ECHO_T = @ECHO_T@
-EGREP = @EGREP@
-EXEEXT = @EXEEXT@
-GREP = @GREP@
-INSTALL = @INSTALL@
-INSTALL_DATA = @INSTALL_DATA@
-INSTALL_PROGRAM = @INSTALL_PROGRAM@
-INSTALL_SCRIPT = @INSTALL_SCRIPT@
-INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
-IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@
-IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@
-LDFLAGS = @LDFLAGS@
-LIBCAP_CFLAGS = @LIBCAP_CFLAGS@
-LIBCAP_LIBS = @LIBCAP_LIBS@
-LIBMNL_CFLAGS = @LIBMNL_CFLAGS@
-LIBMNL_LIBS = @LIBMNL_LIBS@
-LIBOBJS = @LIBOBJS@
-LIBS = @LIBS@
-LTLIBOBJS = @LTLIBOBJS@
-MAINT = @MAINT@
-MAKEINFO = @MAKEINFO@
-MATH_CFLAGS = @MATH_CFLAGS@
-MATH_LIBS = @MATH_LIBS@
-MKDIR_P = @MKDIR_P@
-NFACCT_CFLAGS = @NFACCT_CFLAGS@
-NFACCT_LIBS = @NFACCT_LIBS@
-OBJEXT = @OBJEXT@
-OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@
-OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@
-OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@
-OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@
-OPTIONAL_MATH_CLFAGS = @OPTIONAL_MATH_CLFAGS@
-OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@
-OPTIONAL_NFACCT_CLFAGS = @OPTIONAL_NFACCT_CLFAGS@
-OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@
-OPTIONAL_UUID_CLFAGS = @OPTIONAL_UUID_CLFAGS@
-OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@
-OPTIONAL_ZLIB_CLFAGS = @OPTIONAL_ZLIB_CLFAGS@
-OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@
-PACKAGE = @PACKAGE@
-PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@
-PACKAGE_NAME = @PACKAGE_NAME@
-PACKAGE_RPM_RELEASE = @PACKAGE_RPM_RELEASE@
-PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@
-PACKAGE_STRING = @PACKAGE_STRING@
-PACKAGE_TARNAME = @PACKAGE_TARNAME@
-PACKAGE_URL = @PACKAGE_URL@
-PACKAGE_VERSION = @PACKAGE_VERSION@
-PATH_SEPARATOR = @PATH_SEPARATOR@
-PKG_CONFIG = @PKG_CONFIG@
-PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@
-PKG_CONFIG_PATH = @PKG_CONFIG_PATH@
-PTHREAD_CC = @PTHREAD_CC@
-PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
-PTHREAD_LIBS = @PTHREAD_LIBS@
-SET_MAKE = @SET_MAKE@
-SHELL = @SHELL@
-SSE_CANDIDATE = @SSE_CANDIDATE@
-STRIP = @STRIP@
-UUID_CFLAGS = @UUID_CFLAGS@
-UUID_LIBS = @UUID_LIBS@
-VERSION = @VERSION@
-ZLIB_CFLAGS = @ZLIB_CFLAGS@
-ZLIB_LIBS = @ZLIB_LIBS@
-abs_builddir = @abs_builddir@
-abs_srcdir = @abs_srcdir@
-abs_top_builddir = @abs_top_builddir@
-abs_top_srcdir = @abs_top_srcdir@
-ac_ct_CC = @ac_ct_CC@
-am__include = @am__include@
-am__leading_dot = @am__leading_dot@
-am__quote = @am__quote@
-am__tar = @am__tar@
-am__untar = @am__untar@
-ax_pthread_config = @ax_pthread_config@
-bindir = @bindir@
-build = @build@
-build_alias = @build_alias@
-build_cpu = @build_cpu@
-build_os = @build_os@
-build_target = @build_target@
-build_vendor = @build_vendor@
-builddir = @builddir@
-cachedir = @cachedir@
-chartsdir = @chartsdir@
-configdir = @configdir@
-datadir = @datadir@
-datarootdir = @datarootdir@
-docdir = @docdir@
-dvidir = @dvidir@
-exec_prefix = @exec_prefix@
-has_jemalloc = @has_jemalloc@
-has_tcmalloc = @has_tcmalloc@
-host = @host@
-host_alias = @host_alias@
-host_cpu = @host_cpu@
-host_os = @host_os@
-host_vendor = @host_vendor@
-htmldir = @htmldir@
-includedir = @includedir@
-infodir = @infodir@
-install_sh = @install_sh@
-libconfigdir = @libconfigdir@
-libdir = @libdir@
-libexecdir = @libexecdir@
-localedir = @localedir@
-localstatedir = @localstatedir@
-logdir = @logdir@
-mandir = @mandir@
-mkdir_p = @mkdir_p@
-nodedir = @nodedir@
-oldincludedir = @oldincludedir@
-pdfdir = @pdfdir@
-pluginsdir = @pluginsdir@
-prefix = @prefix@
-program_transform_name = @program_transform_name@
-psdir = @psdir@
-pythondir = @pythondir@
-registrydir = @registrydir@
-sbindir = @sbindir@
-sharedstatedir = @sharedstatedir@
-srcdir = @srcdir@
-sysconfdir = @sysconfdir@
-target_alias = @target_alias@
-top_build_prefix = @top_build_prefix@
-top_builddir = @top_builddir@
-top_srcdir = @top_srcdir@
-varlibdir = @varlibdir@
-webdir = @webdir@
-AUTOMAKE_OPTIONS = subdir-objects
-MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
-dist_noinst_DATA = \
- README.md \
- $(NULL)
-
-all: all-am
-
-.SUFFIXES:
-$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps)
- @for dep in $?; do \
- case '$(am__configure_deps)' in \
- *$$dep*) \
- ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \
- && { if test -f $@; then exit 0; else break; fi; }; \
- exit 1;; \
- esac; \
- done; \
- echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu collectors/freebsd.plugin/Makefile'; \
- $(am__cd) $(top_srcdir) && \
- $(AUTOMAKE) --gnu collectors/freebsd.plugin/Makefile
-.PRECIOUS: Makefile
-Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
- @case '$?' in \
- *config.status*) \
- cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \
- *) \
- echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \
- cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \
- esac;
-
-$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES)
- cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
-
-$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps)
- cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
-$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps)
- cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
-$(am__aclocal_m4_deps):
-tags TAGS:
-
-ctags CTAGS:
-
-cscope cscopelist:
-
-
-distdir: $(DISTFILES)
- @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
- topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
- list='$(DISTFILES)'; \
- dist_files=`for file in $$list; do echo $$file; done | \
- sed -e "s|^$$srcdirstrip/||;t" \
- -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \
- case $$dist_files in \
- */*) $(MKDIR_P) `echo "$$dist_files" | \
- sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \
- sort -u` ;; \
- esac; \
- for file in $$dist_files; do \
- if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \
- if test -d $$d/$$file; then \
- dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \
- if test -d "$(distdir)/$$file"; then \
- find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
- fi; \
- if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \
- cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \
- find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
- fi; \
- cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \
- else \
- test -f "$(distdir)/$$file" \
- || cp -p $$d/$$file "$(distdir)/$$file" \
- || exit 1; \
- fi; \
- done
-check-am: all-am
-check: check-am
-all-am: Makefile $(DATA)
-installdirs:
-install: install-am
-install-exec: install-exec-am
-install-data: install-data-am
-uninstall: uninstall-am
-
-install-am: all-am
- @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
-
-installcheck: installcheck-am
-install-strip:
- if test -z '$(STRIP)'; then \
- $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
- install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
- install; \
- else \
- $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
- install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
- "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \
- fi
-mostlyclean-generic:
-
-clean-generic:
-
-distclean-generic:
- -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
- -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES)
-
-maintainer-clean-generic:
- @echo "This command is intended for maintainers to use"
- @echo "it deletes files that may require special tools to rebuild."
- -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES)
-clean: clean-am
-
-clean-am: clean-generic mostlyclean-am
-
-distclean: distclean-am
- -rm -f Makefile
-distclean-am: clean-am distclean-generic
-
-dvi: dvi-am
-
-dvi-am:
-
-html: html-am
-
-html-am:
-
-info: info-am
-
-info-am:
-
-install-data-am:
-
-install-dvi: install-dvi-am
-
-install-dvi-am:
-
-install-exec-am:
-
-install-html: install-html-am
-
-install-html-am:
-
-install-info: install-info-am
-
-install-info-am:
-
-install-man:
-
-install-pdf: install-pdf-am
-
-install-pdf-am:
-
-install-ps: install-ps-am
-
-install-ps-am:
-
-installcheck-am:
-
-maintainer-clean: maintainer-clean-am
- -rm -f Makefile
-maintainer-clean-am: distclean-am maintainer-clean-generic
-
-mostlyclean: mostlyclean-am
-
-mostlyclean-am: mostlyclean-generic
-
-pdf: pdf-am
-
-pdf-am:
-
-ps: ps-am
-
-ps-am:
-
-uninstall-am:
-
-.MAKE: install-am install-strip
-
-.PHONY: all all-am check check-am clean clean-generic cscopelist-am \
- ctags-am distclean distclean-generic distdir dvi dvi-am html \
- html-am info info-am install install-am install-data \
- install-data-am install-dvi install-dvi-am install-exec \
- install-exec-am install-html install-html-am install-info \
- install-info-am install-man install-pdf install-pdf-am \
- install-ps install-ps-am install-strip installcheck \
- installcheck-am installdirs maintainer-clean \
- maintainer-clean-generic mostlyclean mostlyclean-generic pdf \
- pdf-am ps ps-am tags-am uninstall uninstall-am
-
-
-# Tell versions [3.59,3.63) of GNU make to not export all variables.
-# Otherwise a system limit (for SysV at least) may be exceeded.
-.NOEXPORT:
diff --git a/collectors/freebsd.plugin/README.md b/collectors/freebsd.plugin/README.md
index e6302f42..237e6092 100644
--- a/collectors/freebsd.plugin/README.md
+++ b/collectors/freebsd.plugin/README.md
@@ -1,3 +1,5 @@
-# freebsd
+# freebsd.plugin
Collects resource usage and performance data on FreeBSD systems
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Ffreebsd.plugin%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/collectors/freebsd.plugin/freebsd_devstat.c b/collectors/freebsd.plugin/freebsd_devstat.c
index 10279aab..81a301e4 100644
--- a/collectors/freebsd.plugin/freebsd_devstat.c
+++ b/collectors/freebsd.plugin/freebsd_devstat.c
@@ -256,7 +256,7 @@ int do_kern_devstat(int update_every, usec_t dt) {
disks_found = 0;
- dstat = devstat_data + sizeof(long); // skip generation number
+ dstat = (struct devstat*)((char*)devstat_data + sizeof(long)); // skip generation number
for (i = 0; i < numdevs; i++) {
if (likely(do_system_io)) {
@@ -360,10 +360,10 @@ int do_kern_devstat(int update_every, usec_t dt) {
disk,
"disk.io",
"Disk I/O Bandwidth",
- "kilobytes/s",
- "freebsd.plugin",
+ "KiB/s",
+ "freebsd.plugin",
"devstat",
- NETDATA_CHART_PRIO_DISK_IO,
+ NETDATA_CHART_PRIO_DISK_IO,
update_every,
RRDSET_TYPE_AREA
);
@@ -398,9 +398,9 @@ int do_kern_devstat(int update_every, usec_t dt) {
"disk.ops",
"Disk Completed I/O Operations",
"operations/s",
- "freebsd.plugin",
- "devstat",
- NETDATA_CHART_PRIO_DISK_OPS,
+ "freebsd.plugin",
+ "devstat",
+ NETDATA_CHART_PRIO_DISK_OPS,
update_every,
RRDSET_TYPE_LINE
);
@@ -437,9 +437,9 @@ int do_kern_devstat(int update_every, usec_t dt) {
"disk.qops",
"Disk Current I/O Operations",
"operations",
- "freebsd.plugin",
+ "freebsd.plugin",
"devstat",
- NETDATA_CHART_PRIO_DISK_QOPS,
+ NETDATA_CHART_PRIO_DISK_QOPS,
update_every,
RRDSET_TYPE_LINE
);
@@ -466,9 +466,9 @@ int do_kern_devstat(int update_every, usec_t dt) {
"disk.util",
"Disk Utilization Time",
"% of time working",
- "freebsd.plugin",
+ "freebsd.plugin",
"devstat",
- NETDATA_CHART_PRIO_DISK_UTIL,
+ NETDATA_CHART_PRIO_DISK_UTIL,
update_every,
RRDSET_TYPE_AREA
);
@@ -499,9 +499,9 @@ int do_kern_devstat(int update_every, usec_t dt) {
"disk.iotime",
"Disk Total I/O Time",
"milliseconds/s",
- "freebsd.plugin",
+ "freebsd.plugin",
"devstat",
- NETDATA_CHART_PRIO_DISK_IOTIME,
+ NETDATA_CHART_PRIO_DISK_IOTIME,
update_every,
RRDSET_TYPE_LINE
);
@@ -546,10 +546,10 @@ int do_kern_devstat(int update_every, usec_t dt) {
disk,
"disk.await",
"Average Completed I/O Operation Time",
- "ms per operation",
- "freebsd.plugin",
+ "milliseconds/operation",
+ "freebsd.plugin",
"devstat",
- NETDATA_CHART_PRIO_DISK_AWAIT,
+ NETDATA_CHART_PRIO_DISK_AWAIT,
update_every,
RRDSET_TYPE_LINE
);
@@ -611,10 +611,10 @@ int do_kern_devstat(int update_every, usec_t dt) {
disk,
"disk.avgsz",
"Average Completed I/O Operation Bandwidth",
- "kilobytes per operation",
- "freebsd.plugin",
+ "KiB/operation",
+ "freebsd.plugin",
"devstat",
- NETDATA_CHART_PRIO_DISK_AVGSZ,
+ NETDATA_CHART_PRIO_DISK_AVGSZ,
update_every,
RRDSET_TYPE_AREA
);
@@ -668,10 +668,10 @@ int do_kern_devstat(int update_every, usec_t dt) {
disk,
"disk.svctm",
"Average Service Time",
- "ms per operation",
- "freebsd.plugin",
+ "milliseconds/operation",
+ "freebsd.plugin",
"devstat",
- NETDATA_CHART_PRIO_DISK_SVCTM,
+ NETDATA_CHART_PRIO_DISK_SVCTM,
update_every,
RRDSET_TYPE_LINE
);
@@ -728,10 +728,10 @@ int do_kern_devstat(int update_every, usec_t dt) {
"disk",
NULL,
"Disk I/O",
- "kilobytes/s",
- "freebsd.plugin",
+ "KiB/s",
+ "freebsd.plugin",
"devstat",
- NETDATA_CHART_PRIO_SYSTEM_IO,
+ NETDATA_CHART_PRIO_SYSTEM_IO,
update_every,
RRDSET_TYPE_AREA
);
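The cast in the first hunk of this file matters because C pointer arithmetic scales by the pointed-to type: skipping the leading generation number must advance the buffer by exactly `sizeof(long)` bytes, hence the `char*` detour. A sketch under that assumption (the struct contents are a placeholder, not the real `struct devstat` layout):

```
#include <stddef.h>

struct devstat_like { int dummy; };  /* placeholder, not the real layout */

static struct devstat_like *skip_generation(void *devstat_data) {
    /* byte-wise advance past the generation number, then reinterpret */
    return (struct devstat_like *)((char *)devstat_data + sizeof(long));
}
```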
diff --git a/collectors/freebsd.plugin/freebsd_getifaddrs.c b/collectors/freebsd.plugin/freebsd_getifaddrs.c
index e1584585..ac1638ee 100644
--- a/collectors/freebsd.plugin/freebsd_getifaddrs.c
+++ b/collectors/freebsd.plugin/freebsd_getifaddrs.c
@@ -144,7 +144,7 @@ int do_getifaddrs(int update_every, usec_t dt) {
(void)dt;
#define DEFAULT_EXLUDED_INTERFACES "lo*"
-#define DEFAULT_PHYSICAL_INTERFACES "igb* ix* cxl* em* ixl* ixlv* bge* ixgbe*"
+#define DEFAULT_PHYSICAL_INTERFACES "igb* ix* cxl* em* ixl* ixlv* bge* ixgbe* vtnet*"
#define CONFIG_SECTION_GETIFADDRS "plugin:freebsd:getifaddrs"
static int enable_new_interfaces = -1;
@@ -156,7 +156,7 @@ int do_getifaddrs(int update_every, usec_t dt) {
enable_new_interfaces = config_get_boolean_ondemand(CONFIG_SECTION_GETIFADDRS,
"enable new interfaces detected at runtime",
CONFIG_BOOLEAN_AUTO);
-
+
do_bandwidth_net = config_get_boolean_ondemand(CONFIG_SECTION_GETIFADDRS, "total bandwidth for physical interfaces",
CONFIG_BOOLEAN_AUTO);
do_packets_net = config_get_boolean_ondemand(CONFIG_SECTION_GETIFADDRS, "total packets for physical interfaces",
@@ -229,7 +229,7 @@ int do_getifaddrs(int update_every, usec_t dt) {
// --------------------------------------------------------------------
if (likely(do_bandwidth_net)) {
-
+
iftot.ift_ibytes = iftot.ift_obytes = 0;
for (ifa = ifap; ifa; ifa = ifa->ifa_next) {
if (ifa->ifa_addr->sa_family != AF_LINK)
@@ -239,10 +239,10 @@ int do_getifaddrs(int update_every, usec_t dt) {
iftot.ift_ibytes += IFA_DATA(ibytes);
iftot.ift_obytes += IFA_DATA(obytes);
}
-
+
static RRDSET *st = NULL;
static RRDDIM *rd_in = NULL, *rd_out = NULL;
-
+
if (unlikely(!st)) {
st = rrdset_create_localhost("system",
"net",
@@ -251,23 +251,23 @@ int do_getifaddrs(int update_every, usec_t dt) {
NULL,
"Network Traffic",
"kilobits/s",
- "freebsd.plugin",
+ "freebsd.plugin",
"getifaddrs",
- NETDATA_CHART_PRIO_SYSTEM_NET,
+ NETDATA_CHART_PRIO_SYSTEM_NET,
update_every,
RRDSET_TYPE_AREA
);
-
+
rd_in = rrddim_add(st, "InOctets", "received", 8, BITS_IN_A_KILOBIT, RRD_ALGORITHM_INCREMENTAL);
rd_out = rrddim_add(st, "OutOctets", "sent", -8, BITS_IN_A_KILOBIT, RRD_ALGORITHM_INCREMENTAL);
} else
rrdset_next(st);
-
+
rrddim_set_by_pointer(st, rd_in, iftot.ift_ibytes);
rrddim_set_by_pointer(st, rd_out, iftot.ift_obytes);
rrdset_done(st);
}
-
+
// --------------------------------------------------------------------
if (likely(do_packets_net)) {
@@ -295,9 +295,9 @@ int do_getifaddrs(int update_every, usec_t dt) {
NULL,
"Network Packets",
"packets/s",
- "freebsd.plugin",
+ "freebsd.plugin",
"getifaddrs",
- NETDATA_CHART_PRIO_SYSTEM_PACKETS,
+ NETDATA_CHART_PRIO_SYSTEM_PACKETS,
update_every,
RRDSET_TYPE_LINE
);
@@ -340,9 +340,9 @@ int do_getifaddrs(int update_every, usec_t dt) {
NULL,
"IPv4 Bandwidth",
"kilobits/s",
- "freebsd.plugin",
+ "freebsd.plugin",
"getifaddrs",
- NETDATA_CHART_PRIO_SYSTEM_IPV4,
+ NETDATA_CHART_PRIO_SYSTEM_IPV4,
update_every,
RRDSET_TYPE_AREA
);
@@ -379,9 +379,9 @@ int do_getifaddrs(int update_every, usec_t dt) {
NULL,
"IPv6 Bandwidth",
"kilobits/s",
- "freebsd.plugin",
+ "freebsd.plugin",
"getifaddrs",
- NETDATA_CHART_PRIO_SYSTEM_IPV6,
+ NETDATA_CHART_PRIO_SYSTEM_IPV6,
update_every,
RRDSET_TYPE_AREA
);
@@ -449,9 +449,9 @@ int do_getifaddrs(int update_every, usec_t dt) {
"net.net",
"Bandwidth",
"kilobits/s",
- "freebsd.plugin",
+ "freebsd.plugin",
"getifaddrs",
- NETDATA_CHART_PRIO_FIRST_NET_IFACE,
+ NETDATA_CHART_PRIO_FIRST_NET_IFACE,
update_every,
RRDSET_TYPE_AREA
);
@@ -478,9 +478,9 @@ int do_getifaddrs(int update_every, usec_t dt) {
"net.packets",
"Packets",
"packets/s",
- "freebsd.plugin",
+ "freebsd.plugin",
"getifaddrs",
- NETDATA_CHART_PRIO_FIRST_NET_PACKETS,
+ NETDATA_CHART_PRIO_FIRST_NET_PACKETS,
update_every,
RRDSET_TYPE_LINE
);
@@ -517,9 +517,9 @@ int do_getifaddrs(int update_every, usec_t dt) {
"net.errors",
"Interface Errors",
"errors/s",
- "freebsd.plugin",
+ "freebsd.plugin",
"getifaddrs",
- NETDATA_CHART_PRIO_FIRST_NET_ERRORS,
+ NETDATA_CHART_PRIO_FIRST_NET_ERRORS,
update_every,
RRDSET_TYPE_LINE
);
@@ -551,9 +551,9 @@ int do_getifaddrs(int update_every, usec_t dt) {
"net.drops",
"Interface Drops",
"drops/s",
- "freebsd.plugin",
+ "freebsd.plugin",
"getifaddrs",
- NETDATA_CHART_PRIO_FIRST_NET_DROPS,
+ NETDATA_CHART_PRIO_FIRST_NET_DROPS,
update_every,
RRDSET_TYPE_LINE
);
@@ -586,9 +586,9 @@ int do_getifaddrs(int update_every, usec_t dt) {
"net.events",
"Network Interface Events",
"events/s",
- "freebsd.plugin",
+ "freebsd.plugin",
"getifaddrs",
- NETDATA_CHART_PRIO_FIRST_NET_EVENTS,
+ NETDATA_CHART_PRIO_FIRST_NET_EVENTS,
update_every,
RRDSET_TYPE_LINE
);
diff --git a/collectors/freebsd.plugin/freebsd_getmntinfo.c b/collectors/freebsd.plugin/freebsd_getmntinfo.c
index c86f2316..d050c627 100644
--- a/collectors/freebsd.plugin/freebsd_getmntinfo.c
+++ b/collectors/freebsd.plugin/freebsd_getmntinfo.c
@@ -226,10 +226,10 @@ int do_getmntinfo(int update_every, usec_t dt) {
mntbuf[i].f_mntonname,
"disk.space",
title,
- "GB",
- "freebsd.plugin",
+ "GiB",
+ "freebsd.plugin",
"getmntinfo",
- NETDATA_CHART_PRIO_DISKSPACE_SPACE,
+ NETDATA_CHART_PRIO_DISKSPACE_SPACE,
update_every,
RRDSET_TYPE_STACKED
);
@@ -265,10 +265,10 @@ int do_getmntinfo(int update_every, usec_t dt) {
mntbuf[i].f_mntonname,
"disk.inodes",
title,
- "Inodes",
- "freebsd.plugin",
+ "inodes",
+ "freebsd.plugin",
"getmntinfo",
- NETDATA_CHART_PRIO_DISKSPACE_INODES,
+ NETDATA_CHART_PRIO_DISKSPACE_INODES,
update_every,
RRDSET_TYPE_STACKED
);
diff --git a/collectors/freebsd.plugin/freebsd_ipfw.c b/collectors/freebsd.plugin/freebsd_ipfw.c
index c256da8b..a1e50e20 100644
--- a/collectors/freebsd.plugin/freebsd_ipfw.c
+++ b/collectors/freebsd.plugin/freebsd_ipfw.c
@@ -197,7 +197,7 @@ int do_ipfw(int update_every, usec_t dt) {
NULL,
"Packets",
"packets/s",
- "freebsd.plugin",
+ "freebsd.plugin",
"ipfw",
NETDATA_CHART_PRIO_IPFW_PACKETS,
update_every,
@@ -214,7 +214,7 @@ int do_ipfw(int update_every, usec_t dt) {
NULL,
"Bytes",
"bytes/s",
- "freebsd.plugin",
+ "freebsd.plugin",
"ipfw",
NETDATA_CHART_PRIO_IPFW_BYTES,
update_every,
@@ -318,7 +318,7 @@ int do_ipfw(int update_every, usec_t dt) {
NULL,
"Active rules",
"rules",
- "freebsd.plugin",
+ "freebsd.plugin",
"ipfw",
NETDATA_CHART_PRIO_IPFW_ACTIVE,
update_every,
@@ -335,7 +335,7 @@ int do_ipfw(int update_every, usec_t dt) {
NULL,
"Expired rules",
"rules",
- "freebsd.plugin",
+ "freebsd.plugin",
"ipfw",
NETDATA_CHART_PRIO_IPFW_EXPIRED,
update_every,
diff --git a/collectors/freebsd.plugin/freebsd_kstat_zfs.c b/collectors/freebsd.plugin/freebsd_kstat_zfs.c
index 93dfc320..02103c6b 100644
--- a/collectors/freebsd.plugin/freebsd_kstat_zfs.c
+++ b/collectors/freebsd.plugin/freebsd_kstat_zfs.c
@@ -11,6 +11,10 @@ extern struct arcstats arcstats;
int do_kstat_zfs_misc_arcstats(int update_every, usec_t dt) {
(void)dt;
+ static int show_zero_charts = -1;
+ if(unlikely(show_zero_charts == -1))
+ show_zero_charts = config_get_boolean_ondemand("plugin:freebsd:zfs_arcstats", "show zero charts", CONFIG_BOOLEAN_NO);
+
unsigned long long l2_size;
size_t uint64_t_size = sizeof(uint64_t);
static struct mibs {
@@ -31,11 +35,11 @@ int do_kstat_zfs_misc_arcstats(int update_every, usec_t dt) {
int deleted[5];
int mutex_miss[5];
int evict_skip[5];
- int evict_not_enough[5];
- int evict_l2_cached[5];
- int evict_l2_eligible[5];
- int evict_l2_ineligible[5];
- int evict_l2_skip[5];
+ // int evict_not_enough[5];
+ // int evict_l2_cached[5];
+ // int evict_l2_eligible[5];
+ // int evict_l2_ineligible[5];
+ // int evict_l2_skip[5];
int hash_elements[5];
int hash_elements_max[5];
int hash_collisions[5];
@@ -46,60 +50,60 @@ int do_kstat_zfs_misc_arcstats(int update_every, usec_t dt) {
int c_min[5];
int c_max[5];
int size[5];
- int hdr_size[5];
- int data_size[5];
- int metadata_size[5];
- int other_size[5];
- int anon_size[5];
- int anon_evictable_data[5];
- int anon_evictable_metadata[5];
+ // int hdr_size[5];
+ // int data_size[5];
+ // int metadata_size[5];
+ // int other_size[5];
+ // int anon_size[5];
+ // int anon_evictable_data[5];
+ // int anon_evictable_metadata[5];
int mru_size[5];
- int mru_evictable_data[5];
- int mru_evictable_metadata[5];
- int mru_ghost_size[5];
- int mru_ghost_evictable_data[5];
- int mru_ghost_evictable_metadata[5];
+ // int mru_evictable_data[5];
+ // int mru_evictable_metadata[5];
+ // int mru_ghost_size[5];
+ // int mru_ghost_evictable_data[5];
+ // int mru_ghost_evictable_metadata[5];
int mfu_size[5];
- int mfu_evictable_data[5];
- int mfu_evictable_metadata[5];
- int mfu_ghost_size[5];
- int mfu_ghost_evictable_data[5];
- int mfu_ghost_evictable_metadata[5];
+ // int mfu_evictable_data[5];
+ // int mfu_evictable_metadata[5];
+ // int mfu_ghost_size[5];
+ // int mfu_ghost_evictable_data[5];
+ // int mfu_ghost_evictable_metadata[5];
int l2_hits[5];
int l2_misses[5];
- int l2_feeds[5];
- int l2_rw_clash[5];
+ // int l2_feeds[5];
+ // int l2_rw_clash[5];
int l2_read_bytes[5];
int l2_write_bytes[5];
- int l2_writes_sent[5];
- int l2_writes_done[5];
- int l2_writes_error[5];
- int l2_writes_lock_retry[5];
- int l2_evict_lock_retry[5];
- int l2_evict_reading[5];
- int l2_evict_l1cached[5];
- int l2_free_on_write[5];
- int l2_cdata_free_on_write[5];
- int l2_abort_lowmem[5];
- int l2_cksum_bad[5];
- int l2_io_error[5];
+ // int l2_writes_sent[5];
+ // int l2_writes_done[5];
+ // int l2_writes_error[5];
+ // int l2_writes_lock_retry[5];
+ // int l2_evict_lock_retry[5];
+ // int l2_evict_reading[5];
+ // int l2_evict_l1cached[5];
+ // int l2_free_on_write[5];
+ // int l2_cdata_free_on_write[5];
+ // int l2_abort_lowmem[5];
+ // int l2_cksum_bad[5];
+ // int l2_io_error[5];
int l2_size[5];
int l2_asize[5];
- int l2_hdr_size[5];
- int l2_compress_successes[5];
- int l2_compress_zeros[5];
- int l2_compress_failures[5];
+ // int l2_hdr_size[5];
+ // int l2_compress_successes[5];
+ // int l2_compress_zeros[5];
+ // int l2_compress_failures[5];
int memory_throttle_count[5];
- int duplicate_buffers[5];
- int duplicate_buffers_size[5];
- int duplicate_reads[5];
- int memory_direct_count[5];
- int memory_indirect_count[5];
- int arc_no_grow[5];
- int arc_tempreserve[5];
- int arc_loaned_bytes[5];
- int arc_prune[5];
- int arc_meta_used[5];
+ // int duplicate_buffers[5];
+ // int duplicate_buffers_size[5];
+ // int duplicate_reads[5];
+ // int memory_direct_count[5];
+ // int memory_indirect_count[5];
+ // int arc_no_grow[5];
+ // int arc_tempreserve[5];
+ // int arc_loaned_bytes[5];
+ // int arc_prune[5];
+ // int arc_meta_used[5];
int arc_meta_limit[5];
int arc_meta_max[5];
int arc_meta_min[5];
@@ -209,8 +213,8 @@ int do_kstat_zfs_misc_arcstats(int update_every, usec_t dt) {
// missing mib: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.arc_need_free", mibs.arc_need_free, arcstats.arc_need_free);
// missing mib: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.arc_sys_free", mibs.arc_sys_free, arcstats.arc_sys_free);
- generate_charts_arcstats("freebsd", "zfs", update_every);
- generate_charts_arc_summary("freebsd", "zfs", update_every);
+ generate_charts_arcstats("freebsd", "zfs", show_zero_charts, update_every);
+ generate_charts_arc_summary("freebsd", "zfs", show_zero_charts, update_every);
return 0;
}
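
The `show_zero_charts` hunk above uses a common netdata idiom: a static initialized to a `-1` sentinel, filled on the first call from `config_get_boolean_ondemand()` (both that function and `CONFIG_BOOLEAN_NO` appear in the hunk itself), so the option is read exactly once per process. A minimal standalone sketch of the same lazy-initialization pattern, with a hypothetical `read_bool_option()` standing in for netdata's config API:

```c
#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-in for config_get_boolean_ondemand():
 * here it just consults an environment variable. */
static int read_bool_option(const char *name, int def) {
    const char *v = getenv(name);
    if (!v) return def;
    return (v[0] == '1' || v[0] == 'y' || v[0] == 'Y');
}

static void collect(void) {
    /* -1 is the "not read yet" sentinel; the option is
     * resolved exactly once, on the first collection run. */
    static int show_zero_charts = -1;
    if (show_zero_charts == -1)
        show_zero_charts = read_bool_option("SHOW_ZERO_CHARTS", 0);

    printf("show zero charts: %d\n", show_zero_charts);
}

int main(void) {
    collect();  /* reads the option */
    collect();  /* reuses the cached value */
    return 0;
}
```
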
@@ -261,7 +265,7 @@ int do_kstat_zfs_misc_zio_trim(int update_every, usec_t dt) {
rrddim_set_by_pointer(st_bytes, rd_bytes, bytes);
rrdset_done(st_bytes);
-
+
// --------------------------------------------------------------------
static RRDSET *st_requests = NULL;
@@ -293,7 +297,7 @@ int do_kstat_zfs_misc_zio_trim(int update_every, usec_t dt) {
rrddim_set_by_pointer(st_requests, rd_failed, failed);
rrddim_set_by_pointer(st_requests, rd_unsupported, unsupported);
rrdset_done(st_requests);
-
+
}
return 0;
diff --git a/collectors/freebsd.plugin/freebsd_sysctl.c b/collectors/freebsd.plugin/freebsd_sysctl.c
index da5a351d..3f1b1001 100644
--- a/collectors/freebsd.plugin/freebsd_sysctl.c
+++ b/collectors/freebsd.plugin/freebsd_sysctl.c
@@ -276,7 +276,7 @@ int do_vm_vmtotal(int update_every, usec_t dt) {
"system",
NULL,
"Committed (Allocated) Memory",
- "MB",
+ "MiB",
"freebsd.plugin",
"vm.vmtotal",
NETDATA_CHART_PRIO_MEM_SYSTEM_COMMITTED,
@@ -580,7 +580,6 @@ int do_hw_intcnt(int update_every, usec_t dt) {
(void)dt;
static int mib_hw_intrcnt[2] = {0, 0};
size_t intrcnt_size = 0;
- unsigned long i;
if (unlikely(GETSYSCTL_SIZE("hw.intrcnt", mib_hw_intrcnt, intrcnt_size))) {
error("DISABLED: system.intr chart");
@@ -591,7 +590,7 @@ int do_hw_intcnt(int update_every, usec_t dt) {
unsigned long nintr = 0;
static unsigned long old_nintr = 0;
static unsigned long *intrcnt = NULL;
- unsigned long long totalintr = 0;
+ unsigned long i;
nintr = intrcnt_size / sizeof(u_long);
if (unlikely(nintr != old_nintr))
@@ -602,6 +601,8 @@ int do_hw_intcnt(int update_every, usec_t dt) {
error("DISABLED: hw.intrcnt module");
return 1;
} else {
+ unsigned long long totalintr = 0;
+
for (i = 0; i < nintr; i++)
totalintr += intrcnt[i];
@@ -653,7 +654,6 @@ int do_hw_intcnt(int update_every, usec_t dt) {
// --------------------------------------------------------------------
static RRDSET *st_interrupts = NULL;
- void *p;
if (unlikely(!st_interrupts))
st_interrupts = rrdset_create_localhost(
@@ -674,6 +674,8 @@ int do_hw_intcnt(int update_every, usec_t dt) {
rrdset_next(st_interrupts);
for (i = 0; i < nintr; i++) {
+ void *p;
+
p = intrnames + i * (MAXCOMLEN + 1);
if (unlikely((intrcnt[i] != 0) && (*(char *) p != 0))) {
RRDDIM *rd_interrupts = rrddim_find(st_interrupts, p);
@@ -936,7 +938,7 @@ int do_vm_swap_info(int update_every, usec_t dt) {
"swap",
NULL,
"System Swap",
- "MB",
+ "MiB",
"freebsd.plugin",
"vm.swap_info",
NETDATA_CHART_PRIO_SYSTEM_SWAP,
@@ -965,11 +967,14 @@ int do_vm_swap_info(int update_every, usec_t dt) {
int do_system_ram(int update_every, usec_t dt) {
(void)dt;
static int mib_active_count[4] = {0, 0, 0, 0}, mib_inactive_count[4] = {0, 0, 0, 0}, mib_wire_count[4] = {0, 0, 0, 0},
- mib_cache_count[4] = {0, 0, 0, 0}, mib_laundry_count[4] = {0, 0, 0, 0}, mib_vfs_bufspace[2] = {0, 0},
- mib_free_count[4] = {0, 0, 0, 0};
+ mib_cache_count[4] = {0, 0, 0, 0}, mib_vfs_bufspace[2] = {0, 0}, mib_free_count[4] = {0, 0, 0, 0};
vmmeter_t vmmeter_data;
int vfs_bufspace_count;
+#if defined(NETDATA_COLLECT_LAUNDRY)
+ static int mib_laundry_count[4] = {0, 0, 0, 0};
+#endif
+
if (unlikely(GETSYSCTL_SIMPLE("vm.stats.vm.v_active_count", mib_active_count, vmmeter_data.v_active_count) ||
GETSYSCTL_SIMPLE("vm.stats.vm.v_inactive_count", mib_inactive_count, vmmeter_data.v_inactive_count) ||
GETSYSCTL_SIMPLE("vm.stats.vm.v_wire_count", mib_wire_count, vmmeter_data.v_wire_count) ||
@@ -990,7 +995,11 @@ int do_system_ram(int update_every, usec_t dt) {
static RRDSET *st = NULL;
static RRDDIM *rd_free = NULL, *rd_active = NULL, *rd_inactive = NULL, *rd_wired = NULL,
- *rd_cache = NULL, *rd_laundry = NULL, *rd_buffers = NULL;
+ *rd_cache = NULL, *rd_buffers = NULL;
+
+#if defined(NETDATA_COLLECT_LAUNDRY)
+ static RRDDIM *rd_laundry = NULL;
+#endif
if (unlikely(!st)) {
st = rrdset_create_localhost(
@@ -1000,7 +1009,7 @@ int do_system_ram(int update_every, usec_t dt) {
"ram",
NULL,
"System RAM",
- "MB",
+ "MiB",
"freebsd.plugin",
"system.ram",
NETDATA_CHART_PRIO_SYSTEM_RAM,
@@ -1067,7 +1076,7 @@ int do_vm_stats_sys_v_swappgs(int update_every, usec_t dt) {
"swap",
NULL,
"Swap I/O",
- "kilobytes/s",
+ "KiB/s",
"freebsd.plugin",
"vm.stats.vm.v_swappgs",
NETDATA_CHART_PRIO_SYSTEM_SWAPIO,
@@ -1155,7 +1164,7 @@ int do_vm_stats_sys_v_pgfaults(int update_every, usec_t dt) {
int do_kern_ipc_sem(int update_every, usec_t dt) {
(void)dt;
- static int mib_semmni[3] = {0, 0, 0}, mib_sema[3] = {0, 0, 0};
+ static int mib_semmni[3] = {0, 0, 0};
struct ipc_sem {
int semmni;
collected_number sets;
@@ -1170,6 +1179,7 @@ int do_kern_ipc_sem(int update_every, usec_t dt) {
} else {
static struct semid_kernel *ipc_sem_data = NULL;
static int old_semmni = 0;
+ static int mib_sema[3] = {0, 0, 0};
if (unlikely(ipc_sem.semmni != old_semmni)) {
ipc_sem_data = reallocz(ipc_sem_data, sizeof(struct semid_kernel) * ipc_sem.semmni);
@@ -1253,7 +1263,7 @@ int do_kern_ipc_sem(int update_every, usec_t dt) {
int do_kern_ipc_shm(int update_every, usec_t dt) {
(void)dt;
- static int mib_shmmni[3] = {0, 0, 0}, mib_shmsegs[3] = {0, 0, 0};
+ static int mib_shmmni[3] = {0, 0, 0};
struct ipc_shm {
u_long shmmni;
collected_number segs;
@@ -1268,6 +1278,7 @@ int do_kern_ipc_shm(int update_every, usec_t dt) {
} else {
static struct shmid_kernel *ipc_shm_data = NULL;
static u_long old_shmmni = 0;
+ static int mib_shmsegs[3] = {0, 0, 0};
if (unlikely(ipc_shm.shmmni != old_shmmni)) {
ipc_shm_data = reallocz(ipc_shm_data, sizeof(struct shmid_kernel) * ipc_shm.shmmni);
@@ -1327,7 +1338,7 @@ int do_kern_ipc_shm(int update_every, usec_t dt) {
"ipc shared memory",
NULL,
"IPC Shared Memory Segments Size",
- "kilobytes",
+ "KiB",
"freebsd.plugin",
"kern.ipc.shm",
NETDATA_CHART_PRIO_SYSTEM_IPC_SHARED_MEM_SIZE,
@@ -1352,7 +1363,7 @@ int do_kern_ipc_shm(int update_every, usec_t dt) {
int do_kern_ipc_msq(int update_every, usec_t dt) {
(void)dt;
- static int mib_msgmni[3] = {0, 0, 0}, mib_msqids[3] = {0, 0, 0};
+ static int mib_msgmni[3] = {0, 0, 0};
struct ipc_msq {
int msgmni;
collected_number queues;
@@ -1370,6 +1381,7 @@ int do_kern_ipc_msq(int update_every, usec_t dt) {
} else {
static struct msqid_kernel *ipc_msq_data = NULL;
static int old_msgmni = 0;
+ static int mib_msqids[3] = {0, 0, 0};
if (unlikely(ipc_msq.msgmni != old_msgmni)) {
ipc_msq_data = reallocz(ipc_msq_data, sizeof(struct msqid_kernel) * ipc_msq.msgmni);
@@ -1531,12 +1543,6 @@ int do_net_isr(int update_every, usec_t dt) {
do_netisr_per_core = config_get_boolean("plugin:freebsd:net.isr", "netisr per core", 1);
}
- static int mib_workstream[3] = {0, 0, 0}, mib_work[3] = {0, 0, 0};
- int common_error = 0;
- size_t netisr_workstream_size = 0, netisr_work_size = 0;
- unsigned long num_netisr_workstreams = 0, num_netisr_works = 0;
- static struct sysctl_netisr_workstream *netisr_workstream = NULL;
- static struct sysctl_netisr_work *netisr_work = NULL;
static struct netisr_stats {
collected_number dispatched;
collected_number hybrid_dispatched;
@@ -1545,6 +1551,13 @@ int do_net_isr(int update_every, usec_t dt) {
} *netisr_stats = NULL;
if (likely(do_netisr || do_netisr_per_core)) {
+ static int mib_workstream[3] = {0, 0, 0}, mib_work[3] = {0, 0, 0};
+ size_t netisr_workstream_size = 0, netisr_work_size = 0;
+ static struct sysctl_netisr_workstream *netisr_workstream = NULL;
+ static struct sysctl_netisr_work *netisr_work = NULL;
+ unsigned long num_netisr_workstreams = 0, num_netisr_works = 0;
+ int common_error = 0;
+
if (unlikely(GETSYSCTL_SIZE("net.isr.workstream", mib_workstream, netisr_workstream_size))) {
common_error = 1;
} else if (unlikely(GETSYSCTL_SIZE("net.isr.work", mib_work, netisr_work_size))) {
@@ -2034,7 +2047,7 @@ int do_net_inet_tcp_stats(int update_every, usec_t dt) {
rrddim_set_by_pointer(st, rd_failed, tcpstat.tcps_sc_zonefail);
rrdset_done(st);
}
-
+
// --------------------------------------------------------------------
if(do_tcpext_listen == CONFIG_BOOLEAN_YES || (do_tcpext_listen == CONFIG_BOOLEAN_AUTO && tcpstat.tcps_listendrop)) {
@@ -2243,7 +2256,6 @@ int do_net_inet_icmp_stats(int update_every, usec_t dt) {
if (likely(do_icmp_packets || do_icmp_errors || do_icmpmsg)) {
static int mib[4] = {0, 0, 0, 0};
struct icmpstat icmpstat;
- int i;
struct icmp_total {
u_long msgs_in;
u_long msgs_out;
@@ -2259,6 +2271,8 @@ int do_net_inet_icmp_stats(int update_every, usec_t dt) {
error("DISABLED: net.inet.icmp.stats module");
return 1;
} else {
+ int i;
+
for (i = 0; i <= ICMP_MAXTYPE; i++) {
icmp_total.msgs_in += icmpstat.icps_inhist[i];
icmp_total.msgs_out += icmpstat.icps_outhist[i];
@@ -2668,7 +2682,7 @@ int do_net_inet6_ip6_stats(int update_every, usec_t dt) {
NULL,
"IPv6 Fragments Sent",
"packets/s",
- "freebsd.plugin",
+ "freebsd.plugin",
"net.inet6.ip6.stats",
3010,
update_every,
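
Several hunks in `freebsd_sysctl.c` above (`do_hw_intcnt`, `do_kern_ipc_sem`, `do_kern_ipc_shm`, `do_net_isr`, `do_net_inet_icmp_stats`) apply the same refactor: declarations such as `totalintr`, `void *p`, and the secondary MIB arrays move from function scope into the branch or loop that actually uses them, so they are not visible on the early-error paths. A minimal sketch of the pattern, using hypothetical names rather than the plugin's code:

```c
#include <stdio.h>

/* Before the refactor, the total and loop index would be declared
 * at function scope; here each lives only in the block that needs it. */
static int sum_counters(const unsigned long *counters, unsigned long n) {
    if (!counters || n == 0) {
        fprintf(stderr, "DISABLED: no counters\n");
        return 1;
    }
    else {
        unsigned long long total = 0;   /* only exists on the success path */
        unsigned long i;

        for (i = 0; i < n; i++)
            total += counters[i];

        printf("total: %llu\n", total);
        return 0;
    }
}

int main(void) {
    unsigned long c[] = {1, 2, 3};
    return sum_counters(c, 3);
}
```
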
diff --git a/collectors/freeipmi.plugin/Makefile.in b/collectors/freeipmi.plugin/Makefile.in
deleted file mode 100644
index 54a0035c..00000000
--- a/collectors/freeipmi.plugin/Makefile.in
+++ /dev/null
@@ -1,464 +0,0 @@
-# Makefile.in generated by automake 1.14.1 from Makefile.am.
-# @configure_input@
-
-# Copyright (C) 1994-2013 Free Software Foundation, Inc.
-
-# This Makefile.in is free software; the Free Software Foundation
-# gives unlimited permission to copy and/or distribute it,
-# with or without modifications, as long as this notice is preserved.
-
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
-# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
-# PARTICULAR PURPOSE.
-
-@SET_MAKE@
-
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-VPATH = @srcdir@
-am__is_gnu_make = test -n '$(MAKEFILE_LIST)' && test -n '$(MAKELEVEL)'
-am__make_running_with_option = \
- case $${target_option-} in \
- ?) ;; \
- *) echo "am__make_running_with_option: internal error: invalid" \
- "target option '$${target_option-}' specified" >&2; \
- exit 1;; \
- esac; \
- has_opt=no; \
- sane_makeflags=$$MAKEFLAGS; \
- if $(am__is_gnu_make); then \
- sane_makeflags=$$MFLAGS; \
- else \
- case $$MAKEFLAGS in \
- *\\[\ \ ]*) \
- bs=\\; \
- sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \
- | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \
- esac; \
- fi; \
- skip_next=no; \
- strip_trailopt () \
- { \
- flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \
- }; \
- for flg in $$sane_makeflags; do \
- test $$skip_next = yes && { skip_next=no; continue; }; \
- case $$flg in \
- *=*|--*) continue;; \
- -*I) strip_trailopt 'I'; skip_next=yes;; \
- -*I?*) strip_trailopt 'I';; \
- -*O) strip_trailopt 'O'; skip_next=yes;; \
- -*O?*) strip_trailopt 'O';; \
- -*l) strip_trailopt 'l'; skip_next=yes;; \
- -*l?*) strip_trailopt 'l';; \
- -[dEDm]) skip_next=yes;; \
- -[JT]) skip_next=yes;; \
- esac; \
- case $$flg in \
- *$$target_option*) has_opt=yes; break;; \
- esac; \
- done; \
- test $$has_opt = yes
-am__make_dryrun = (target_option=n; $(am__make_running_with_option))
-am__make_keepgoing = (target_option=k; $(am__make_running_with_option))
-pkgdatadir = $(datadir)/@PACKAGE@
-pkgincludedir = $(includedir)/@PACKAGE@
-pkglibdir = $(libdir)/@PACKAGE@
-pkglibexecdir = $(libexecdir)/@PACKAGE@
-am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd
-install_sh_DATA = $(install_sh) -c -m 644
-install_sh_PROGRAM = $(install_sh) -c
-install_sh_SCRIPT = $(install_sh) -c
-INSTALL_HEADER = $(INSTALL_DATA)
-transform = $(program_transform_name)
-NORMAL_INSTALL = :
-PRE_INSTALL = :
-POST_INSTALL = :
-NORMAL_UNINSTALL = :
-PRE_UNINSTALL = :
-POST_UNINSTALL = :
-build_triplet = @build@
-host_triplet = @host@
-subdir = collectors/freeipmi.plugin
-DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/Makefile.am \
- $(dist_noinst_DATA)
-ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
-am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \
- $(top_srcdir)/build/m4/ax_c__generic.m4 \
- $(top_srcdir)/build/m4/ax_c_lto.m4 \
- $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \
- $(top_srcdir)/build/m4/ax_c_mallopt.m4 \
- $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \
- $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \
- $(top_srcdir)/build/m4/ax_pthread.m4 \
- $(top_srcdir)/build/m4/jemalloc.m4 \
- $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac
-am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
- $(ACLOCAL_M4)
-mkinstalldirs = $(install_sh) -d
-CONFIG_HEADER = $(top_builddir)/config.h
-CONFIG_CLEAN_FILES =
-CONFIG_CLEAN_VPATH_FILES =
-AM_V_P = $(am__v_P_@AM_V@)
-am__v_P_ = $(am__v_P_@AM_DEFAULT_V@)
-am__v_P_0 = false
-am__v_P_1 = :
-AM_V_GEN = $(am__v_GEN_@AM_V@)
-am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@)
-am__v_GEN_0 = @echo " GEN " $@;
-am__v_GEN_1 =
-AM_V_at = $(am__v_at_@AM_V@)
-am__v_at_ = $(am__v_at_@AM_DEFAULT_V@)
-am__v_at_0 = @
-am__v_at_1 =
-SOURCES =
-DIST_SOURCES =
-am__can_run_installinfo = \
- case $$AM_UPDATE_INFO_DIR in \
- n|no|NO) false;; \
- *) (install-info --version) >/dev/null 2>&1;; \
- esac
-DATA = $(dist_noinst_DATA)
-am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP)
-DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
-ACLOCAL = @ACLOCAL@
-AMTAR = @AMTAR@
-AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@
-AUTOCONF = @AUTOCONF@
-AUTOHEADER = @AUTOHEADER@
-AUTOMAKE = @AUTOMAKE@
-AWK = @AWK@
-CC = @CC@
-CCDEPMODE = @CCDEPMODE@
-CFLAGS = @CFLAGS@
-CPP = @CPP@
-CPPFLAGS = @CPPFLAGS@
-CYGPATH_W = @CYGPATH_W@
-DEFS = @DEFS@
-DEPDIR = @DEPDIR@
-ECHO_C = @ECHO_C@
-ECHO_N = @ECHO_N@
-ECHO_T = @ECHO_T@
-EGREP = @EGREP@
-EXEEXT = @EXEEXT@
-GREP = @GREP@
-INSTALL = @INSTALL@
-INSTALL_DATA = @INSTALL_DATA@
-INSTALL_PROGRAM = @INSTALL_PROGRAM@
-INSTALL_SCRIPT = @INSTALL_SCRIPT@
-INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
-IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@
-IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@
-LDFLAGS = @LDFLAGS@
-LIBCAP_CFLAGS = @LIBCAP_CFLAGS@
-LIBCAP_LIBS = @LIBCAP_LIBS@
-LIBMNL_CFLAGS = @LIBMNL_CFLAGS@
-LIBMNL_LIBS = @LIBMNL_LIBS@
-LIBOBJS = @LIBOBJS@
-LIBS = @LIBS@
-LTLIBOBJS = @LTLIBOBJS@
-MAINT = @MAINT@
-MAKEINFO = @MAKEINFO@
-MATH_CFLAGS = @MATH_CFLAGS@
-MATH_LIBS = @MATH_LIBS@
-MKDIR_P = @MKDIR_P@
-NFACCT_CFLAGS = @NFACCT_CFLAGS@
-NFACCT_LIBS = @NFACCT_LIBS@
-OBJEXT = @OBJEXT@
-OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@
-OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@
-OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@
-OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@
-OPTIONAL_MATH_CLFAGS = @OPTIONAL_MATH_CLFAGS@
-OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@
-OPTIONAL_NFACCT_CLFAGS = @OPTIONAL_NFACCT_CLFAGS@
-OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@
-OPTIONAL_UUID_CLFAGS = @OPTIONAL_UUID_CLFAGS@
-OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@
-OPTIONAL_ZLIB_CLFAGS = @OPTIONAL_ZLIB_CLFAGS@
-OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@
-PACKAGE = @PACKAGE@
-PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@
-PACKAGE_NAME = @PACKAGE_NAME@
-PACKAGE_RPM_RELEASE = @PACKAGE_RPM_RELEASE@
-PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@
-PACKAGE_STRING = @PACKAGE_STRING@
-PACKAGE_TARNAME = @PACKAGE_TARNAME@
-PACKAGE_URL = @PACKAGE_URL@
-PACKAGE_VERSION = @PACKAGE_VERSION@
-PATH_SEPARATOR = @PATH_SEPARATOR@
-PKG_CONFIG = @PKG_CONFIG@
-PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@
-PKG_CONFIG_PATH = @PKG_CONFIG_PATH@
-PTHREAD_CC = @PTHREAD_CC@
-PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
-PTHREAD_LIBS = @PTHREAD_LIBS@
-SET_MAKE = @SET_MAKE@
-SHELL = @SHELL@
-SSE_CANDIDATE = @SSE_CANDIDATE@
-STRIP = @STRIP@
-UUID_CFLAGS = @UUID_CFLAGS@
-UUID_LIBS = @UUID_LIBS@
-VERSION = @VERSION@
-ZLIB_CFLAGS = @ZLIB_CFLAGS@
-ZLIB_LIBS = @ZLIB_LIBS@
-abs_builddir = @abs_builddir@
-abs_srcdir = @abs_srcdir@
-abs_top_builddir = @abs_top_builddir@
-abs_top_srcdir = @abs_top_srcdir@
-ac_ct_CC = @ac_ct_CC@
-am__include = @am__include@
-am__leading_dot = @am__leading_dot@
-am__quote = @am__quote@
-am__tar = @am__tar@
-am__untar = @am__untar@
-ax_pthread_config = @ax_pthread_config@
-bindir = @bindir@
-build = @build@
-build_alias = @build_alias@
-build_cpu = @build_cpu@
-build_os = @build_os@
-build_target = @build_target@
-build_vendor = @build_vendor@
-builddir = @builddir@
-cachedir = @cachedir@
-chartsdir = @chartsdir@
-configdir = @configdir@
-datadir = @datadir@
-datarootdir = @datarootdir@
-docdir = @docdir@
-dvidir = @dvidir@
-exec_prefix = @exec_prefix@
-has_jemalloc = @has_jemalloc@
-has_tcmalloc = @has_tcmalloc@
-host = @host@
-host_alias = @host_alias@
-host_cpu = @host_cpu@
-host_os = @host_os@
-host_vendor = @host_vendor@
-htmldir = @htmldir@
-includedir = @includedir@
-infodir = @infodir@
-install_sh = @install_sh@
-libconfigdir = @libconfigdir@
-libdir = @libdir@
-libexecdir = @libexecdir@
-localedir = @localedir@
-localstatedir = @localstatedir@
-logdir = @logdir@
-mandir = @mandir@
-mkdir_p = @mkdir_p@
-nodedir = @nodedir@
-oldincludedir = @oldincludedir@
-pdfdir = @pdfdir@
-pluginsdir = @pluginsdir@
-prefix = @prefix@
-program_transform_name = @program_transform_name@
-psdir = @psdir@
-pythondir = @pythondir@
-registrydir = @registrydir@
-sbindir = @sbindir@
-sharedstatedir = @sharedstatedir@
-srcdir = @srcdir@
-sysconfdir = @sysconfdir@
-target_alias = @target_alias@
-top_build_prefix = @top_build_prefix@
-top_builddir = @top_builddir@
-top_srcdir = @top_srcdir@
-varlibdir = @varlibdir@
-webdir = @webdir@
-AUTOMAKE_OPTIONS = subdir-objects
-MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
-dist_noinst_DATA = \
- README.md \
- $(NULL)
-
-all: all-am
-
-.SUFFIXES:
-$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps)
- @for dep in $?; do \
- case '$(am__configure_deps)' in \
- *$$dep*) \
- ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \
- && { if test -f $@; then exit 0; else break; fi; }; \
- exit 1;; \
- esac; \
- done; \
- echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu collectors/freeipmi.plugin/Makefile'; \
- $(am__cd) $(top_srcdir) && \
- $(AUTOMAKE) --gnu collectors/freeipmi.plugin/Makefile
-.PRECIOUS: Makefile
-Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
- @case '$?' in \
- *config.status*) \
- cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \
- *) \
- echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \
- cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \
- esac;
-
-$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES)
- cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
-
-$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps)
- cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
-$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps)
- cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
-$(am__aclocal_m4_deps):
-tags TAGS:
-
-ctags CTAGS:
-
-cscope cscopelist:
-
-
-distdir: $(DISTFILES)
- @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
- topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
- list='$(DISTFILES)'; \
- dist_files=`for file in $$list; do echo $$file; done | \
- sed -e "s|^$$srcdirstrip/||;t" \
- -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \
- case $$dist_files in \
- */*) $(MKDIR_P) `echo "$$dist_files" | \
- sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \
- sort -u` ;; \
- esac; \
- for file in $$dist_files; do \
- if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \
- if test -d $$d/$$file; then \
- dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \
- if test -d "$(distdir)/$$file"; then \
- find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
- fi; \
- if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \
- cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \
- find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
- fi; \
- cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \
- else \
- test -f "$(distdir)/$$file" \
- || cp -p $$d/$$file "$(distdir)/$$file" \
- || exit 1; \
- fi; \
- done
-check-am: all-am
-check: check-am
-all-am: Makefile $(DATA)
-installdirs:
-install: install-am
-install-exec: install-exec-am
-install-data: install-data-am
-uninstall: uninstall-am
-
-install-am: all-am
- @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
-
-installcheck: installcheck-am
-install-strip:
- if test -z '$(STRIP)'; then \
- $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
- install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
- install; \
- else \
- $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
- install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
- "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \
- fi
-mostlyclean-generic:
-
-clean-generic:
-
-distclean-generic:
- -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
- -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES)
-
-maintainer-clean-generic:
- @echo "This command is intended for maintainers to use"
- @echo "it deletes files that may require special tools to rebuild."
- -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES)
-clean: clean-am
-
-clean-am: clean-generic mostlyclean-am
-
-distclean: distclean-am
- -rm -f Makefile
-distclean-am: clean-am distclean-generic
-
-dvi: dvi-am
-
-dvi-am:
-
-html: html-am
-
-html-am:
-
-info: info-am
-
-info-am:
-
-install-data-am:
-
-install-dvi: install-dvi-am
-
-install-dvi-am:
-
-install-exec-am:
-
-install-html: install-html-am
-
-install-html-am:
-
-install-info: install-info-am
-
-install-info-am:
-
-install-man:
-
-install-pdf: install-pdf-am
-
-install-pdf-am:
-
-install-ps: install-ps-am
-
-install-ps-am:
-
-installcheck-am:
-
-maintainer-clean: maintainer-clean-am
- -rm -f Makefile
-maintainer-clean-am: distclean-am maintainer-clean-generic
-
-mostlyclean: mostlyclean-am
-
-mostlyclean-am: mostlyclean-generic
-
-pdf: pdf-am
-
-pdf-am:
-
-ps: ps-am
-
-ps-am:
-
-uninstall-am:
-
-.MAKE: install-am install-strip
-
-.PHONY: all all-am check check-am clean clean-generic cscopelist-am \
- ctags-am distclean distclean-generic distdir dvi dvi-am html \
- html-am info info-am install install-am install-data \
- install-data-am install-dvi install-dvi-am install-exec \
- install-exec-am install-html install-html-am install-info \
- install-info-am install-man install-pdf install-pdf-am \
- install-ps install-ps-am install-strip installcheck \
- installcheck-am installdirs maintainer-clean \
- maintainer-clean-generic mostlyclean mostlyclean-generic pdf \
- pdf-am ps ps-am tags-am uninstall uninstall-am
-
-
-# Tell versions [3.59,3.63) of GNU make to not export all variables.
-# Otherwise a system limit (for SysV at least) may be exceeded.
-.NOEXPORT:
diff --git a/collectors/freeipmi.plugin/README.md b/collectors/freeipmi.plugin/README.md
index 6d4ad186..a2beddb5 100644
--- a/collectors/freeipmi.plugin/README.md
+++ b/collectors/freeipmi.plugin/README.md
@@ -1,8 +1,10 @@
-netdata has a [freeipmi](https://www.gnu.org/software/freeipmi/) plugin.
+# freeipmi.plugin
+
+Netdata has a [freeipmi](https://www.gnu.org/software/freeipmi/) plugin.
> FreeIPMI provides in-band and out-of-band IPMI software based on the IPMI v1.5/2.0 specification. The IPMI specification defines a set of interfaces for platform management and is implemented by a number of vendors for system management. The features of IPMI that most users will be interested in are sensor monitoring, system event monitoring, power control, and serial-over-LAN (SOL).
-## compile `freeipmi.plugin`
+## Compile `freeipmi.plugin`
1. install `libipmimonitoring-dev` or `libipmimonitoring-devel` (`freeipmi-devel` on RHEL-based systems) using the package manager of your system.
@@ -12,7 +14,7 @@ Keep in mind IPMI requires root access, so the plugin is setuid to root.
If you have just installed the required IPMI tools, run the `ipmimonitoring` command at least once and verify that it returns sensor information. This command initialises the IPMI configuration, so that the netdata plugin will be able to work.
-## netdata use
+## Netdata use
The plugin creates (up to) 8 charts, based on the information collected from IPMI:
@@ -101,7 +103,7 @@ You can set these options in `/etc/netdata/netdata.conf` at this section:
Append to `command options = ` the settings you need. The minimum `update every` is 5 (enforced internally by the plugin). IPMI is slow and CPU hungry, so collecting once every 5 seconds is acceptable.
-## ignoring specific sensors
+## Ignoring specific sensors
Specific sensor IDs can be excluded from freeipmi tools by editing `/etc/freeipmi/freeipmi.conf` and setting the IDs to be ignored at `ipmi-sensors-exclude-record-ids`. **However, this file is not used by `libipmimonitoring`** (the library used by netdata's `freeipmi.plugin`).
@@ -135,7 +137,7 @@ ID | Name | Type | State | Reading | Unit
```
-## debugging
+## Debugging
You can run the plugin by hand:
@@ -178,3 +180,5 @@ If you need to disable IPMI for netdata, edit `/etc/netdata/netdata.conf` and se
[plugins]
freeipmi = no
```
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Ffreeipmi.plugin%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
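
The README's sensor-exclusion section pairs with the `excluded_record_ids_check()` calls visible in the `freeipmi_plugin.c` hunks below. A hedged sketch of how such a record-id filter can work, as a linear scan over an id list; the hard-coded array here is an illustrative assumption, not the plugin's actual storage or parsing:

```c
#include <stddef.h>
#include <stdio.h>

/* Illustrative storage: in the plugin the ids come from its command
 * options; here they are simply hard-coded. */
static const int excluded_record_ids[] = {4029, 4037};
static const size_t excluded_record_ids_count =
    sizeof(excluded_record_ids) / sizeof(excluded_record_ids[0]);

static int excluded_record_ids_check(int record_id) {
    size_t i;
    for (i = 0; i < excluded_record_ids_count; i++)
        if (excluded_record_ids[i] == record_id)
            return 1;   /* excluded: the caller skips this sensor */
    return 0;
}

int main(void) {
    printf("%d %d\n", excluded_record_ids_check(4029),
                      excluded_record_ids_check(1));
    return 0;
}
```
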
diff --git a/collectors/freeipmi.plugin/freeipmi_plugin.c b/collectors/freeipmi.plugin/freeipmi_plugin.c
index 7fc012d3..35b9a003 100644
--- a/collectors/freeipmi.plugin/freeipmi_plugin.c
+++ b/collectors/freeipmi.plugin/freeipmi_plugin.c
@@ -35,6 +35,13 @@ void netdata_cleanup_and_exit(int ret) {
exit(ret);
}
+void send_statistics( const char *action, const char *action_result, const char *action_data) {
+ (void)action;
+ (void)action_result;
+ (void)action_data;
+ return;
+}
+
// callbacks required by popen()
void signals_block(void) {};
void signals_unblock(void) {};
@@ -102,11 +109,11 @@ char *sensor_config_file = NULL;
* - See ipmi_monitoring.h for descriptions of these flags.
*/
int reread_sdr_cache = 0;
-int ignore_non_interpretable_sensors = 1;
+int ignore_non_interpretable_sensors = 0;
int bridge_sensors = 0;
int interpret_oem_data = 0;
int shared_sensors = 0;
-int discrete_reading = 0;
+int discrete_reading = 1;
int ignore_scanning_disabled = 0;
int assume_bmc_owner = 0;
int entity_sensor_names = 0;
@@ -321,7 +328,7 @@ static void send_chart_to_netdata_for_units(int units) {
switch(units) {
case IPMI_MONITORING_SENSOR_UNITS_CELSIUS:
- printf("CHART ipmi.temperatures_c '' 'System Celcius Temperatures read by IPMI' 'Celcius' 'temperatures' 'ipmi.temperatures_c' 'line' %d %d\n"
+ printf("CHART ipmi.temperatures_c '' 'System Celsius Temperatures read by IPMI' 'Celsius' 'temperatures' 'ipmi.temperatures_c' 'line' %d %d\n"
, netdata_priority + 10
, netdata_update_every
);
@@ -665,10 +672,13 @@ static void netdata_get_sensor(
if(!sn) {
// not found, create it
-
// check if it is excluded
- if(excluded_record_ids_check(record_id))
+ if(excluded_record_ids_check(record_id)) {
+ if(debug) fprintf(stderr, "Sensor '%s' is excluded by excluded_record_ids_check()\n", sensor_name);
return;
+ }
+
+ if(debug) fprintf(stderr, "Allocating new sensor data record for sensor '%s', id %d, number %d, type %d, state %d, units %d, reading_type %d\n", sensor_name, record_id, sensor_number, sensor_type, sensor_state, sensor_units, sensor_reading_type);
sn = calloc(1, sizeof(struct sensor));
if(!sn) {
@@ -689,6 +699,9 @@ static void netdata_get_sensor(
sn->next = sensors_root;
sensors_root = sn;
}
+ else {
+ if(debug) fprintf(stderr, "Reusing sensor record for sensor '%s', id %d, number %d, type %d, state %d, units %d, reading_type %d\n", sensor_name, record_id, sensor_number, sensor_type, sensor_state, sensor_units, sensor_reading_type);
+ }
switch(sensor_reading_type) {
case IPMI_MONITORING_SENSOR_READING_TYPE_UNSIGNED_INTEGER8_BOOL:
@@ -710,13 +723,16 @@ static void netdata_get_sensor(
break;
default:
+ if(debug) fprintf(stderr, "Unknown reading type - Ignoring sensor record for sensor '%s', id %d, number %d, type %d, state %d, units %d, reading_type %d\n", sensor_name, record_id, sensor_number, sensor_type, sensor_state, sensor_units, sensor_reading_type);
sn->ignore = 1;
break;
}
// check if it is excluded
- if(excluded_status_record_ids_check(record_id))
+ if(excluded_status_record_ids_check(record_id)) {
+ if(debug) fprintf(stderr, "Sensor '%s' is excluded for status check, by excluded_status_record_ids_check()\n", sensor_name);
return;
+ }
switch(sensor_state) {
case IPMI_MONITORING_STATE_NOMINAL:
@@ -963,12 +979,13 @@ _ipmimonitoring_sensors (struct ipmi_monitoring_ipmi_config *ipmi_config)
goto cleanup;
}
- if (!(sensor_bitmask_strings = ipmi_monitoring_sensor_read_sensor_bitmask_strings (ctx)))
- {
- error( "ipmi_monitoring_sensor_read_sensor_bitmask_strings(): %s",
- ipmi_monitoring_ctx_errormsg (ctx));
- goto cleanup;
- }
+ /* it's ok for this to be NULL, i.e. sensor_bitmask ==
+ * IPMI_MONITORING_SENSOR_BITMASK_TYPE_UNKNOWN
+ */
+ sensor_bitmask_strings = ipmi_monitoring_sensor_read_sensor_bitmask_strings (ctx);
+
+
+
#endif // NETDATA_COMMENTED
if ((sensor_reading_type = ipmi_monitoring_sensor_read_sensor_reading_type (ctx)) < 0)
@@ -1075,7 +1092,8 @@ _ipmimonitoring_sensors (struct ipmi_monitoring_ipmi_config *ipmi_config)
else
printf (", N/A");
- if (sensor_bitmask_type != IPMI_MONITORING_SENSOR_BITMASK_TYPE_UNKNOWN)
+ if (sensor_bitmask_type != IPMI_MONITORING_SENSOR_BITMASK_TYPE_UNKNOWN
+ && sensor_bitmask_strings)
{
unsigned int i = 0;
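
The last two hunks above change `sensor_bitmask_strings` from a hard error into an accepted NULL (the comment notes it means `sensor_bitmask == IPMI_MONITORING_SENSOR_BITMASK_TYPE_UNKNOWN`), and the later use gains a `&& sensor_bitmask_strings` guard. A minimal sketch of the same "NULL is a valid answer" contract, with hypothetical names:

```c
#include <stdio.h>

/* Hypothetical reader: returns a NULL-terminated string array,
 * or NULL when the bitmask type is unknown, which is not an error. */
static const char **read_bitmask_strings(int have_strings) {
    static const char *strings[] = {"Asserted", "Deasserted", NULL};
    return have_strings ? strings : NULL;
}

static void print_sensor(int have_strings) {
    const char **s = read_bitmask_strings(have_strings);

    /* guard every use: NULL simply means "nothing to print" */
    if (s) {
        unsigned int i;
        for (i = 0; s[i]; i++)
            printf(" '%s'", s[i]);
    }
    else
        printf(" N/A");
    printf("\n");
}

int main(void) {
    print_sensor(1);
    print_sensor(0);
    return 0;
}
```
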
diff --git a/collectors/idlejitter.plugin/Makefile.in b/collectors/idlejitter.plugin/Makefile.in
deleted file mode 100644
index 973a3bef..00000000
--- a/collectors/idlejitter.plugin/Makefile.in
+++ /dev/null
@@ -1,464 +0,0 @@
-# Makefile.in generated by automake 1.14.1 from Makefile.am.
-# @configure_input@
-
-# Copyright (C) 1994-2013 Free Software Foundation, Inc.
-
-# This Makefile.in is free software; the Free Software Foundation
-# gives unlimited permission to copy and/or distribute it,
-# with or without modifications, as long as this notice is preserved.
-
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
-# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
-# PARTICULAR PURPOSE.
-
-@SET_MAKE@
-
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-VPATH = @srcdir@
-am__is_gnu_make = test -n '$(MAKEFILE_LIST)' && test -n '$(MAKELEVEL)'
-am__make_running_with_option = \
- case $${target_option-} in \
- ?) ;; \
- *) echo "am__make_running_with_option: internal error: invalid" \
- "target option '$${target_option-}' specified" >&2; \
- exit 1;; \
- esac; \
- has_opt=no; \
- sane_makeflags=$$MAKEFLAGS; \
- if $(am__is_gnu_make); then \
- sane_makeflags=$$MFLAGS; \
- else \
- case $$MAKEFLAGS in \
- *\\[\ \ ]*) \
- bs=\\; \
- sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \
- | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \
- esac; \
- fi; \
- skip_next=no; \
- strip_trailopt () \
- { \
- flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \
- }; \
- for flg in $$sane_makeflags; do \
- test $$skip_next = yes && { skip_next=no; continue; }; \
- case $$flg in \
- *=*|--*) continue;; \
- -*I) strip_trailopt 'I'; skip_next=yes;; \
- -*I?*) strip_trailopt 'I';; \
- -*O) strip_trailopt 'O'; skip_next=yes;; \
- -*O?*) strip_trailopt 'O';; \
- -*l) strip_trailopt 'l'; skip_next=yes;; \
- -*l?*) strip_trailopt 'l';; \
- -[dEDm]) skip_next=yes;; \
- -[JT]) skip_next=yes;; \
- esac; \
- case $$flg in \
- *$$target_option*) has_opt=yes; break;; \
- esac; \
- done; \
- test $$has_opt = yes
-am__make_dryrun = (target_option=n; $(am__make_running_with_option))
-am__make_keepgoing = (target_option=k; $(am__make_running_with_option))
-pkgdatadir = $(datadir)/@PACKAGE@
-pkgincludedir = $(includedir)/@PACKAGE@
-pkglibdir = $(libdir)/@PACKAGE@
-pkglibexecdir = $(libexecdir)/@PACKAGE@
-am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd
-install_sh_DATA = $(install_sh) -c -m 644
-install_sh_PROGRAM = $(install_sh) -c
-install_sh_SCRIPT = $(install_sh) -c
-INSTALL_HEADER = $(INSTALL_DATA)
-transform = $(program_transform_name)
-NORMAL_INSTALL = :
-PRE_INSTALL = :
-POST_INSTALL = :
-NORMAL_UNINSTALL = :
-PRE_UNINSTALL = :
-POST_UNINSTALL = :
-build_triplet = @build@
-host_triplet = @host@
-subdir = collectors/idlejitter.plugin
-DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/Makefile.am \
- $(dist_noinst_DATA)
-ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
-am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \
- $(top_srcdir)/build/m4/ax_c__generic.m4 \
- $(top_srcdir)/build/m4/ax_c_lto.m4 \
- $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \
- $(top_srcdir)/build/m4/ax_c_mallopt.m4 \
- $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \
- $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \
- $(top_srcdir)/build/m4/ax_pthread.m4 \
- $(top_srcdir)/build/m4/jemalloc.m4 \
- $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac
-am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
- $(ACLOCAL_M4)
-mkinstalldirs = $(install_sh) -d
-CONFIG_HEADER = $(top_builddir)/config.h
-CONFIG_CLEAN_FILES =
-CONFIG_CLEAN_VPATH_FILES =
-AM_V_P = $(am__v_P_@AM_V@)
-am__v_P_ = $(am__v_P_@AM_DEFAULT_V@)
-am__v_P_0 = false
-am__v_P_1 = :
-AM_V_GEN = $(am__v_GEN_@AM_V@)
-am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@)
-am__v_GEN_0 = @echo " GEN " $@;
-am__v_GEN_1 =
-AM_V_at = $(am__v_at_@AM_V@)
-am__v_at_ = $(am__v_at_@AM_DEFAULT_V@)
-am__v_at_0 = @
-am__v_at_1 =
-SOURCES =
-DIST_SOURCES =
-am__can_run_installinfo = \
- case $$AM_UPDATE_INFO_DIR in \
- n|no|NO) false;; \
- *) (install-info --version) >/dev/null 2>&1;; \
- esac
-DATA = $(dist_noinst_DATA)
-am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP)
-DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
-ACLOCAL = @ACLOCAL@
-AMTAR = @AMTAR@
-AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@
-AUTOCONF = @AUTOCONF@
-AUTOHEADER = @AUTOHEADER@
-AUTOMAKE = @AUTOMAKE@
-AWK = @AWK@
-CC = @CC@
-CCDEPMODE = @CCDEPMODE@
-CFLAGS = @CFLAGS@
-CPP = @CPP@
-CPPFLAGS = @CPPFLAGS@
-CYGPATH_W = @CYGPATH_W@
-DEFS = @DEFS@
-DEPDIR = @DEPDIR@
-ECHO_C = @ECHO_C@
-ECHO_N = @ECHO_N@
-ECHO_T = @ECHO_T@
-EGREP = @EGREP@
-EXEEXT = @EXEEXT@
-GREP = @GREP@
-INSTALL = @INSTALL@
-INSTALL_DATA = @INSTALL_DATA@
-INSTALL_PROGRAM = @INSTALL_PROGRAM@
-INSTALL_SCRIPT = @INSTALL_SCRIPT@
-INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
-IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@
-IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@
-LDFLAGS = @LDFLAGS@
-LIBCAP_CFLAGS = @LIBCAP_CFLAGS@
-LIBCAP_LIBS = @LIBCAP_LIBS@
-LIBMNL_CFLAGS = @LIBMNL_CFLAGS@
-LIBMNL_LIBS = @LIBMNL_LIBS@
-LIBOBJS = @LIBOBJS@
-LIBS = @LIBS@
-LTLIBOBJS = @LTLIBOBJS@
-MAINT = @MAINT@
-MAKEINFO = @MAKEINFO@
-MATH_CFLAGS = @MATH_CFLAGS@
-MATH_LIBS = @MATH_LIBS@
-MKDIR_P = @MKDIR_P@
-NFACCT_CFLAGS = @NFACCT_CFLAGS@
-NFACCT_LIBS = @NFACCT_LIBS@
-OBJEXT = @OBJEXT@
-OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@
-OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@
-OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@
-OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@
-OPTIONAL_MATH_CLFAGS = @OPTIONAL_MATH_CLFAGS@
-OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@
-OPTIONAL_NFACCT_CLFAGS = @OPTIONAL_NFACCT_CLFAGS@
-OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@
-OPTIONAL_UUID_CLFAGS = @OPTIONAL_UUID_CLFAGS@
-OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@
-OPTIONAL_ZLIB_CLFAGS = @OPTIONAL_ZLIB_CLFAGS@
-OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@
-PACKAGE = @PACKAGE@
-PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@
-PACKAGE_NAME = @PACKAGE_NAME@
-PACKAGE_RPM_RELEASE = @PACKAGE_RPM_RELEASE@
-PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@
-PACKAGE_STRING = @PACKAGE_STRING@
-PACKAGE_TARNAME = @PACKAGE_TARNAME@
-PACKAGE_URL = @PACKAGE_URL@
-PACKAGE_VERSION = @PACKAGE_VERSION@
-PATH_SEPARATOR = @PATH_SEPARATOR@
-PKG_CONFIG = @PKG_CONFIG@
-PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@
-PKG_CONFIG_PATH = @PKG_CONFIG_PATH@
-PTHREAD_CC = @PTHREAD_CC@
-PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
-PTHREAD_LIBS = @PTHREAD_LIBS@
-SET_MAKE = @SET_MAKE@
-SHELL = @SHELL@
-SSE_CANDIDATE = @SSE_CANDIDATE@
-STRIP = @STRIP@
-UUID_CFLAGS = @UUID_CFLAGS@
-UUID_LIBS = @UUID_LIBS@
-VERSION = @VERSION@
-ZLIB_CFLAGS = @ZLIB_CFLAGS@
-ZLIB_LIBS = @ZLIB_LIBS@
-abs_builddir = @abs_builddir@
-abs_srcdir = @abs_srcdir@
-abs_top_builddir = @abs_top_builddir@
-abs_top_srcdir = @abs_top_srcdir@
-ac_ct_CC = @ac_ct_CC@
-am__include = @am__include@
-am__leading_dot = @am__leading_dot@
-am__quote = @am__quote@
-am__tar = @am__tar@
-am__untar = @am__untar@
-ax_pthread_config = @ax_pthread_config@
-bindir = @bindir@
-build = @build@
-build_alias = @build_alias@
-build_cpu = @build_cpu@
-build_os = @build_os@
-build_target = @build_target@
-build_vendor = @build_vendor@
-builddir = @builddir@
-cachedir = @cachedir@
-chartsdir = @chartsdir@
-configdir = @configdir@
-datadir = @datadir@
-datarootdir = @datarootdir@
-docdir = @docdir@
-dvidir = @dvidir@
-exec_prefix = @exec_prefix@
-has_jemalloc = @has_jemalloc@
-has_tcmalloc = @has_tcmalloc@
-host = @host@
-host_alias = @host_alias@
-host_cpu = @host_cpu@
-host_os = @host_os@
-host_vendor = @host_vendor@
-htmldir = @htmldir@
-includedir = @includedir@
-infodir = @infodir@
-install_sh = @install_sh@
-libconfigdir = @libconfigdir@
-libdir = @libdir@
-libexecdir = @libexecdir@
-localedir = @localedir@
-localstatedir = @localstatedir@
-logdir = @logdir@
-mandir = @mandir@
-mkdir_p = @mkdir_p@
-nodedir = @nodedir@
-oldincludedir = @oldincludedir@
-pdfdir = @pdfdir@
-pluginsdir = @pluginsdir@
-prefix = @prefix@
-program_transform_name = @program_transform_name@
-psdir = @psdir@
-pythondir = @pythondir@
-registrydir = @registrydir@
-sbindir = @sbindir@
-sharedstatedir = @sharedstatedir@
-srcdir = @srcdir@
-sysconfdir = @sysconfdir@
-target_alias = @target_alias@
-top_build_prefix = @top_build_prefix@
-top_builddir = @top_builddir@
-top_srcdir = @top_srcdir@
-varlibdir = @varlibdir@
-webdir = @webdir@
-AUTOMAKE_OPTIONS = subdir-objects
-MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
-dist_noinst_DATA = \
- README.md \
- $(NULL)
-
-all: all-am
-
-.SUFFIXES:
-$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps)
- @for dep in $?; do \
- case '$(am__configure_deps)' in \
- *$$dep*) \
- ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \
- && { if test -f $@; then exit 0; else break; fi; }; \
- exit 1;; \
- esac; \
- done; \
- echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu collectors/idlejitter.plugin/Makefile'; \
- $(am__cd) $(top_srcdir) && \
- $(AUTOMAKE) --gnu collectors/idlejitter.plugin/Makefile
-.PRECIOUS: Makefile
-Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
- @case '$?' in \
- *config.status*) \
- cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \
- *) \
- echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \
- cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \
- esac;
-
-$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES)
- cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
-
-$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps)
- cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
-$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps)
- cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
-$(am__aclocal_m4_deps):
-tags TAGS:
-
-ctags CTAGS:
-
-cscope cscopelist:
-
-
-distdir: $(DISTFILES)
- @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
- topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
- list='$(DISTFILES)'; \
- dist_files=`for file in $$list; do echo $$file; done | \
- sed -e "s|^$$srcdirstrip/||;t" \
- -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \
- case $$dist_files in \
- */*) $(MKDIR_P) `echo "$$dist_files" | \
- sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \
- sort -u` ;; \
- esac; \
- for file in $$dist_files; do \
- if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \
- if test -d $$d/$$file; then \
- dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \
- if test -d "$(distdir)/$$file"; then \
- find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
- fi; \
- if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \
- cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \
- find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
- fi; \
- cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \
- else \
- test -f "$(distdir)/$$file" \
- || cp -p $$d/$$file "$(distdir)/$$file" \
- || exit 1; \
- fi; \
- done
-check-am: all-am
-check: check-am
-all-am: Makefile $(DATA)
-installdirs:
-install: install-am
-install-exec: install-exec-am
-install-data: install-data-am
-uninstall: uninstall-am
-
-install-am: all-am
- @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
-
-installcheck: installcheck-am
-install-strip:
- if test -z '$(STRIP)'; then \
- $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
- install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
- install; \
- else \
- $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
- install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
- "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \
- fi
-mostlyclean-generic:
-
-clean-generic:
-
-distclean-generic:
- -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
- -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES)
-
-maintainer-clean-generic:
- @echo "This command is intended for maintainers to use"
- @echo "it deletes files that may require special tools to rebuild."
- -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES)
-clean: clean-am
-
-clean-am: clean-generic mostlyclean-am
-
-distclean: distclean-am
- -rm -f Makefile
-distclean-am: clean-am distclean-generic
-
-dvi: dvi-am
-
-dvi-am:
-
-html: html-am
-
-html-am:
-
-info: info-am
-
-info-am:
-
-install-data-am:
-
-install-dvi: install-dvi-am
-
-install-dvi-am:
-
-install-exec-am:
-
-install-html: install-html-am
-
-install-html-am:
-
-install-info: install-info-am
-
-install-info-am:
-
-install-man:
-
-install-pdf: install-pdf-am
-
-install-pdf-am:
-
-install-ps: install-ps-am
-
-install-ps-am:
-
-installcheck-am:
-
-maintainer-clean: maintainer-clean-am
- -rm -f Makefile
-maintainer-clean-am: distclean-am maintainer-clean-generic
-
-mostlyclean: mostlyclean-am
-
-mostlyclean-am: mostlyclean-generic
-
-pdf: pdf-am
-
-pdf-am:
-
-ps: ps-am
-
-ps-am:
-
-uninstall-am:
-
-.MAKE: install-am install-strip
-
-.PHONY: all all-am check check-am clean clean-generic cscopelist-am \
- ctags-am distclean distclean-generic distdir dvi dvi-am html \
- html-am info info-am install install-am install-data \
- install-data-am install-dvi install-dvi-am install-exec \
- install-exec-am install-html install-html-am install-info \
- install-info-am install-man install-pdf install-pdf-am \
- install-ps install-ps-am install-strip installcheck \
- installcheck-am installdirs maintainer-clean \
- maintainer-clean-generic mostlyclean mostlyclean-generic pdf \
- pdf-am ps ps-am tags-am uninstall uninstall-am
-
-
-# Tell versions [3.59,3.63) of GNU make to not export all variables.
-# Otherwise a system limit (for SysV at least) may be exceeded.
-.NOEXPORT:
diff --git a/collectors/idlejitter.plugin/README.md b/collectors/idlejitter.plugin/README.md
index 3c208053..e8e78085 100644
--- a/collectors/idlejitter.plugin/README.md
+++ b/collectors/idlejitter.plugin/README.md
@@ -1,8 +1,8 @@
-## idlejitter.plugin
+# idlejitter.plugin
It works like this:
-A thread is spawn that requests to sleep for 20000 microseconds (20ms).
+A thread is spawned that requests to sleep for 20000 microseconds (20ms).
When the system wakes it up, it measures how many microseconds have passed.
The difference between the requested and the actual duration of the sleep is the idle jitter.
This is done at most 50 times per second, to ensure we have a good average.
@@ -11,3 +11,5 @@ This number is useful:
1. in real-time environments, when the CPU jitter can affect the quality of the service (like VoIP media gateways).
2. in cloud infrastructure, which can pause the VM or container for a small duration to perform operations at the host.
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fidlejitter.plugin%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
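
The README above describes the measurement directly: request a 20000-microsecond sleep, measure how long the sleep actually took, and report the overshoot as idle jitter. A minimal standalone sketch of that loop using POSIX `clock_gettime()` and `usleep()`; the 20 ms request matches the README, the rest is illustrative:

```c
#include <stdio.h>
#include <time.h>
#include <unistd.h>

#define SLEEP_USEC 20000  /* the README's 20000 microseconds (20ms) */

static long long now_usec(void) {
    struct timespec ts;
    clock_gettime(CLOCK_MONOTONIC, &ts);
    return (long long)ts.tv_sec * 1000000LL + ts.tv_nsec / 1000;
}

int main(void) {
    int i;
    for (i = 0; i < 10; i++) {
        long long before = now_usec();
        usleep(SLEEP_USEC);
        long long after = now_usec();

        /* idle jitter = actual sleep duration minus requested duration */
        long long jitter = (after - before) - SLEEP_USEC;
        printf("idle jitter: %lld usec\n", jitter);
    }
    return 0;
}
```
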
diff --git a/collectors/macos.plugin/Makefile.in b/collectors/macos.plugin/Makefile.in
deleted file mode 100644
index d5979211..00000000
--- a/collectors/macos.plugin/Makefile.in
+++ /dev/null
@@ -1,464 +0,0 @@
-# Makefile.in generated by automake 1.14.1 from Makefile.am.
-# @configure_input@
-
-# Copyright (C) 1994-2013 Free Software Foundation, Inc.
-
-# This Makefile.in is free software; the Free Software Foundation
-# gives unlimited permission to copy and/or distribute it,
-# with or without modifications, as long as this notice is preserved.
-
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
-# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
-# PARTICULAR PURPOSE.
-
-@SET_MAKE@
-
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-VPATH = @srcdir@
-am__is_gnu_make = test -n '$(MAKEFILE_LIST)' && test -n '$(MAKELEVEL)'
-am__make_running_with_option = \
- case $${target_option-} in \
- ?) ;; \
- *) echo "am__make_running_with_option: internal error: invalid" \
- "target option '$${target_option-}' specified" >&2; \
- exit 1;; \
- esac; \
- has_opt=no; \
- sane_makeflags=$$MAKEFLAGS; \
- if $(am__is_gnu_make); then \
- sane_makeflags=$$MFLAGS; \
- else \
- case $$MAKEFLAGS in \
- *\\[\ \ ]*) \
- bs=\\; \
- sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \
- | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \
- esac; \
- fi; \
- skip_next=no; \
- strip_trailopt () \
- { \
- flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \
- }; \
- for flg in $$sane_makeflags; do \
- test $$skip_next = yes && { skip_next=no; continue; }; \
- case $$flg in \
- *=*|--*) continue;; \
- -*I) strip_trailopt 'I'; skip_next=yes;; \
- -*I?*) strip_trailopt 'I';; \
- -*O) strip_trailopt 'O'; skip_next=yes;; \
- -*O?*) strip_trailopt 'O';; \
- -*l) strip_trailopt 'l'; skip_next=yes;; \
- -*l?*) strip_trailopt 'l';; \
- -[dEDm]) skip_next=yes;; \
- -[JT]) skip_next=yes;; \
- esac; \
- case $$flg in \
- *$$target_option*) has_opt=yes; break;; \
- esac; \
- done; \
- test $$has_opt = yes
-am__make_dryrun = (target_option=n; $(am__make_running_with_option))
-am__make_keepgoing = (target_option=k; $(am__make_running_with_option))
-pkgdatadir = $(datadir)/@PACKAGE@
-pkgincludedir = $(includedir)/@PACKAGE@
-pkglibdir = $(libdir)/@PACKAGE@
-pkglibexecdir = $(libexecdir)/@PACKAGE@
-am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd
-install_sh_DATA = $(install_sh) -c -m 644
-install_sh_PROGRAM = $(install_sh) -c
-install_sh_SCRIPT = $(install_sh) -c
-INSTALL_HEADER = $(INSTALL_DATA)
-transform = $(program_transform_name)
-NORMAL_INSTALL = :
-PRE_INSTALL = :
-POST_INSTALL = :
-NORMAL_UNINSTALL = :
-PRE_UNINSTALL = :
-POST_UNINSTALL = :
-build_triplet = @build@
-host_triplet = @host@
-subdir = collectors/macos.plugin
-DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/Makefile.am \
- $(dist_noinst_DATA)
-ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
-am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \
- $(top_srcdir)/build/m4/ax_c__generic.m4 \
- $(top_srcdir)/build/m4/ax_c_lto.m4 \
- $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \
- $(top_srcdir)/build/m4/ax_c_mallopt.m4 \
- $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \
- $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \
- $(top_srcdir)/build/m4/ax_pthread.m4 \
- $(top_srcdir)/build/m4/jemalloc.m4 \
- $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac
-am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
- $(ACLOCAL_M4)
-mkinstalldirs = $(install_sh) -d
-CONFIG_HEADER = $(top_builddir)/config.h
-CONFIG_CLEAN_FILES =
-CONFIG_CLEAN_VPATH_FILES =
-AM_V_P = $(am__v_P_@AM_V@)
-am__v_P_ = $(am__v_P_@AM_DEFAULT_V@)
-am__v_P_0 = false
-am__v_P_1 = :
-AM_V_GEN = $(am__v_GEN_@AM_V@)
-am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@)
-am__v_GEN_0 = @echo " GEN " $@;
-am__v_GEN_1 =
-AM_V_at = $(am__v_at_@AM_V@)
-am__v_at_ = $(am__v_at_@AM_DEFAULT_V@)
-am__v_at_0 = @
-am__v_at_1 =
-SOURCES =
-DIST_SOURCES =
-am__can_run_installinfo = \
- case $$AM_UPDATE_INFO_DIR in \
- n|no|NO) false;; \
- *) (install-info --version) >/dev/null 2>&1;; \
- esac
-DATA = $(dist_noinst_DATA)
-am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP)
-DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
-ACLOCAL = @ACLOCAL@
-AMTAR = @AMTAR@
-AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@
-AUTOCONF = @AUTOCONF@
-AUTOHEADER = @AUTOHEADER@
-AUTOMAKE = @AUTOMAKE@
-AWK = @AWK@
-CC = @CC@
-CCDEPMODE = @CCDEPMODE@
-CFLAGS = @CFLAGS@
-CPP = @CPP@
-CPPFLAGS = @CPPFLAGS@
-CYGPATH_W = @CYGPATH_W@
-DEFS = @DEFS@
-DEPDIR = @DEPDIR@
-ECHO_C = @ECHO_C@
-ECHO_N = @ECHO_N@
-ECHO_T = @ECHO_T@
-EGREP = @EGREP@
-EXEEXT = @EXEEXT@
-GREP = @GREP@
-INSTALL = @INSTALL@
-INSTALL_DATA = @INSTALL_DATA@
-INSTALL_PROGRAM = @INSTALL_PROGRAM@
-INSTALL_SCRIPT = @INSTALL_SCRIPT@
-INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
-IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@
-IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@
-LDFLAGS = @LDFLAGS@
-LIBCAP_CFLAGS = @LIBCAP_CFLAGS@
-LIBCAP_LIBS = @LIBCAP_LIBS@
-LIBMNL_CFLAGS = @LIBMNL_CFLAGS@
-LIBMNL_LIBS = @LIBMNL_LIBS@
-LIBOBJS = @LIBOBJS@
-LIBS = @LIBS@
-LTLIBOBJS = @LTLIBOBJS@
-MAINT = @MAINT@
-MAKEINFO = @MAKEINFO@
-MATH_CFLAGS = @MATH_CFLAGS@
-MATH_LIBS = @MATH_LIBS@
-MKDIR_P = @MKDIR_P@
-NFACCT_CFLAGS = @NFACCT_CFLAGS@
-NFACCT_LIBS = @NFACCT_LIBS@
-OBJEXT = @OBJEXT@
-OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@
-OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@
-OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@
-OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@
-OPTIONAL_MATH_CLFAGS = @OPTIONAL_MATH_CLFAGS@
-OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@
-OPTIONAL_NFACCT_CLFAGS = @OPTIONAL_NFACCT_CLFAGS@
-OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@
-OPTIONAL_UUID_CLFAGS = @OPTIONAL_UUID_CLFAGS@
-OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@
-OPTIONAL_ZLIB_CLFAGS = @OPTIONAL_ZLIB_CLFAGS@
-OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@
-PACKAGE = @PACKAGE@
-PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@
-PACKAGE_NAME = @PACKAGE_NAME@
-PACKAGE_RPM_RELEASE = @PACKAGE_RPM_RELEASE@
-PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@
-PACKAGE_STRING = @PACKAGE_STRING@
-PACKAGE_TARNAME = @PACKAGE_TARNAME@
-PACKAGE_URL = @PACKAGE_URL@
-PACKAGE_VERSION = @PACKAGE_VERSION@
-PATH_SEPARATOR = @PATH_SEPARATOR@
-PKG_CONFIG = @PKG_CONFIG@
-PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@
-PKG_CONFIG_PATH = @PKG_CONFIG_PATH@
-PTHREAD_CC = @PTHREAD_CC@
-PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
-PTHREAD_LIBS = @PTHREAD_LIBS@
-SET_MAKE = @SET_MAKE@
-SHELL = @SHELL@
-SSE_CANDIDATE = @SSE_CANDIDATE@
-STRIP = @STRIP@
-UUID_CFLAGS = @UUID_CFLAGS@
-UUID_LIBS = @UUID_LIBS@
-VERSION = @VERSION@
-ZLIB_CFLAGS = @ZLIB_CFLAGS@
-ZLIB_LIBS = @ZLIB_LIBS@
-abs_builddir = @abs_builddir@
-abs_srcdir = @abs_srcdir@
-abs_top_builddir = @abs_top_builddir@
-abs_top_srcdir = @abs_top_srcdir@
-ac_ct_CC = @ac_ct_CC@
-am__include = @am__include@
-am__leading_dot = @am__leading_dot@
-am__quote = @am__quote@
-am__tar = @am__tar@
-am__untar = @am__untar@
-ax_pthread_config = @ax_pthread_config@
-bindir = @bindir@
-build = @build@
-build_alias = @build_alias@
-build_cpu = @build_cpu@
-build_os = @build_os@
-build_target = @build_target@
-build_vendor = @build_vendor@
-builddir = @builddir@
-cachedir = @cachedir@
-chartsdir = @chartsdir@
-configdir = @configdir@
-datadir = @datadir@
-datarootdir = @datarootdir@
-docdir = @docdir@
-dvidir = @dvidir@
-exec_prefix = @exec_prefix@
-has_jemalloc = @has_jemalloc@
-has_tcmalloc = @has_tcmalloc@
-host = @host@
-host_alias = @host_alias@
-host_cpu = @host_cpu@
-host_os = @host_os@
-host_vendor = @host_vendor@
-htmldir = @htmldir@
-includedir = @includedir@
-infodir = @infodir@
-install_sh = @install_sh@
-libconfigdir = @libconfigdir@
-libdir = @libdir@
-libexecdir = @libexecdir@
-localedir = @localedir@
-localstatedir = @localstatedir@
-logdir = @logdir@
-mandir = @mandir@
-mkdir_p = @mkdir_p@
-nodedir = @nodedir@
-oldincludedir = @oldincludedir@
-pdfdir = @pdfdir@
-pluginsdir = @pluginsdir@
-prefix = @prefix@
-program_transform_name = @program_transform_name@
-psdir = @psdir@
-pythondir = @pythondir@
-registrydir = @registrydir@
-sbindir = @sbindir@
-sharedstatedir = @sharedstatedir@
-srcdir = @srcdir@
-sysconfdir = @sysconfdir@
-target_alias = @target_alias@
-top_build_prefix = @top_build_prefix@
-top_builddir = @top_builddir@
-top_srcdir = @top_srcdir@
-varlibdir = @varlibdir@
-webdir = @webdir@
-AUTOMAKE_OPTIONS = subdir-objects
-MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
-dist_noinst_DATA = \
- README.md \
- $(NULL)
-
-all: all-am
-
-.SUFFIXES:
-$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps)
- @for dep in $?; do \
- case '$(am__configure_deps)' in \
- *$$dep*) \
- ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \
- && { if test -f $@; then exit 0; else break; fi; }; \
- exit 1;; \
- esac; \
- done; \
- echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu collectors/macos.plugin/Makefile'; \
- $(am__cd) $(top_srcdir) && \
- $(AUTOMAKE) --gnu collectors/macos.plugin/Makefile
-.PRECIOUS: Makefile
-Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
- @case '$?' in \
- *config.status*) \
- cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \
- *) \
- echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \
- cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \
- esac;
-
-$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES)
- cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
-
-$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps)
- cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
-$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps)
- cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
-$(am__aclocal_m4_deps):
-tags TAGS:
-
-ctags CTAGS:
-
-cscope cscopelist:
-
-
-distdir: $(DISTFILES)
- @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
- topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
- list='$(DISTFILES)'; \
- dist_files=`for file in $$list; do echo $$file; done | \
- sed -e "s|^$$srcdirstrip/||;t" \
- -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \
- case $$dist_files in \
- */*) $(MKDIR_P) `echo "$$dist_files" | \
- sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \
- sort -u` ;; \
- esac; \
- for file in $$dist_files; do \
- if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \
- if test -d $$d/$$file; then \
- dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \
- if test -d "$(distdir)/$$file"; then \
- find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
- fi; \
- if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \
- cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \
- find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
- fi; \
- cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \
- else \
- test -f "$(distdir)/$$file" \
- || cp -p $$d/$$file "$(distdir)/$$file" \
- || exit 1; \
- fi; \
- done
-check-am: all-am
-check: check-am
-all-am: Makefile $(DATA)
-installdirs:
-install: install-am
-install-exec: install-exec-am
-install-data: install-data-am
-uninstall: uninstall-am
-
-install-am: all-am
- @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
-
-installcheck: installcheck-am
-install-strip:
- if test -z '$(STRIP)'; then \
- $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
- install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
- install; \
- else \
- $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
- install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
- "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \
- fi
-mostlyclean-generic:
-
-clean-generic:
-
-distclean-generic:
- -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
- -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES)
-
-maintainer-clean-generic:
- @echo "This command is intended for maintainers to use"
- @echo "it deletes files that may require special tools to rebuild."
- -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES)
-clean: clean-am
-
-clean-am: clean-generic mostlyclean-am
-
-distclean: distclean-am
- -rm -f Makefile
-distclean-am: clean-am distclean-generic
-
-dvi: dvi-am
-
-dvi-am:
-
-html: html-am
-
-html-am:
-
-info: info-am
-
-info-am:
-
-install-data-am:
-
-install-dvi: install-dvi-am
-
-install-dvi-am:
-
-install-exec-am:
-
-install-html: install-html-am
-
-install-html-am:
-
-install-info: install-info-am
-
-install-info-am:
-
-install-man:
-
-install-pdf: install-pdf-am
-
-install-pdf-am:
-
-install-ps: install-ps-am
-
-install-ps-am:
-
-installcheck-am:
-
-maintainer-clean: maintainer-clean-am
- -rm -f Makefile
-maintainer-clean-am: distclean-am maintainer-clean-generic
-
-mostlyclean: mostlyclean-am
-
-mostlyclean-am: mostlyclean-generic
-
-pdf: pdf-am
-
-pdf-am:
-
-ps: ps-am
-
-ps-am:
-
-uninstall-am:
-
-.MAKE: install-am install-strip
-
-.PHONY: all all-am check check-am clean clean-generic cscopelist-am \
- ctags-am distclean distclean-generic distdir dvi dvi-am html \
- html-am info info-am install install-am install-data \
- install-data-am install-dvi install-dvi-am install-exec \
- install-exec-am install-html install-html-am install-info \
- install-info-am install-man install-pdf install-pdf-am \
- install-ps install-ps-am install-strip installcheck \
- installcheck-am installdirs maintainer-clean \
- maintainer-clean-generic mostlyclean mostlyclean-generic pdf \
- pdf-am ps ps-am tags-am uninstall uninstall-am
-
-
-# Tell versions [3.59,3.63) of GNU make to not export all variables.
-# Otherwise a system limit (for SysV at least) may be exceeded.
-.NOEXPORT:
diff --git a/collectors/macos.plugin/README.md b/collectors/macos.plugin/README.md
index ddbcc8f9..3e2554e4 100644
--- a/collectors/macos.plugin/README.md
+++ b/collectors/macos.plugin/README.md
@@ -1,3 +1,5 @@
-# macos
+# macos.plugin
Collects resource usage and performance data on MacOS systems
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fmacos.plugin%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/collectors/macos.plugin/macos_fw.c b/collectors/macos.plugin/macos_fw.c
index 5d0ba929..f253489a 100644
--- a/collectors/macos.plugin/macos_fw.c
+++ b/collectors/macos.plugin/macos_fw.c
@@ -154,7 +154,7 @@ int do_macos_iokit(int update_every, usec_t dt) {
, diskstat.name
, "disk.io"
, "Disk I/O Bandwidth"
- , "kilobytes/s"
+ , "KiB/s"
, "macos"
, "iokit"
, 2000
@@ -306,7 +306,7 @@ int do_macos_iokit(int update_every, usec_t dt) {
, diskstat.name
, "disk.await"
, "Average Completed I/O Operation Time"
- , "ms per operation"
+ , "milliseconds/operation"
, "macos"
, "iokit"
, 2005
@@ -337,7 +337,7 @@ int do_macos_iokit(int update_every, usec_t dt) {
, diskstat.name
, "disk.avgsz"
, "Average Completed I/O Operation Bandwidth"
- , "kilobytes per operation"
+ , "KiB/operation"
, "macos"
, "iokit"
, 2006
@@ -368,7 +368,7 @@ int do_macos_iokit(int update_every, usec_t dt) {
, diskstat.name
, "disk.svctm"
, "Average Service Time"
- , "ms per operation"
+ , "milliseconds/operation"
, "macos"
, "iokit"
, 2007
@@ -410,7 +410,7 @@ int do_macos_iokit(int update_every, usec_t dt) {
, "disk"
, NULL
, "Disk I/O"
- , "kilobytes/s"
+ , "KiB/s"
, "macos"
, "iokit"
, 150
@@ -463,7 +463,7 @@ int do_macos_iokit(int update_every, usec_t dt) {
, mntbuf[i].f_mntonname
, "disk.space"
, title
- , "GB"
+ , "GiB"
, "macos"
, "iokit"
, 2023
@@ -496,7 +496,7 @@ int do_macos_iokit(int update_every, usec_t dt) {
, mntbuf[i].f_mntonname
, "disk.inodes"
, title
- , "Inodes"
+ , "inodes"
, "macos"
, "iokit"
, 2024
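
The macos_fw.c hunks above, together with the macos_mach_smi.c and macos_sysctl.c hunks that follow, are one mechanical cleanup: chart unit labels move to binary prefixes and consistent casing (`kilobytes/s` → `KiB/s`, `GB` → `GiB`, `MB` → `MiB`, `kilobytes per operation` → `KiB/operation`, `ms per operation` → `milliseconds/operation`, `Inodes` → `inodes`, `page faults/s` → `faults/s`). As a hedged spot-check — not part of the patch — the following grep should find nothing in a tree with the patch applied:

```sh
# Look for leftover decimal-prefix or inconsistently cased unit labels;
# the leading quote anchors each pattern to the start of a unit string,
# so the new "KiB"/"MiB"/"GiB" labels do not match.
grep -rnE '"(kilobytes|GB|MB|Inodes|ms per operation|page faults)' \
    collectors/macos.plugin/*.c \
  && echo "stale unit labels remain" \
  || echo "unit labels are consistent"
```
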
diff --git a/collectors/macos.plugin/macos_mach_smi.c b/collectors/macos.plugin/macos_mach_smi.c
index 1c43d624..800b2ce5 100644
--- a/collectors/macos.plugin/macos_mach_smi.c
+++ b/collectors/macos.plugin/macos_mach_smi.c
@@ -90,7 +90,7 @@ int do_macos_mach_smi(int update_every, usec_t dt) {
}
// --------------------------------------------------------------------
-
+
if (likely(do_ram || do_swapio || do_pgfaults)) {
#if (defined __MAC_OS_X_VERSION_MIN_REQUIRED && __MAC_OS_X_VERSION_MIN_REQUIRED >= 1060)
count = sizeof(vm_statistics64_data_t);
@@ -118,7 +118,7 @@ int do_macos_mach_smi(int update_every, usec_t dt) {
, "ram"
, NULL
, "System RAM"
- , "MB"
+ , "MiB"
, "macos"
, "mach_smi"
, 200
@@ -165,7 +165,7 @@ int do_macos_mach_smi(int update_every, usec_t dt) {
, "swap"
, NULL
, "Swap I/O"
- , "kilobytes/s"
+ , "KiB/s"
, "macos"
, "mach_smi"
, 250
@@ -196,7 +196,7 @@ int do_macos_mach_smi(int update_every, usec_t dt) {
, "system"
, NULL
, "Memory Page Faults"
- , "page faults/s"
+ , "faults/s"
, "macos"
, "mach_smi"
, NETDATA_CHART_PRIO_MEM_SYSTEM_PGFAULTS
@@ -233,8 +233,8 @@ int do_macos_mach_smi(int update_every, usec_t dt) {
rrdset_done(st);
}
}
- }
-
+ }
+
// --------------------------------------------------------------------
return 0;
diff --git a/collectors/macos.plugin/macos_sysctl.c b/collectors/macos.plugin/macos_sysctl.c
index 6b443c04..a8af72e6 100644
--- a/collectors/macos.plugin/macos_sysctl.c
+++ b/collectors/macos.plugin/macos_sysctl.c
@@ -279,7 +279,7 @@ int do_macos_sysctl(int update_every, usec_t dt) {
, "swap"
, NULL
, "System Swap"
- , "MB"
+ , "MiB"
, "macos"
, "sysctl"
, 201
@@ -965,7 +965,7 @@ int do_macos_sysctl(int update_every, usec_t dt) {
}
}
}
-
+
// --------------------------------------------------------------------
if (likely(do_ip6_packets || do_ip6_fragsout || do_ip6_fragsin || do_ip6_errors)) {
diff --git a/collectors/nfacct.plugin/Makefile.in b/collectors/nfacct.plugin/Makefile.in
deleted file mode 100644
index 2a1d001d..00000000
--- a/collectors/nfacct.plugin/Makefile.in
+++ /dev/null
@@ -1,464 +0,0 @@
-# Makefile.in generated by automake 1.14.1 from Makefile.am.
-# @configure_input@
-
-# Copyright (C) 1994-2013 Free Software Foundation, Inc.
-
-# This Makefile.in is free software; the Free Software Foundation
-# gives unlimited permission to copy and/or distribute it,
-# with or without modifications, as long as this notice is preserved.
-
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
-# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
-# PARTICULAR PURPOSE.
-
-@SET_MAKE@
-
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-VPATH = @srcdir@
-am__is_gnu_make = test -n '$(MAKEFILE_LIST)' && test -n '$(MAKELEVEL)'
-am__make_running_with_option = \
- case $${target_option-} in \
- ?) ;; \
- *) echo "am__make_running_with_option: internal error: invalid" \
- "target option '$${target_option-}' specified" >&2; \
- exit 1;; \
- esac; \
- has_opt=no; \
- sane_makeflags=$$MAKEFLAGS; \
- if $(am__is_gnu_make); then \
- sane_makeflags=$$MFLAGS; \
- else \
- case $$MAKEFLAGS in \
- *\\[\ \ ]*) \
- bs=\\; \
- sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \
- | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \
- esac; \
- fi; \
- skip_next=no; \
- strip_trailopt () \
- { \
- flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \
- }; \
- for flg in $$sane_makeflags; do \
- test $$skip_next = yes && { skip_next=no; continue; }; \
- case $$flg in \
- *=*|--*) continue;; \
- -*I) strip_trailopt 'I'; skip_next=yes;; \
- -*I?*) strip_trailopt 'I';; \
- -*O) strip_trailopt 'O'; skip_next=yes;; \
- -*O?*) strip_trailopt 'O';; \
- -*l) strip_trailopt 'l'; skip_next=yes;; \
- -*l?*) strip_trailopt 'l';; \
- -[dEDm]) skip_next=yes;; \
- -[JT]) skip_next=yes;; \
- esac; \
- case $$flg in \
- *$$target_option*) has_opt=yes; break;; \
- esac; \
- done; \
- test $$has_opt = yes
-am__make_dryrun = (target_option=n; $(am__make_running_with_option))
-am__make_keepgoing = (target_option=k; $(am__make_running_with_option))
-pkgdatadir = $(datadir)/@PACKAGE@
-pkgincludedir = $(includedir)/@PACKAGE@
-pkglibdir = $(libdir)/@PACKAGE@
-pkglibexecdir = $(libexecdir)/@PACKAGE@
-am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd
-install_sh_DATA = $(install_sh) -c -m 644
-install_sh_PROGRAM = $(install_sh) -c
-install_sh_SCRIPT = $(install_sh) -c
-INSTALL_HEADER = $(INSTALL_DATA)
-transform = $(program_transform_name)
-NORMAL_INSTALL = :
-PRE_INSTALL = :
-POST_INSTALL = :
-NORMAL_UNINSTALL = :
-PRE_UNINSTALL = :
-POST_UNINSTALL = :
-build_triplet = @build@
-host_triplet = @host@
-subdir = collectors/nfacct.plugin
-DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/Makefile.am \
- $(dist_noinst_DATA)
-ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
-am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \
- $(top_srcdir)/build/m4/ax_c__generic.m4 \
- $(top_srcdir)/build/m4/ax_c_lto.m4 \
- $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \
- $(top_srcdir)/build/m4/ax_c_mallopt.m4 \
- $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \
- $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \
- $(top_srcdir)/build/m4/ax_pthread.m4 \
- $(top_srcdir)/build/m4/jemalloc.m4 \
- $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac
-am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
- $(ACLOCAL_M4)
-mkinstalldirs = $(install_sh) -d
-CONFIG_HEADER = $(top_builddir)/config.h
-CONFIG_CLEAN_FILES =
-CONFIG_CLEAN_VPATH_FILES =
-AM_V_P = $(am__v_P_@AM_V@)
-am__v_P_ = $(am__v_P_@AM_DEFAULT_V@)
-am__v_P_0 = false
-am__v_P_1 = :
-AM_V_GEN = $(am__v_GEN_@AM_V@)
-am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@)
-am__v_GEN_0 = @echo " GEN " $@;
-am__v_GEN_1 =
-AM_V_at = $(am__v_at_@AM_V@)
-am__v_at_ = $(am__v_at_@AM_DEFAULT_V@)
-am__v_at_0 = @
-am__v_at_1 =
-SOURCES =
-DIST_SOURCES =
-am__can_run_installinfo = \
- case $$AM_UPDATE_INFO_DIR in \
- n|no|NO) false;; \
- *) (install-info --version) >/dev/null 2>&1;; \
- esac
-DATA = $(dist_noinst_DATA)
-am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP)
-DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
-ACLOCAL = @ACLOCAL@
-AMTAR = @AMTAR@
-AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@
-AUTOCONF = @AUTOCONF@
-AUTOHEADER = @AUTOHEADER@
-AUTOMAKE = @AUTOMAKE@
-AWK = @AWK@
-CC = @CC@
-CCDEPMODE = @CCDEPMODE@
-CFLAGS = @CFLAGS@
-CPP = @CPP@
-CPPFLAGS = @CPPFLAGS@
-CYGPATH_W = @CYGPATH_W@
-DEFS = @DEFS@
-DEPDIR = @DEPDIR@
-ECHO_C = @ECHO_C@
-ECHO_N = @ECHO_N@
-ECHO_T = @ECHO_T@
-EGREP = @EGREP@
-EXEEXT = @EXEEXT@
-GREP = @GREP@
-INSTALL = @INSTALL@
-INSTALL_DATA = @INSTALL_DATA@
-INSTALL_PROGRAM = @INSTALL_PROGRAM@
-INSTALL_SCRIPT = @INSTALL_SCRIPT@
-INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
-IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@
-IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@
-LDFLAGS = @LDFLAGS@
-LIBCAP_CFLAGS = @LIBCAP_CFLAGS@
-LIBCAP_LIBS = @LIBCAP_LIBS@
-LIBMNL_CFLAGS = @LIBMNL_CFLAGS@
-LIBMNL_LIBS = @LIBMNL_LIBS@
-LIBOBJS = @LIBOBJS@
-LIBS = @LIBS@
-LTLIBOBJS = @LTLIBOBJS@
-MAINT = @MAINT@
-MAKEINFO = @MAKEINFO@
-MATH_CFLAGS = @MATH_CFLAGS@
-MATH_LIBS = @MATH_LIBS@
-MKDIR_P = @MKDIR_P@
-NFACCT_CFLAGS = @NFACCT_CFLAGS@
-NFACCT_LIBS = @NFACCT_LIBS@
-OBJEXT = @OBJEXT@
-OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@
-OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@
-OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@
-OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@
-OPTIONAL_MATH_CLFAGS = @OPTIONAL_MATH_CLFAGS@
-OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@
-OPTIONAL_NFACCT_CLFAGS = @OPTIONAL_NFACCT_CLFAGS@
-OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@
-OPTIONAL_UUID_CLFAGS = @OPTIONAL_UUID_CLFAGS@
-OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@
-OPTIONAL_ZLIB_CLFAGS = @OPTIONAL_ZLIB_CLFAGS@
-OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@
-PACKAGE = @PACKAGE@
-PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@
-PACKAGE_NAME = @PACKAGE_NAME@
-PACKAGE_RPM_RELEASE = @PACKAGE_RPM_RELEASE@
-PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@
-PACKAGE_STRING = @PACKAGE_STRING@
-PACKAGE_TARNAME = @PACKAGE_TARNAME@
-PACKAGE_URL = @PACKAGE_URL@
-PACKAGE_VERSION = @PACKAGE_VERSION@
-PATH_SEPARATOR = @PATH_SEPARATOR@
-PKG_CONFIG = @PKG_CONFIG@
-PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@
-PKG_CONFIG_PATH = @PKG_CONFIG_PATH@
-PTHREAD_CC = @PTHREAD_CC@
-PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
-PTHREAD_LIBS = @PTHREAD_LIBS@
-SET_MAKE = @SET_MAKE@
-SHELL = @SHELL@
-SSE_CANDIDATE = @SSE_CANDIDATE@
-STRIP = @STRIP@
-UUID_CFLAGS = @UUID_CFLAGS@
-UUID_LIBS = @UUID_LIBS@
-VERSION = @VERSION@
-ZLIB_CFLAGS = @ZLIB_CFLAGS@
-ZLIB_LIBS = @ZLIB_LIBS@
-abs_builddir = @abs_builddir@
-abs_srcdir = @abs_srcdir@
-abs_top_builddir = @abs_top_builddir@
-abs_top_srcdir = @abs_top_srcdir@
-ac_ct_CC = @ac_ct_CC@
-am__include = @am__include@
-am__leading_dot = @am__leading_dot@
-am__quote = @am__quote@
-am__tar = @am__tar@
-am__untar = @am__untar@
-ax_pthread_config = @ax_pthread_config@
-bindir = @bindir@
-build = @build@
-build_alias = @build_alias@
-build_cpu = @build_cpu@
-build_os = @build_os@
-build_target = @build_target@
-build_vendor = @build_vendor@
-builddir = @builddir@
-cachedir = @cachedir@
-chartsdir = @chartsdir@
-configdir = @configdir@
-datadir = @datadir@
-datarootdir = @datarootdir@
-docdir = @docdir@
-dvidir = @dvidir@
-exec_prefix = @exec_prefix@
-has_jemalloc = @has_jemalloc@
-has_tcmalloc = @has_tcmalloc@
-host = @host@
-host_alias = @host_alias@
-host_cpu = @host_cpu@
-host_os = @host_os@
-host_vendor = @host_vendor@
-htmldir = @htmldir@
-includedir = @includedir@
-infodir = @infodir@
-install_sh = @install_sh@
-libconfigdir = @libconfigdir@
-libdir = @libdir@
-libexecdir = @libexecdir@
-localedir = @localedir@
-localstatedir = @localstatedir@
-logdir = @logdir@
-mandir = @mandir@
-mkdir_p = @mkdir_p@
-nodedir = @nodedir@
-oldincludedir = @oldincludedir@
-pdfdir = @pdfdir@
-pluginsdir = @pluginsdir@
-prefix = @prefix@
-program_transform_name = @program_transform_name@
-psdir = @psdir@
-pythondir = @pythondir@
-registrydir = @registrydir@
-sbindir = @sbindir@
-sharedstatedir = @sharedstatedir@
-srcdir = @srcdir@
-sysconfdir = @sysconfdir@
-target_alias = @target_alias@
-top_build_prefix = @top_build_prefix@
-top_builddir = @top_builddir@
-top_srcdir = @top_srcdir@
-varlibdir = @varlibdir@
-webdir = @webdir@
-AUTOMAKE_OPTIONS = subdir-objects
-MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
-dist_noinst_DATA = \
- README.md \
- $(NULL)
-
-all: all-am
-
-.SUFFIXES:
-$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps)
- @for dep in $?; do \
- case '$(am__configure_deps)' in \
- *$$dep*) \
- ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \
- && { if test -f $@; then exit 0; else break; fi; }; \
- exit 1;; \
- esac; \
- done; \
- echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu collectors/nfacct.plugin/Makefile'; \
- $(am__cd) $(top_srcdir) && \
- $(AUTOMAKE) --gnu collectors/nfacct.plugin/Makefile
-.PRECIOUS: Makefile
-Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
- @case '$?' in \
- *config.status*) \
- cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \
- *) \
- echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \
- cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \
- esac;
-
-$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES)
- cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
-
-$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps)
- cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
-$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps)
- cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
-$(am__aclocal_m4_deps):
-tags TAGS:
-
-ctags CTAGS:
-
-cscope cscopelist:
-
-
-distdir: $(DISTFILES)
- @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
- topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
- list='$(DISTFILES)'; \
- dist_files=`for file in $$list; do echo $$file; done | \
- sed -e "s|^$$srcdirstrip/||;t" \
- -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \
- case $$dist_files in \
- */*) $(MKDIR_P) `echo "$$dist_files" | \
- sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \
- sort -u` ;; \
- esac; \
- for file in $$dist_files; do \
- if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \
- if test -d $$d/$$file; then \
- dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \
- if test -d "$(distdir)/$$file"; then \
- find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
- fi; \
- if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \
- cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \
- find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
- fi; \
- cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \
- else \
- test -f "$(distdir)/$$file" \
- || cp -p $$d/$$file "$(distdir)/$$file" \
- || exit 1; \
- fi; \
- done
-check-am: all-am
-check: check-am
-all-am: Makefile $(DATA)
-installdirs:
-install: install-am
-install-exec: install-exec-am
-install-data: install-data-am
-uninstall: uninstall-am
-
-install-am: all-am
- @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
-
-installcheck: installcheck-am
-install-strip:
- if test -z '$(STRIP)'; then \
- $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
- install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
- install; \
- else \
- $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
- install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
- "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \
- fi
-mostlyclean-generic:
-
-clean-generic:
-
-distclean-generic:
- -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
- -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES)
-
-maintainer-clean-generic:
- @echo "This command is intended for maintainers to use"
- @echo "it deletes files that may require special tools to rebuild."
- -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES)
-clean: clean-am
-
-clean-am: clean-generic mostlyclean-am
-
-distclean: distclean-am
- -rm -f Makefile
-distclean-am: clean-am distclean-generic
-
-dvi: dvi-am
-
-dvi-am:
-
-html: html-am
-
-html-am:
-
-info: info-am
-
-info-am:
-
-install-data-am:
-
-install-dvi: install-dvi-am
-
-install-dvi-am:
-
-install-exec-am:
-
-install-html: install-html-am
-
-install-html-am:
-
-install-info: install-info-am
-
-install-info-am:
-
-install-man:
-
-install-pdf: install-pdf-am
-
-install-pdf-am:
-
-install-ps: install-ps-am
-
-install-ps-am:
-
-installcheck-am:
-
-maintainer-clean: maintainer-clean-am
- -rm -f Makefile
-maintainer-clean-am: distclean-am maintainer-clean-generic
-
-mostlyclean: mostlyclean-am
-
-mostlyclean-am: mostlyclean-generic
-
-pdf: pdf-am
-
-pdf-am:
-
-ps: ps-am
-
-ps-am:
-
-uninstall-am:
-
-.MAKE: install-am install-strip
-
-.PHONY: all all-am check check-am clean clean-generic cscopelist-am \
- ctags-am distclean distclean-generic distdir dvi dvi-am html \
- html-am info info-am install install-am install-data \
- install-data-am install-dvi install-dvi-am install-exec \
- install-exec-am install-html install-html-am install-info \
- install-info-am install-man install-pdf install-pdf-am \
- install-ps install-ps-am install-strip installcheck \
- installcheck-am installdirs maintainer-clean \
- maintainer-clean-generic mostlyclean mostlyclean-generic pdf \
- pdf-am ps ps-am tags-am uninstall uninstall-am
-
-
-# Tell versions [3.59,3.63) of GNU make to not export all variables.
-# Otherwise a system limit (for SysV at least) may be exceeded.
-.NOEXPORT:
diff --git a/collectors/nfacct.plugin/README.md b/collectors/nfacct.plugin/README.md
index 814b4791..5f1ee2e7 100644
--- a/collectors/nfacct.plugin/README.md
+++ b/collectors/nfacct.plugin/README.md
@@ -8,3 +8,5 @@ We have to move the code to an external plugin to setuid just the plugin not the
You can build netdata with it to test it though.
Just run `./configure` (or `netdata-installer.sh`) with the option `--enable-plugin-nfacct` (and any other options you may need).
Remember, you have to tell netdata you want it to run as `root` for this plugin to work.
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fnfacct.plugin%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
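
The build note in the README above is unchanged by this patch and is the whole procedure: the plugin is opt-in at configure time. A minimal sketch of that workflow, assuming a source checkout (any extra configure options are your own choice):

```sh
# Build netdata with the optional nfacct plugin (opt-in at build time).
./configure --enable-plugin-nfacct    # plus any other options you need
make
sudo make install
# As the README says, you have to tell netdata to run this plugin as
# root: nfacct reads netfilter accounting data, which needs privileges.
```
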
diff --git a/collectors/node.d.plugin/.keep b/collectors/node.d.plugin/.keep
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/collectors/node.d.plugin/.keep
diff --git a/collectors/node.d.plugin/Makefile.am b/collectors/node.d.plugin/Makefile.am
index 4de13cf7..3b5a0a51 100644
--- a/collectors/node.d.plugin/Makefile.am
+++ b/collectors/node.d.plugin/Makefile.am
@@ -23,12 +23,11 @@ dist_noinst_DATA = \
usernodeconfigdir=$(configdir)/node.d
dist_usernodeconfig_DATA = \
- $(top_srcdir)/installer/.keep \
+ .keep \
$(NULL)
nodeconfigdir=$(libconfigdir)/node.d
dist_nodeconfig_DATA = \
- $(top_srcdir)/installer/.keep \
$(NULL)
dist_node_DATA = \
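
Context for the Makefile.am hunk above: `.keep` is an empty placeholder file (the new in-tree copy is added a few hunks up as `collectors/node.d.plugin/.keep`), and listing it in `dist_usernodeconfig_DATA` makes `make install` create the otherwise-empty user `node.d` config directory; the second stanza drops the placeholder from the stock config dir entirely. A rough sketch of the effect, with the staging path and sysconfdir as assumptions that depend on your configure flags:

```sh
# The placeholder is literally an empty file:
: > collectors/node.d.plugin/.keep
# Installing it forces the user config directory into existence, so it
# is there before the user writes any node.d module configuration:
make install DESTDIR=/tmp/stage
ls -la /tmp/stage/etc/netdata/node.d/   # assumes --sysconfdir=/etc
```
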
diff --git a/collectors/node.d.plugin/Makefile.in b/collectors/node.d.plugin/Makefile.in
deleted file mode 100644
index 4aec01de..00000000
--- a/collectors/node.d.plugin/Makefile.in
+++ /dev/null
@@ -1,805 +0,0 @@
-# Makefile.in generated by automake 1.14.1 from Makefile.am.
-# @configure_input@
-
-# Copyright (C) 1994-2013 Free Software Foundation, Inc.
-
-# This Makefile.in is free software; the Free Software Foundation
-# gives unlimited permission to copy and/or distribute it,
-# with or without modifications, as long as this notice is preserved.
-
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
-# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
-# PARTICULAR PURPOSE.
-
-@SET_MAKE@
-
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-# THIS IS NOT A COMPLETE Makefile
-# IT IS INCLUDED BY ITS PARENT'S Makefile.am
-# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
-
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-# THIS IS NOT A COMPLETE Makefile
-# IT IS INCLUDED BY ITS PARENT'S Makefile.am
-# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
-
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-# THIS IS NOT A COMPLETE Makefile
-# IT IS INCLUDED BY ITS PARENT'S Makefile.am
-# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
-
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-# THIS IS NOT A COMPLETE Makefile
-# IT IS INCLUDED BY ITS PARENT'S Makefile.am
-# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
-
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-# THIS IS NOT A COMPLETE Makefile
-# IT IS INCLUDED BY ITS PARENT'S Makefile.am
-# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
-
-
-VPATH = @srcdir@
-am__is_gnu_make = test -n '$(MAKEFILE_LIST)' && test -n '$(MAKELEVEL)'
-am__make_running_with_option = \
- case $${target_option-} in \
- ?) ;; \
- *) echo "am__make_running_with_option: internal error: invalid" \
- "target option '$${target_option-}' specified" >&2; \
- exit 1;; \
- esac; \
- has_opt=no; \
- sane_makeflags=$$MAKEFLAGS; \
- if $(am__is_gnu_make); then \
- sane_makeflags=$$MFLAGS; \
- else \
- case $$MAKEFLAGS in \
- *\\[\ \ ]*) \
- bs=\\; \
- sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \
- | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \
- esac; \
- fi; \
- skip_next=no; \
- strip_trailopt () \
- { \
- flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \
- }; \
- for flg in $$sane_makeflags; do \
- test $$skip_next = yes && { skip_next=no; continue; }; \
- case $$flg in \
- *=*|--*) continue;; \
- -*I) strip_trailopt 'I'; skip_next=yes;; \
- -*I?*) strip_trailopt 'I';; \
- -*O) strip_trailopt 'O'; skip_next=yes;; \
- -*O?*) strip_trailopt 'O';; \
- -*l) strip_trailopt 'l'; skip_next=yes;; \
- -*l?*) strip_trailopt 'l';; \
- -[dEDm]) skip_next=yes;; \
- -[JT]) skip_next=yes;; \
- esac; \
- case $$flg in \
- *$$target_option*) has_opt=yes; break;; \
- esac; \
- done; \
- test $$has_opt = yes
-am__make_dryrun = (target_option=n; $(am__make_running_with_option))
-am__make_keepgoing = (target_option=k; $(am__make_running_with_option))
-pkgdatadir = $(datadir)/@PACKAGE@
-pkgincludedir = $(includedir)/@PACKAGE@
-pkglibdir = $(libdir)/@PACKAGE@
-pkglibexecdir = $(libexecdir)/@PACKAGE@
-am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd
-install_sh_DATA = $(install_sh) -c -m 644
-install_sh_PROGRAM = $(install_sh) -c
-install_sh_SCRIPT = $(install_sh) -c
-INSTALL_HEADER = $(INSTALL_DATA)
-transform = $(program_transform_name)
-NORMAL_INSTALL = :
-PRE_INSTALL = :
-POST_INSTALL = :
-NORMAL_UNINSTALL = :
-PRE_UNINSTALL = :
-POST_UNINSTALL = :
-build_triplet = @build@
-host_triplet = @host@
-DIST_COMMON = $(top_srcdir)/build/subst.inc \
- $(srcdir)/fronius/Makefile.inc $(srcdir)/named/Makefile.inc \
- $(srcdir)/sma_webbox/Makefile.inc $(srcdir)/snmp/Makefile.inc \
- $(srcdir)/stiebeleltron/Makefile.inc $(srcdir)/Makefile.in \
- $(srcdir)/Makefile.am $(dist_plugins_SCRIPTS) \
- $(dist_libconfig_DATA) $(dist_node_DATA) \
- $(dist_nodeconfig_DATA) $(dist_nodemodules_DATA) \
- $(dist_nodemoduleslibber_DATA) $(dist_noinst_DATA) \
- $(dist_usernodeconfig_DATA)
-subdir = collectors/node.d.plugin
-ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
-am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \
- $(top_srcdir)/build/m4/ax_c__generic.m4 \
- $(top_srcdir)/build/m4/ax_c_lto.m4 \
- $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \
- $(top_srcdir)/build/m4/ax_c_mallopt.m4 \
- $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \
- $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \
- $(top_srcdir)/build/m4/ax_pthread.m4 \
- $(top_srcdir)/build/m4/jemalloc.m4 \
- $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac
-am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
- $(ACLOCAL_M4)
-mkinstalldirs = $(install_sh) -d
-CONFIG_HEADER = $(top_builddir)/config.h
-CONFIG_CLEAN_FILES =
-CONFIG_CLEAN_VPATH_FILES =
-am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`;
-am__vpath_adj = case $$p in \
- $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \
- *) f=$$p;; \
- esac;
-am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`;
-am__install_max = 40
-am__nobase_strip_setup = \
- srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'`
-am__nobase_strip = \
- for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||"
-am__nobase_list = $(am__nobase_strip_setup); \
- for p in $$list; do echo "$$p $$p"; done | \
- sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \
- $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \
- if (++n[$$2] == $(am__install_max)) \
- { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \
- END { for (dir in files) print dir, files[dir] }'
-am__base_list = \
- sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \
- sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g'
-am__uninstall_files_from_dir = { \
- test -z "$$files" \
- || { test ! -d "$$dir" && test ! -f "$$dir" && test ! -r "$$dir"; } \
- || { echo " ( cd '$$dir' && rm -f" $$files ")"; \
- $(am__cd) "$$dir" && rm -f $$files; }; \
- }
-am__installdirs = "$(DESTDIR)$(pluginsdir)" \
- "$(DESTDIR)$(libconfigdir)" "$(DESTDIR)$(nodedir)" \
- "$(DESTDIR)$(nodeconfigdir)" "$(DESTDIR)$(nodemodulesdir)" \
- "$(DESTDIR)$(nodemoduleslibberdir)" \
- "$(DESTDIR)$(usernodeconfigdir)"
-SCRIPTS = $(dist_plugins_SCRIPTS)
-AM_V_P = $(am__v_P_@AM_V@)
-am__v_P_ = $(am__v_P_@AM_DEFAULT_V@)
-am__v_P_0 = false
-am__v_P_1 = :
-AM_V_GEN = $(am__v_GEN_@AM_V@)
-am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@)
-am__v_GEN_0 = @echo " GEN " $@;
-am__v_GEN_1 =
-AM_V_at = $(am__v_at_@AM_V@)
-am__v_at_ = $(am__v_at_@AM_DEFAULT_V@)
-am__v_at_0 = @
-am__v_at_1 =
-SOURCES =
-DIST_SOURCES =
-am__can_run_installinfo = \
- case $$AM_UPDATE_INFO_DIR in \
- n|no|NO) false;; \
- *) (install-info --version) >/dev/null 2>&1;; \
- esac
-DATA = $(dist_libconfig_DATA) $(dist_node_DATA) \
- $(dist_nodeconfig_DATA) $(dist_nodemodules_DATA) \
- $(dist_nodemoduleslibber_DATA) $(dist_noinst_DATA) \
- $(dist_usernodeconfig_DATA)
-am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP)
-DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
-ACLOCAL = @ACLOCAL@
-AMTAR = @AMTAR@
-AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@
-AUTOCONF = @AUTOCONF@
-AUTOHEADER = @AUTOHEADER@
-AUTOMAKE = @AUTOMAKE@
-AWK = @AWK@
-CC = @CC@
-CCDEPMODE = @CCDEPMODE@
-CFLAGS = @CFLAGS@
-CPP = @CPP@
-CPPFLAGS = @CPPFLAGS@
-CYGPATH_W = @CYGPATH_W@
-DEFS = @DEFS@
-DEPDIR = @DEPDIR@
-ECHO_C = @ECHO_C@
-ECHO_N = @ECHO_N@
-ECHO_T = @ECHO_T@
-EGREP = @EGREP@
-EXEEXT = @EXEEXT@
-GREP = @GREP@
-INSTALL = @INSTALL@
-INSTALL_DATA = @INSTALL_DATA@
-INSTALL_PROGRAM = @INSTALL_PROGRAM@
-INSTALL_SCRIPT = @INSTALL_SCRIPT@
-INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
-IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@
-IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@
-LDFLAGS = @LDFLAGS@
-LIBCAP_CFLAGS = @LIBCAP_CFLAGS@
-LIBCAP_LIBS = @LIBCAP_LIBS@
-LIBMNL_CFLAGS = @LIBMNL_CFLAGS@
-LIBMNL_LIBS = @LIBMNL_LIBS@
-LIBOBJS = @LIBOBJS@
-LIBS = @LIBS@
-LTLIBOBJS = @LTLIBOBJS@
-MAINT = @MAINT@
-MAKEINFO = @MAKEINFO@
-MATH_CFLAGS = @MATH_CFLAGS@
-MATH_LIBS = @MATH_LIBS@
-MKDIR_P = @MKDIR_P@
-NFACCT_CFLAGS = @NFACCT_CFLAGS@
-NFACCT_LIBS = @NFACCT_LIBS@
-OBJEXT = @OBJEXT@
-OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@
-OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@
-OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@
-OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@
-OPTIONAL_MATH_CLFAGS = @OPTIONAL_MATH_CLFAGS@
-OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@
-OPTIONAL_NFACCT_CLFAGS = @OPTIONAL_NFACCT_CLFAGS@
-OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@
-OPTIONAL_UUID_CLFAGS = @OPTIONAL_UUID_CLFAGS@
-OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@
-OPTIONAL_ZLIB_CLFAGS = @OPTIONAL_ZLIB_CLFAGS@
-OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@
-PACKAGE = @PACKAGE@
-PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@
-PACKAGE_NAME = @PACKAGE_NAME@
-PACKAGE_RPM_RELEASE = @PACKAGE_RPM_RELEASE@
-PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@
-PACKAGE_STRING = @PACKAGE_STRING@
-PACKAGE_TARNAME = @PACKAGE_TARNAME@
-PACKAGE_URL = @PACKAGE_URL@
-PACKAGE_VERSION = @PACKAGE_VERSION@
-PATH_SEPARATOR = @PATH_SEPARATOR@
-PKG_CONFIG = @PKG_CONFIG@
-PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@
-PKG_CONFIG_PATH = @PKG_CONFIG_PATH@
-PTHREAD_CC = @PTHREAD_CC@
-PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
-PTHREAD_LIBS = @PTHREAD_LIBS@
-SET_MAKE = @SET_MAKE@
-SHELL = @SHELL@
-SSE_CANDIDATE = @SSE_CANDIDATE@
-STRIP = @STRIP@
-UUID_CFLAGS = @UUID_CFLAGS@
-UUID_LIBS = @UUID_LIBS@
-VERSION = @VERSION@
-ZLIB_CFLAGS = @ZLIB_CFLAGS@
-ZLIB_LIBS = @ZLIB_LIBS@
-abs_builddir = @abs_builddir@
-abs_srcdir = @abs_srcdir@
-abs_top_builddir = @abs_top_builddir@
-abs_top_srcdir = @abs_top_srcdir@
-ac_ct_CC = @ac_ct_CC@
-am__include = @am__include@
-am__leading_dot = @am__leading_dot@
-am__quote = @am__quote@
-am__tar = @am__tar@
-am__untar = @am__untar@
-ax_pthread_config = @ax_pthread_config@
-bindir = @bindir@
-build = @build@
-build_alias = @build_alias@
-build_cpu = @build_cpu@
-build_os = @build_os@
-build_target = @build_target@
-build_vendor = @build_vendor@
-builddir = @builddir@
-cachedir = @cachedir@
-chartsdir = @chartsdir@
-configdir = @configdir@
-datadir = @datadir@
-datarootdir = @datarootdir@
-docdir = @docdir@
-dvidir = @dvidir@
-exec_prefix = @exec_prefix@
-has_jemalloc = @has_jemalloc@
-has_tcmalloc = @has_tcmalloc@
-host = @host@
-host_alias = @host_alias@
-host_cpu = @host_cpu@
-host_os = @host_os@
-host_vendor = @host_vendor@
-htmldir = @htmldir@
-includedir = @includedir@
-infodir = @infodir@
-install_sh = @install_sh@
-libconfigdir = @libconfigdir@
-libdir = @libdir@
-libexecdir = @libexecdir@
-localedir = @localedir@
-localstatedir = @localstatedir@
-logdir = @logdir@
-mandir = @mandir@
-mkdir_p = @mkdir_p@
-nodedir = @nodedir@
-oldincludedir = @oldincludedir@
-pdfdir = @pdfdir@
-pluginsdir = @pluginsdir@
-prefix = @prefix@
-program_transform_name = @program_transform_name@
-psdir = @psdir@
-pythondir = @pythondir@
-registrydir = @registrydir@
-sbindir = @sbindir@
-sharedstatedir = @sharedstatedir@
-srcdir = @srcdir@
-sysconfdir = @sysconfdir@
-target_alias = @target_alias@
-top_build_prefix = @top_build_prefix@
-top_builddir = @top_builddir@
-top_srcdir = @top_srcdir@
-varlibdir = @varlibdir@
-webdir = @webdir@
-MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
-CLEANFILES = \
- node.d.plugin \
- $(NULL)
-
-SUFFIXES = .in
-dist_libconfig_DATA = \
- node.d.conf \
- $(NULL)
-
-dist_plugins_SCRIPTS = \
- node.d.plugin \
- $(NULL)
-
-# dist_nodeconfig_DATA += fronius/fronius.conf
-
-# do not install these files, but include them in the distribution
-# dist_nodeconfig_DATA += named/named.conf
-
-# do not install these files, but include them in the distribution
-# dist_nodeconfig_DATA += sma_webbox/sma_webbox.conf
-
-# do not install these files, but include them in the distribution
-# dist_nodeconfig_DATA += snmp/snmp.conf
-
-# do not install these files, but include them in the distribution
-# dist_nodeconfig_DATA += stiebeleltron/stiebeleltron.conf
-
-# do not install these files, but include them in the distribution
-dist_noinst_DATA = node.d.plugin.in README.md $(NULL) \
- fronius/README.md fronius/Makefile.inc named/README.md \
- named/Makefile.inc sma_webbox/README.md \
- sma_webbox/Makefile.inc snmp/README.md snmp/Makefile.inc \
- stiebeleltron/README.md stiebeleltron/Makefile.inc
-usernodeconfigdir = $(configdir)/node.d
-dist_usernodeconfig_DATA = \
- $(top_srcdir)/installer/.keep \
- $(NULL)
-
-nodeconfigdir = $(libconfigdir)/node.d
-dist_nodeconfig_DATA = \
- $(top_srcdir)/installer/.keep \
- $(NULL)
-
-
-# install these files
-
-# install these files
-
-# install these files
-
-# install these files
-
-# install these files
-dist_node_DATA = $(NULL) fronius/fronius.node.js named/named.node.js \
- sma_webbox/sma_webbox.node.js snmp/snmp.node.js \
- stiebeleltron/stiebeleltron.node.js
-nodemodulesdir = $(nodedir)/node_modules
-dist_nodemodules_DATA = \
- node_modules/netdata.js \
- node_modules/extend.js \
- node_modules/pixl-xml.js \
- node_modules/net-snmp.js \
- node_modules/asn1-ber.js \
- $(NULL)
-
-nodemoduleslibberdir = $(nodedir)/node_modules/lib/ber
-dist_nodemoduleslibber_DATA = \
- node_modules/lib/ber/index.js \
- node_modules/lib/ber/errors.js \
- node_modules/lib/ber/reader.js \
- node_modules/lib/ber/types.js \
- node_modules/lib/ber/writer.js \
- $(NULL)
-
-all: all-am
-
-.SUFFIXES:
-.SUFFIXES: .in
-$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(top_srcdir)/build/subst.inc $(srcdir)/fronius/Makefile.inc $(srcdir)/named/Makefile.inc $(srcdir)/sma_webbox/Makefile.inc $(srcdir)/snmp/Makefile.inc $(srcdir)/stiebeleltron/Makefile.inc $(am__configure_deps)
- @for dep in $?; do \
- case '$(am__configure_deps)' in \
- *$$dep*) \
- ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \
- && { if test -f $@; then exit 0; else break; fi; }; \
- exit 1;; \
- esac; \
- done; \
- echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu collectors/node.d.plugin/Makefile'; \
- $(am__cd) $(top_srcdir) && \
- $(AUTOMAKE) --gnu collectors/node.d.plugin/Makefile
-.PRECIOUS: Makefile
-Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
- @case '$?' in \
- *config.status*) \
- cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \
- *) \
- echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \
- cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \
- esac;
-$(top_srcdir)/build/subst.inc $(srcdir)/fronius/Makefile.inc $(srcdir)/named/Makefile.inc $(srcdir)/sma_webbox/Makefile.inc $(srcdir)/snmp/Makefile.inc $(srcdir)/stiebeleltron/Makefile.inc:
-
-$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES)
- cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
-
-$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps)
- cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
-$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps)
- cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
-$(am__aclocal_m4_deps):
-install-dist_pluginsSCRIPTS: $(dist_plugins_SCRIPTS)
- @$(NORMAL_INSTALL)
- @list='$(dist_plugins_SCRIPTS)'; test -n "$(pluginsdir)" || list=; \
- if test -n "$$list"; then \
- echo " $(MKDIR_P) '$(DESTDIR)$(pluginsdir)'"; \
- $(MKDIR_P) "$(DESTDIR)$(pluginsdir)" || exit 1; \
- fi; \
- for p in $$list; do \
- if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
- if test -f "$$d$$p"; then echo "$$d$$p"; echo "$$p"; else :; fi; \
- done | \
- sed -e 'p;s,.*/,,;n' \
- -e 'h;s|.*|.|' \
- -e 'p;x;s,.*/,,;$(transform)' | sed 'N;N;N;s,\n, ,g' | \
- $(AWK) 'BEGIN { files["."] = ""; dirs["."] = 1; } \
- { d=$$3; if (dirs[d] != 1) { print "d", d; dirs[d] = 1 } \
- if ($$2 == $$4) { files[d] = files[d] " " $$1; \
- if (++n[d] == $(am__install_max)) { \
- print "f", d, files[d]; n[d] = 0; files[d] = "" } } \
- else { print "f", d "/" $$4, $$1 } } \
- END { for (d in files) print "f", d, files[d] }' | \
- while read type dir files; do \
- if test "$$dir" = .; then dir=; else dir=/$$dir; fi; \
- test -z "$$files" || { \
- echo " $(INSTALL_SCRIPT) $$files '$(DESTDIR)$(pluginsdir)$$dir'"; \
- $(INSTALL_SCRIPT) $$files "$(DESTDIR)$(pluginsdir)$$dir" || exit $$?; \
- } \
- ; done
-
-uninstall-dist_pluginsSCRIPTS:
- @$(NORMAL_UNINSTALL)
- @list='$(dist_plugins_SCRIPTS)'; test -n "$(pluginsdir)" || exit 0; \
- files=`for p in $$list; do echo "$$p"; done | \
- sed -e 's,.*/,,;$(transform)'`; \
- dir='$(DESTDIR)$(pluginsdir)'; $(am__uninstall_files_from_dir)
-install-dist_libconfigDATA: $(dist_libconfig_DATA)
- @$(NORMAL_INSTALL)
- @list='$(dist_libconfig_DATA)'; test -n "$(libconfigdir)" || list=; \
- if test -n "$$list"; then \
- echo " $(MKDIR_P) '$(DESTDIR)$(libconfigdir)'"; \
- $(MKDIR_P) "$(DESTDIR)$(libconfigdir)" || exit 1; \
- fi; \
- for p in $$list; do \
- if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
- echo "$$d$$p"; \
- done | $(am__base_list) | \
- while read files; do \
- echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(libconfigdir)'"; \
- $(INSTALL_DATA) $$files "$(DESTDIR)$(libconfigdir)" || exit $$?; \
- done
-
-uninstall-dist_libconfigDATA:
- @$(NORMAL_UNINSTALL)
- @list='$(dist_libconfig_DATA)'; test -n "$(libconfigdir)" || list=; \
- files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
- dir='$(DESTDIR)$(libconfigdir)'; $(am__uninstall_files_from_dir)
-install-dist_nodeDATA: $(dist_node_DATA)
- @$(NORMAL_INSTALL)
- @list='$(dist_node_DATA)'; test -n "$(nodedir)" || list=; \
- if test -n "$$list"; then \
- echo " $(MKDIR_P) '$(DESTDIR)$(nodedir)'"; \
- $(MKDIR_P) "$(DESTDIR)$(nodedir)" || exit 1; \
- fi; \
- for p in $$list; do \
- if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
- echo "$$d$$p"; \
- done | $(am__base_list) | \
- while read files; do \
- echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(nodedir)'"; \
- $(INSTALL_DATA) $$files "$(DESTDIR)$(nodedir)" || exit $$?; \
- done
-
-uninstall-dist_nodeDATA:
- @$(NORMAL_UNINSTALL)
- @list='$(dist_node_DATA)'; test -n "$(nodedir)" || list=; \
- files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
- dir='$(DESTDIR)$(nodedir)'; $(am__uninstall_files_from_dir)
-install-dist_nodeconfigDATA: $(dist_nodeconfig_DATA)
- @$(NORMAL_INSTALL)
- @list='$(dist_nodeconfig_DATA)'; test -n "$(nodeconfigdir)" || list=; \
- if test -n "$$list"; then \
- echo " $(MKDIR_P) '$(DESTDIR)$(nodeconfigdir)'"; \
- $(MKDIR_P) "$(DESTDIR)$(nodeconfigdir)" || exit 1; \
- fi; \
- for p in $$list; do \
- if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
- echo "$$d$$p"; \
- done | $(am__base_list) | \
- while read files; do \
- echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(nodeconfigdir)'"; \
- $(INSTALL_DATA) $$files "$(DESTDIR)$(nodeconfigdir)" || exit $$?; \
- done
-
-uninstall-dist_nodeconfigDATA:
- @$(NORMAL_UNINSTALL)
- @list='$(dist_nodeconfig_DATA)'; test -n "$(nodeconfigdir)" || list=; \
- files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
- dir='$(DESTDIR)$(nodeconfigdir)'; $(am__uninstall_files_from_dir)
-install-dist_nodemodulesDATA: $(dist_nodemodules_DATA)
- @$(NORMAL_INSTALL)
- @list='$(dist_nodemodules_DATA)'; test -n "$(nodemodulesdir)" || list=; \
- if test -n "$$list"; then \
- echo " $(MKDIR_P) '$(DESTDIR)$(nodemodulesdir)'"; \
- $(MKDIR_P) "$(DESTDIR)$(nodemodulesdir)" || exit 1; \
- fi; \
- for p in $$list; do \
- if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
- echo "$$d$$p"; \
- done | $(am__base_list) | \
- while read files; do \
- echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(nodemodulesdir)'"; \
- $(INSTALL_DATA) $$files "$(DESTDIR)$(nodemodulesdir)" || exit $$?; \
- done
-
-uninstall-dist_nodemodulesDATA:
- @$(NORMAL_UNINSTALL)
- @list='$(dist_nodemodules_DATA)'; test -n "$(nodemodulesdir)" || list=; \
- files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
- dir='$(DESTDIR)$(nodemodulesdir)'; $(am__uninstall_files_from_dir)
-install-dist_nodemoduleslibberDATA: $(dist_nodemoduleslibber_DATA)
- @$(NORMAL_INSTALL)
- @list='$(dist_nodemoduleslibber_DATA)'; test -n "$(nodemoduleslibberdir)" || list=; \
- if test -n "$$list"; then \
- echo " $(MKDIR_P) '$(DESTDIR)$(nodemoduleslibberdir)'"; \
- $(MKDIR_P) "$(DESTDIR)$(nodemoduleslibberdir)" || exit 1; \
- fi; \
- for p in $$list; do \
- if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
- echo "$$d$$p"; \
- done | $(am__base_list) | \
- while read files; do \
- echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(nodemoduleslibberdir)'"; \
- $(INSTALL_DATA) $$files "$(DESTDIR)$(nodemoduleslibberdir)" || exit $$?; \
- done
-
-uninstall-dist_nodemoduleslibberDATA:
- @$(NORMAL_UNINSTALL)
- @list='$(dist_nodemoduleslibber_DATA)'; test -n "$(nodemoduleslibberdir)" || list=; \
- files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
- dir='$(DESTDIR)$(nodemoduleslibberdir)'; $(am__uninstall_files_from_dir)
-install-dist_usernodeconfigDATA: $(dist_usernodeconfig_DATA)
- @$(NORMAL_INSTALL)
- @list='$(dist_usernodeconfig_DATA)'; test -n "$(usernodeconfigdir)" || list=; \
- if test -n "$$list"; then \
- echo " $(MKDIR_P) '$(DESTDIR)$(usernodeconfigdir)'"; \
- $(MKDIR_P) "$(DESTDIR)$(usernodeconfigdir)" || exit 1; \
- fi; \
- for p in $$list; do \
- if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
- echo "$$d$$p"; \
- done | $(am__base_list) | \
- while read files; do \
- echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(usernodeconfigdir)'"; \
- $(INSTALL_DATA) $$files "$(DESTDIR)$(usernodeconfigdir)" || exit $$?; \
- done
-
-uninstall-dist_usernodeconfigDATA:
- @$(NORMAL_UNINSTALL)
- @list='$(dist_usernodeconfig_DATA)'; test -n "$(usernodeconfigdir)" || list=; \
- files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
- dir='$(DESTDIR)$(usernodeconfigdir)'; $(am__uninstall_files_from_dir)
-tags TAGS:
-
-ctags CTAGS:
-
-cscope cscopelist:
-
-
-distdir: $(DISTFILES)
- @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
- topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
- list='$(DISTFILES)'; \
- dist_files=`for file in $$list; do echo $$file; done | \
- sed -e "s|^$$srcdirstrip/||;t" \
- -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \
- case $$dist_files in \
- */*) $(MKDIR_P) `echo "$$dist_files" | \
- sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \
- sort -u` ;; \
- esac; \
- for file in $$dist_files; do \
- if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \
- if test -d $$d/$$file; then \
- dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \
- if test -d "$(distdir)/$$file"; then \
- find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
- fi; \
- if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \
- cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \
- find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
- fi; \
- cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \
- else \
- test -f "$(distdir)/$$file" \
- || cp -p $$d/$$file "$(distdir)/$$file" \
- || exit 1; \
- fi; \
- done
-check-am: all-am
-check: check-am
-all-am: Makefile $(SCRIPTS) $(DATA)
-installdirs:
- for dir in "$(DESTDIR)$(pluginsdir)" "$(DESTDIR)$(libconfigdir)" "$(DESTDIR)$(nodedir)" "$(DESTDIR)$(nodeconfigdir)" "$(DESTDIR)$(nodemodulesdir)" "$(DESTDIR)$(nodemoduleslibberdir)" "$(DESTDIR)$(usernodeconfigdir)"; do \
- test -z "$$dir" || $(MKDIR_P) "$$dir"; \
- done
-install: install-am
-install-exec: install-exec-am
-install-data: install-data-am
-uninstall: uninstall-am
-
-install-am: all-am
- @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
-
-installcheck: installcheck-am
-install-strip:
- if test -z '$(STRIP)'; then \
- $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
- install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
- install; \
- else \
- $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
- install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
- "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \
- fi
-mostlyclean-generic:
-
-clean-generic:
- -test -z "$(CLEANFILES)" || rm -f $(CLEANFILES)
-
-distclean-generic:
- -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
- -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES)
-
-maintainer-clean-generic:
- @echo "This command is intended for maintainers to use"
- @echo "it deletes files that may require special tools to rebuild."
- -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES)
-clean: clean-am
-
-clean-am: clean-generic mostlyclean-am
-
-distclean: distclean-am
- -rm -f Makefile
-distclean-am: clean-am distclean-generic
-
-dvi: dvi-am
-
-dvi-am:
-
-html: html-am
-
-html-am:
-
-info: info-am
-
-info-am:
-
-install-data-am: install-dist_libconfigDATA install-dist_nodeDATA \
- install-dist_nodeconfigDATA install-dist_nodemodulesDATA \
- install-dist_nodemoduleslibberDATA install-dist_pluginsSCRIPTS \
- install-dist_usernodeconfigDATA
-
-install-dvi: install-dvi-am
-
-install-dvi-am:
-
-install-exec-am:
-
-install-html: install-html-am
-
-install-html-am:
-
-install-info: install-info-am
-
-install-info-am:
-
-install-man:
-
-install-pdf: install-pdf-am
-
-install-pdf-am:
-
-install-ps: install-ps-am
-
-install-ps-am:
-
-installcheck-am:
-
-maintainer-clean: maintainer-clean-am
- -rm -f Makefile
-maintainer-clean-am: distclean-am maintainer-clean-generic
-
-mostlyclean: mostlyclean-am
-
-mostlyclean-am: mostlyclean-generic
-
-pdf: pdf-am
-
-pdf-am:
-
-ps: ps-am
-
-ps-am:
-
-uninstall-am: uninstall-dist_libconfigDATA uninstall-dist_nodeDATA \
- uninstall-dist_nodeconfigDATA uninstall-dist_nodemodulesDATA \
- uninstall-dist_nodemoduleslibberDATA \
- uninstall-dist_pluginsSCRIPTS \
- uninstall-dist_usernodeconfigDATA
-
-.MAKE: install-am install-strip
-
-.PHONY: all all-am check check-am clean clean-generic cscopelist-am \
- ctags-am distclean distclean-generic distdir dvi dvi-am html \
- html-am info info-am install install-am install-data \
- install-data-am install-dist_libconfigDATA \
- install-dist_nodeDATA install-dist_nodeconfigDATA \
- install-dist_nodemodulesDATA \
- install-dist_nodemoduleslibberDATA install-dist_pluginsSCRIPTS \
- install-dist_usernodeconfigDATA install-dvi install-dvi-am \
- install-exec install-exec-am install-html install-html-am \
- install-info install-info-am install-man install-pdf \
- install-pdf-am install-ps install-ps-am install-strip \
- installcheck installcheck-am installdirs maintainer-clean \
- maintainer-clean-generic mostlyclean mostlyclean-generic pdf \
- pdf-am ps ps-am tags-am uninstall uninstall-am \
- uninstall-dist_libconfigDATA uninstall-dist_nodeDATA \
- uninstall-dist_nodeconfigDATA uninstall-dist_nodemodulesDATA \
- uninstall-dist_nodemoduleslibberDATA \
- uninstall-dist_pluginsSCRIPTS \
- uninstall-dist_usernodeconfigDATA
-
-.in:
- if sed \
- -e 's#[@]localstatedir_POST@#$(localstatedir)#g' \
- -e 's#[@]sbindir_POST@#$(sbindir)#g' \
- -e 's#[@]sysconfdir_POST@#$(sysconfdir)#g' \
- -e 's#[@]pythondir_POST@#$(pythondir)#g' \
- -e 's#[@]configdir_POST@#$(configdir)#g' \
- -e 's#[@]libconfigdir_POST@#$(libconfigdir)#g' \
- -e 's#[@]cachedir_POST@#$(cachedir)#g' \
- $< > $@.tmp; then \
- mv "$@.tmp" "$@"; \
- else \
- rm -f "$@.tmp"; \
- false; \
- fi
-
-# Tell versions [3.59,3.63) of GNU make to not export all variables.
-# Otherwise a system limit (for SysV at least) may be exceeded.
-.NOEXPORT:
diff --git a/collectors/node.d.plugin/README.md b/collectors/node.d.plugin/README.md
index af8708c7..265b1ac5 100644
--- a/collectors/node.d.plugin/README.md
+++ b/collectors/node.d.plugin/README.md
@@ -230,3 +230,5 @@ The `service` object defines a set of functions to allow you send information to
*FIXME: document an operational node.d.plugin data collector - the best example is the
[snmp collector](snmp/snmp.node.js)*
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fnode.d.plugin%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/collectors/node.d.plugin/fronius/README.md b/collectors/node.d.plugin/fronius/README.md
index dd284699..72522637 100644
--- a/collectors/node.d.plugin/fronius/README.md
+++ b/collectors/node.d.plugin/fronius/README.md
@@ -118,3 +118,5 @@ The output of /solar_api/v1/GetPowerFlowRealtimeData.fcgi looks like this:
}
}
```
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fnode.d.plugin%2Ffronius%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/collectors/node.d.plugin/named/README.md b/collectors/node.d.plugin/named/README.md
index 977a5015..480cbc19 100644
--- a/collectors/node.d.plugin/named/README.md
+++ b/collectors/node.d.plugin/named/README.md
@@ -340,3 +340,5 @@ Verify it works by running the following command (the collector is written in no
curl "http://localhost:8888/json/v1/server"
```
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fnode.d.plugin%2Fnamed%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
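
The file deleted next, `node.d.plugin`, opens with a shell/JavaScript polyglot shebang (its own comments credit the trick to a unix.stackexchange answer). A minimal sketch of how that hack works, for readers skimming the deletion:

```sh
#!/usr/bin/env bash
':' //; exec "$(command -v nodejs || command -v node)" "$0" "$@"
// Nothing below this line ever runs under bash: bash executed ':' (a
// no-op) with the argument '//', then exec'd node on this very file,
// while node parses line 2 as the string ':' followed by a line
// comment -- so both interpreters accept the same file.
```
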
diff --git a/collectors/node.d.plugin/node.d.plugin b/collectors/node.d.plugin/node.d.plugin
deleted file mode 100644
index 2570220c..00000000
--- a/collectors/node.d.plugin/node.d.plugin
+++ /dev/null
@@ -1,303 +0,0 @@
-#!/usr/bin/env bash
-':' //; exec "$(command -v nodejs || command -v node || echo "ERROR node IS NOT AVAILABLE IN THIS SYSTEM")" "$0" "$@"
-
-// shebang hack from:
-// http://unix.stackexchange.com/questions/65235/universal-node-js-shebang
-
-// Initially this is run as a shell script.
-// Then, the second line, finds nodejs or node or js in the system path
-// and executes it with the shell parameters.
-
-// netdata
-// real-time performance and health monitoring, done right!
-// (C) 2017 Costa Tsaousis <costa@tsaousis.gr>
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-// --------------------------------------------------------------------------------------------------------------------
-
-'use strict';
-
-// --------------------------------------------------------------------------------------------------------------------
-// get NETDATA environment variables
-
-var NETDATA_PLUGINS_DIR = process.env.NETDATA_PLUGINS_DIR || __dirname;
-var NETDATA_USER_CONFIG_DIR = process.env.NETDATA_USER_CONFIG_DIR || '/usr/local/etc/netdata';
-var NETDATA_STOCK_CONFIG_DIR = process.env.NETDATA_STOCK_CONFIG_DIR || '/usr/local/lib/netdata/conf.d';
-var NETDATA_UPDATE_EVERY = process.env.NETDATA_UPDATE_EVERY || 1;
-var NODE_D_DIR = NETDATA_PLUGINS_DIR + '/../node.d';
-
-// make sure the modules are found
-process.mainModule.paths.unshift(NODE_D_DIR + '/node_modules');
-process.mainModule.paths.unshift(NODE_D_DIR);
-
-
-// --------------------------------------------------------------------------------------------------------------------
-// load required modules
-
-var fs = require('fs');
-var url = require('url');
-var util = require('util');
-var http = require('http');
-var path = require('path');
-var extend = require('extend');
-var netdata = require('netdata');
-
-
-// --------------------------------------------------------------------------------------------------------------------
-// configuration
-
-function netdata_read_json_config_file(module_filename) {
- var f = path.basename(module_filename);
-
- var ufilename, sfilename;
-
- var m = f.match('.plugin' + '$');
- if(m !== null) {
- ufilename = netdata.options.paths.config + '/' + f.substring(0, m.index) + '.conf';
- sfilename = netdata.options.paths.stock_config + '/' + f.substring(0, m.index) + '.conf';
- }
-
- m = f.match('.node.js' + '$');
- if(m !== null) {
- ufilename = netdata.options.paths.config + '/node.d/' + f.substring(0, m.index) + '.conf';
- sfilename = netdata.options.paths.stock_config + '/node.d/' + f.substring(0, m.index) + '.conf';
- }
-
- try {
- netdata.debug('loading module\'s ' + module_filename + ' user-config ' + ufilename);
- return JSON.parse(fs.readFileSync(ufilename, 'utf8'));
- }
- catch(e) {
- netdata.error('Cannot read user-configuration file ' + ufilename + ': ' + e.message + '.');
- dumpError(e);
- }
-
- try {
- netdata.debug('loading module\'s ' + module_filename + ' stock-config ' + sfilename);
- return JSON.parse(fs.readFileSync(sfilename, 'utf8'));
- }
- catch(e) {
- netdata.error('Cannot read stock-configuration file ' + sfilename + ': ' + e.message + ', using internal defaults.');
- dumpError(e);
- }
-
- return {};
-}
-
-// internal defaults
-extend(true, netdata.options, {
- filename: path.basename(__filename),
-
- update_every: NETDATA_UPDATE_EVERY,
-
- paths: {
- plugins: NETDATA_PLUGINS_DIR,
- config: NETDATA_USER_CONFIG_DIR,
- stock_config: NETDATA_STOCK_CONFIG_DIR,
- modules: []
- },
-
- modules_enable_autodetect: true,
- modules_enable_all: true,
- modules: {}
-});
-
-// load configuration file
-netdata.options_loaded = netdata_read_json_config_file(__filename);
-extend(true, netdata.options, netdata.options_loaded);
-
-if(!netdata.options.paths.plugins)
- netdata.options.paths.plugins = NETDATA_PLUGINS_DIR;
-
-if(!netdata.options.paths.config)
- netdata.options.paths.config = NETDATA_USER_CONFIG_DIR;
-
-if(!netdata.options.paths.stock_config)
- netdata.options.paths.stock_config = NETDATA_STOCK_CONFIG_DIR;
-
-// console.error('merged netdata object:');
-// console.error(util.inspect(netdata, {depth: 10}));
-
-
-// apply module paths to node.js process
-function applyModulePaths() {
- var len = netdata.options.paths.modules.length;
- while(len--)
- process.mainModule.paths.unshift(netdata.options.paths.modules[len]);
-}
-applyModulePaths();
-
-
-// --------------------------------------------------------------------------------------------------------------------
-// tracing
-
-function dumpError(err) {
- if (typeof err === 'object') {
- if (err.stack) {
- netdata.debug(err.stack);
- }
- }
-}
-
-// --------------------------------------------------------------------------------------------------------------------
-// get command line arguments
-{
- var found_myself = false;
- var found_number = false;
- var found_modules = false;
- process.argv.forEach(function (val, index, array) {
- netdata.debug('PARAM: ' + val);
-
- if(!found_myself) {
- if(val === __filename)
- found_myself = true;
- }
- else {
- switch(val) {
- case 'debug':
- netdata.options.DEBUG = true;
- netdata.debug('DEBUG enabled');
- break;
-
- default:
- if(found_number === true) {
- if(found_modules === false) {
- for(var i in netdata.options.modules)
- netdata.options.modules[i].enabled = false;
- }
-
- if(typeof netdata.options.modules[val] === 'undefined')
- netdata.options.modules[val] = {};
-
- netdata.options.modules[val].enabled = true;
- netdata.options.modules_enable_all = false;
- netdata.debug('enabled module ' + val);
- }
- else {
- try {
- var x = parseInt(val);
- if(x > 0) {
- netdata.options.update_every = x;
- if(netdata.options.update_every < NETDATA_UPDATE_EVERY) {
- netdata.options.update_every = NETDATA_UPDATE_EVERY;
- netdata.debug('Update frequency ' + x + 's is too low');
- }
-
- found_number = true;
- netdata.debug('Update frequency set to ' + netdata.options.update_every + ' seconds');
- }
- else netdata.error('Ignoring parameter: ' + val);
- }
- catch(e) {
- netdata.error('Cannot get value of parameter: ' + val);
- dumpError(e);
- }
- }
- break;
- }
- }
- });
-}
-
-if(netdata.options.update_every < 1) {
- netdata.debug('Adjusting update frequency to 1 second');
- netdata.options.update_every = 1;
-}
-
-// --------------------------------------------------------------------------------------------------------------------
-// find modules
-
-function findModules() {
- var found = 0;
-
- var files = fs.readdirSync(NODE_D_DIR);
- var len = files.length;
- while(len--) {
- var m = files[len].match('.node.js' + '$');
- if(m !== null) {
- var n = files[len].substring(0, m.index);
-
- if(typeof(netdata.options.modules[n]) === 'undefined')
- netdata.options.modules[n] = { name: n, enabled: netdata.options.modules_enable_all };
-
- if(netdata.options.modules[n].enabled === true) {
- netdata.options.modules[n].name = n;
- netdata.options.modules[n].filename = NODE_D_DIR + '/' + files[len];
- netdata.options.modules[n].loaded = false;
-
- // load the module
- try {
- netdata.debug('loading module ' + netdata.options.modules[n].filename);
- netdata.options.modules[n].module = require(netdata.options.modules[n].filename);
- netdata.options.modules[n].module.name = n;
- netdata.debug('loaded module ' + netdata.options.modules[n].name + ' from ' + netdata.options.modules[n].filename);
- }
- catch(e) {
- netdata.options.modules[n].enabled = false;
- netdata.error('Cannot load module: ' + netdata.options.modules[n].filename + ' exception: ' + e);
- dumpError(e);
- continue;
- }
-
- // load its configuration
- var c = {
- enable_autodetect: netdata.options.modules_enable_autodetect,
- update_every: netdata.options.update_every
- };
-
- var c2 = netdata_read_json_config_file(files[len]);
- extend(true, c, c2);
-
- // call module auto-detection / configuration
- try {
- netdata.modules_configuring++;
- netdata.debug('Configuring module ' + netdata.options.modules[n].name);
- var serv = netdata.configure(netdata.options.modules[n].module, c, function() {
- netdata.debug('Configured module ' + netdata.options.modules[n].name);
- netdata.modules_configuring--;
- });
-
- netdata.debug('Configuring module ' + netdata.options.modules[n].name + ' reports ' + serv + ' eligible services.');
- }
- catch(e) {
- netdata.modules_configuring--;
- netdata.options.modules[n].enabled = false;
- netdata.error('Failed module auto-detection: ' + netdata.options.modules[n].name + ' exception: ' + e + ', disabling module.');
- dumpError(e);
- continue;
- }
-
- netdata.options.modules[n].loaded = true;
- found++;
- }
- }
- }
-
- // netdata.debug(netdata.options.modules);
- return found;
-}
-
-if(findModules() === 0) {
- netdata.error('Cannot load any .node.js module from: ' + NODE_D_DIR);
- netdata.disableNodePlugin();
- process.exit(1);
-}
-
-
-// --------------------------------------------------------------------------------------------------------------------
-// start
-
-function start_when_configuring_ends() {
- if(netdata.modules_configuring > 0) {
- netdata.debug('Waiting modules configuration, still running ' + netdata.modules_configuring);
- setTimeout(start_when_configuring_ends, 500);
- return;
- }
-
- netdata.modules_configuring = 0;
- netdata.start();
-}
-start_when_configuring_ends();
-
-//netdata.debug('netdata object:')
-//netdata.debug(netdata);
diff --git a/collectors/node.d.plugin/sma_webbox/README.md b/collectors/node.d.plugin/sma_webbox/README.md
index 1512c700..cff7645d 100644
--- a/collectors/node.d.plugin/sma_webbox/README.md
+++ b/collectors/node.d.plugin/sma_webbox/README.md
@@ -1,4 +1,6 @@
+# SMA Sunny Webbox
+
[SMA Sunny Webbox](http://files.sma.de/dl/4253/WEBBOX-DUS131916W.pdf)
Example netdata configuration for node.d/sma_webbox.conf
@@ -23,3 +25,5 @@ The module supports any number of name servers, like this:
]
}
```
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fnode.d.plugin%2Fsma_webbox%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/collectors/node.d.plugin/sma_webbox/sma_webbox.node.js b/collectors/node.d.plugin/sma_webbox/sma_webbox.node.js
index b9a168ad..aa60ae81 100644
--- a/collectors/node.d.plugin/sma_webbox/sma_webbox.node.js
+++ b/collectors/node.d.plugin/sma_webbox/sma_webbox.node.js
@@ -73,10 +73,11 @@ var webbox = {
if(found > 0 && service.added !== true)
service.commit();
+
// Grid Current Power Chart
if(d['GriPwr'].value !== null) {
- var id = 'smawebbox_' + service.name + '.current';
- var chart = webbox.charts[id];
+ const id = 'smawebbox_' + service.name + '.current';
+ let chart = webbox.charts[id];
if(typeof chart === 'undefined') {
chart = {
@@ -111,8 +112,8 @@ var webbox = {
}
if(d['GriEgyTdy'].value !== null) {
- var id = 'smawebbox_' + service.name + '.today';
- var chart = webbox.charts[id];
+ const id = 'smawebbox_' + service.name + '.today';
+ let chart = webbox.charts[id];
if(typeof chart === 'undefined') {
chart = {
@@ -147,8 +148,8 @@ var webbox = {
}
if(d['GriEgyTot'].value !== null) {
- var id = 'smawebbox_' + service.name + '.total';
- var chart = webbox.charts[id];
+ const id = 'smawebbox_' + service.name + '.total';
+ let chart = webbox.charts[id];
if(typeof chart === 'undefined') {
chart = {
diff --git a/collectors/node.d.plugin/snmp/README.md b/collectors/node.d.plugin/snmp/README.md
index a307a364..832108b9 100644
--- a/collectors/node.d.plugin/snmp/README.md
+++ b/collectors/node.d.plugin/snmp/README.md
@@ -92,16 +92,14 @@ In this example:
`family` sets the name of the submenu of the dashboard each chart will appear under.
-If you need to define many charts using incremental OIDs, you can use something like this:
-
-This is like the previous, but the option `multiply_range` given, will multiply the current chart from `1` to `24` inclusive, producing 24 charts in total for the 24 ports of the switch `10.11.12.8`.
+`multiplier` and `divisor` are passed by the plugin to the Netdata daemon and are applied to the metric to convert it properly to `units`. For incremental counters (with the exception of Counter64-type metrics), `offset` is added to the metric from within the SNMP plugin. This means that the value you will see in debug mode in the `DEBUG: setting current chart to... SET` line for a metric will not have been multiplied or divided, but it will have had the offset added to it.
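+
+As an illustration (a hypothetical dimension, shown only to make the arithmetic concrete), consider:
+
+```json
+"in": {
+    "oid": "1.3.6.1.2.1.2.2.1.10.1",
+    "algorithm": "incremental",
+    "multiplier": 8,
+    "divisor": 1024,
+    "offset": 0
+}
+```
+
+The plugin sends `value + offset` to the daemon, and the daemon then applies `* 8 / 1024` to convert the byte counter to kilobits, so the debug output shows the un-multiplied, un-divided value.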
-Each of the 24 new charts will have its id (1-24) appended at:
+<details markdown="1"><summary><b>Caution: Counter64 metrics do not support `offset` (issue #5028).</b></summary>
+The SNMP plugin supports Counter64 metrics, with the only limitation that the `offset` parameter must not be defined. Because of the way JavaScript handles large numbers, and because the offset is applied to metrics inside the plugin, an offset on a Counter64 metric will be silently ignored.
+</details>
+<br>
+If you need to define many charts using incremental OIDs, you can use something like this:
-1. its chart unique id, i.e. `snmp_switch.bandwidth_port1` to `snmp_switch.bandwidth_port24`
-2. its `title`, i.e. `Switch Bandwidth for port 1` to `Switch Bandwidth for port 24`
-3. its `oid` (for all dimensions), i.e. dimension `in` will be `1.3.6.1.2.1.2.2.1.10.1` to `1.3.6.1.2.1.2.2.1.10.24`
-3. its priority (which will be incremented for each chart so that the charts will appear on the dashboard in this order)
```json
{
@@ -144,6 +142,16 @@ Each of the 24 new charts will have its id (1-24) appended at:
}
```
+This is like the previous example, but with the `multiply_range` option given it will multiply the current chart from `1` to `24` inclusive, producing 24 charts in total for the 24 ports of the switch `10.11.12.8`.
+
+Each of the 24 new charts will have its number (1-24) appended to:
+
+1. its chart unique id, i.e. `snmp_switch.bandwidth_port1` to `snmp_switch.bandwidth_port24`
+2. its `title`, i.e. `Switch Bandwidth for port 1` to `Switch Bandwidth for port 24`
+3. its `oid` (for all dimensions), i.e. dimension `in` will be `1.3.6.1.2.1.2.2.1.10.1` to `1.3.6.1.2.1.2.2.1.10.24`
+4. its priority (which will be incremented for each chart, so that the charts will appear on the dashboard in this order)
+
+
 The `options` given for each server are:
- `timeout`, the time to wait for the SNMP device to respond. The default is 5000 ms.
@@ -355,3 +363,5 @@ This switch has a very slow SNMP processors. To respond, it needs about 8 second
]
}
```
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fnode.d.plugin%2Fsnmp%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/collectors/node.d.plugin/snmp/snmp.node.js b/collectors/node.d.plugin/snmp/snmp.node.js
index a051d3d3..6b33ae0d 100644
--- a/collectors/node.d.plugin/snmp/snmp.node.js
+++ b/collectors/node.d.plugin/snmp/snmp.node.js
@@ -265,7 +265,7 @@ netdata.processors.snmp = {
if(__DEBUG === true)
netdata.debug(service.module.name + ': ' + service.name + ': failed ' + service.module.name + ' get for OIDs ' + varbinds[i].oid);
- service.error('OID ' + varbinds[i].oid + ' gave error: ' + snmp.varbindError(varbinds[i]));
+ service.error('OID ' + varbinds[i].oid + ' gave error: ' + net_snmp.varbindError(varbinds[i]));
value = null;
failed++;
}
@@ -394,7 +394,7 @@ var snmp = {
var d = dim_keys[j];
if (dimensions[d].value !== null) {
- if(typeof dimensions[d].offset === 'number')
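+ // the offset is only applied when the collected value is numeric; Counter64 values may not be plain numbers, in which case the offset is silently skipped (see the Counter64 note in the README)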
+ if(typeof dimensions[d].offset === 'number' && typeof dimensions[d].value === 'number')
service.set(d, dimensions[d].value + dimensions[d].offset);
else
service.set(d, dimensions[d].value);
diff --git a/collectors/node.d.plugin/stiebeleltron/README.md b/collectors/node.d.plugin/stiebeleltron/README.md
index 002a3157..4aa5a43e 100644
--- a/collectors/node.d.plugin/stiebeleltron/README.md
+++ b/collectors/node.d.plugin/stiebeleltron/README.md
@@ -503,3 +503,5 @@ The charts are being generated using the configuration below. So if your install
]
}
```
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fnode.d.plugin%2Fstiebeleltron%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/collectors/plugins.d/Makefile.in b/collectors/plugins.d/Makefile.in
deleted file mode 100644
index b2c11281..00000000
--- a/collectors/plugins.d/Makefile.in
+++ /dev/null
@@ -1,647 +0,0 @@
-# Makefile.in generated by automake 1.14.1 from Makefile.am.
-# @configure_input@
-
-# Copyright (C) 1994-2013 Free Software Foundation, Inc.
-
-# This Makefile.in is free software; the Free Software Foundation
-# gives unlimited permission to copy and/or distribute it,
-# with or without modifications, as long as this notice is preserved.
-
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
-# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
-# PARTICULAR PURPOSE.
-
-@SET_MAKE@
-
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-VPATH = @srcdir@
-am__is_gnu_make = test -n '$(MAKEFILE_LIST)' && test -n '$(MAKELEVEL)'
-am__make_running_with_option = \
- case $${target_option-} in \
- ?) ;; \
- *) echo "am__make_running_with_option: internal error: invalid" \
- "target option '$${target_option-}' specified" >&2; \
- exit 1;; \
- esac; \
- has_opt=no; \
- sane_makeflags=$$MAKEFLAGS; \
- if $(am__is_gnu_make); then \
- sane_makeflags=$$MFLAGS; \
- else \
- case $$MAKEFLAGS in \
- *\\[\ \ ]*) \
- bs=\\; \
- sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \
- | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \
- esac; \
- fi; \
- skip_next=no; \
- strip_trailopt () \
- { \
- flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \
- }; \
- for flg in $$sane_makeflags; do \
- test $$skip_next = yes && { skip_next=no; continue; }; \
- case $$flg in \
- *=*|--*) continue;; \
- -*I) strip_trailopt 'I'; skip_next=yes;; \
- -*I?*) strip_trailopt 'I';; \
- -*O) strip_trailopt 'O'; skip_next=yes;; \
- -*O?*) strip_trailopt 'O';; \
- -*l) strip_trailopt 'l'; skip_next=yes;; \
- -*l?*) strip_trailopt 'l';; \
- -[dEDm]) skip_next=yes;; \
- -[JT]) skip_next=yes;; \
- esac; \
- case $$flg in \
- *$$target_option*) has_opt=yes; break;; \
- esac; \
- done; \
- test $$has_opt = yes
-am__make_dryrun = (target_option=n; $(am__make_running_with_option))
-am__make_keepgoing = (target_option=k; $(am__make_running_with_option))
-pkgdatadir = $(datadir)/@PACKAGE@
-pkgincludedir = $(includedir)/@PACKAGE@
-pkglibdir = $(libdir)/@PACKAGE@
-pkglibexecdir = $(libexecdir)/@PACKAGE@
-am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd
-install_sh_DATA = $(install_sh) -c -m 644
-install_sh_PROGRAM = $(install_sh) -c
-install_sh_SCRIPT = $(install_sh) -c
-INSTALL_HEADER = $(INSTALL_DATA)
-transform = $(program_transform_name)
-NORMAL_INSTALL = :
-PRE_INSTALL = :
-POST_INSTALL = :
-NORMAL_UNINSTALL = :
-PRE_UNINSTALL = :
-POST_UNINSTALL = :
-build_triplet = @build@
-host_triplet = @host@
-subdir = collectors/plugins.d
-DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/Makefile.am \
- $(dist_noinst_DATA)
-ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
-am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \
- $(top_srcdir)/build/m4/ax_c__generic.m4 \
- $(top_srcdir)/build/m4/ax_c_lto.m4 \
- $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \
- $(top_srcdir)/build/m4/ax_c_mallopt.m4 \
- $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \
- $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \
- $(top_srcdir)/build/m4/ax_pthread.m4 \
- $(top_srcdir)/build/m4/jemalloc.m4 \
- $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac
-am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
- $(ACLOCAL_M4)
-mkinstalldirs = $(install_sh) -d
-CONFIG_HEADER = $(top_builddir)/config.h
-CONFIG_CLEAN_FILES =
-CONFIG_CLEAN_VPATH_FILES =
-AM_V_P = $(am__v_P_@AM_V@)
-am__v_P_ = $(am__v_P_@AM_DEFAULT_V@)
-am__v_P_0 = false
-am__v_P_1 = :
-AM_V_GEN = $(am__v_GEN_@AM_V@)
-am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@)
-am__v_GEN_0 = @echo " GEN " $@;
-am__v_GEN_1 =
-AM_V_at = $(am__v_at_@AM_V@)
-am__v_at_ = $(am__v_at_@AM_DEFAULT_V@)
-am__v_at_0 = @
-am__v_at_1 =
-SOURCES =
-DIST_SOURCES =
-RECURSIVE_TARGETS = all-recursive check-recursive cscopelist-recursive \
- ctags-recursive dvi-recursive html-recursive info-recursive \
- install-data-recursive install-dvi-recursive \
- install-exec-recursive install-html-recursive \
- install-info-recursive install-pdf-recursive \
- install-ps-recursive install-recursive installcheck-recursive \
- installdirs-recursive pdf-recursive ps-recursive \
- tags-recursive uninstall-recursive
-am__can_run_installinfo = \
- case $$AM_UPDATE_INFO_DIR in \
- n|no|NO) false;; \
- *) (install-info --version) >/dev/null 2>&1;; \
- esac
-DATA = $(dist_noinst_DATA)
-RECURSIVE_CLEAN_TARGETS = mostlyclean-recursive clean-recursive \
- distclean-recursive maintainer-clean-recursive
-am__recursive_targets = \
- $(RECURSIVE_TARGETS) \
- $(RECURSIVE_CLEAN_TARGETS) \
- $(am__extra_recursive_targets)
-AM_RECURSIVE_TARGETS = $(am__recursive_targets:-recursive=) TAGS CTAGS \
- distdir
-am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP)
-# Read a list of newline-separated strings from the standard input,
-# and print each of them once, without duplicates. Input order is
-# *not* preserved.
-am__uniquify_input = $(AWK) '\
- BEGIN { nonempty = 0; } \
- { items[$$0] = 1; nonempty = 1; } \
- END { if (nonempty) { for (i in items) print i; }; } \
-'
-# Make sure the list of sources is unique. This is necessary because,
-# e.g., the same source file might be shared among _SOURCES variables
-# for different programs/libraries.
-am__define_uniq_tagged_files = \
- list='$(am__tagged_files)'; \
- unique=`for i in $$list; do \
- if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \
- done | $(am__uniquify_input)`
-ETAGS = etags
-CTAGS = ctags
-DIST_SUBDIRS = $(SUBDIRS)
-DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
-am__relativize = \
- dir0=`pwd`; \
- sed_first='s,^\([^/]*\)/.*$$,\1,'; \
- sed_rest='s,^[^/]*/*,,'; \
- sed_last='s,^.*/\([^/]*\)$$,\1,'; \
- sed_butlast='s,/*[^/]*$$,,'; \
- while test -n "$$dir1"; do \
- first=`echo "$$dir1" | sed -e "$$sed_first"`; \
- if test "$$first" != "."; then \
- if test "$$first" = ".."; then \
- dir2=`echo "$$dir0" | sed -e "$$sed_last"`/"$$dir2"; \
- dir0=`echo "$$dir0" | sed -e "$$sed_butlast"`; \
- else \
- first2=`echo "$$dir2" | sed -e "$$sed_first"`; \
- if test "$$first2" = "$$first"; then \
- dir2=`echo "$$dir2" | sed -e "$$sed_rest"`; \
- else \
- dir2="../$$dir2"; \
- fi; \
- dir0="$$dir0"/"$$first"; \
- fi; \
- fi; \
- dir1=`echo "$$dir1" | sed -e "$$sed_rest"`; \
- done; \
- reldir="$$dir2"
-ACLOCAL = @ACLOCAL@
-AMTAR = @AMTAR@
-AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@
-AUTOCONF = @AUTOCONF@
-AUTOHEADER = @AUTOHEADER@
-AUTOMAKE = @AUTOMAKE@
-AWK = @AWK@
-CC = @CC@
-CCDEPMODE = @CCDEPMODE@
-CFLAGS = @CFLAGS@
-CPP = @CPP@
-CPPFLAGS = @CPPFLAGS@
-CYGPATH_W = @CYGPATH_W@
-DEFS = @DEFS@
-DEPDIR = @DEPDIR@
-ECHO_C = @ECHO_C@
-ECHO_N = @ECHO_N@
-ECHO_T = @ECHO_T@
-EGREP = @EGREP@
-EXEEXT = @EXEEXT@
-GREP = @GREP@
-INSTALL = @INSTALL@
-INSTALL_DATA = @INSTALL_DATA@
-INSTALL_PROGRAM = @INSTALL_PROGRAM@
-INSTALL_SCRIPT = @INSTALL_SCRIPT@
-INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
-IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@
-IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@
-LDFLAGS = @LDFLAGS@
-LIBCAP_CFLAGS = @LIBCAP_CFLAGS@
-LIBCAP_LIBS = @LIBCAP_LIBS@
-LIBMNL_CFLAGS = @LIBMNL_CFLAGS@
-LIBMNL_LIBS = @LIBMNL_LIBS@
-LIBOBJS = @LIBOBJS@
-LIBS = @LIBS@
-LTLIBOBJS = @LTLIBOBJS@
-MAINT = @MAINT@
-MAKEINFO = @MAKEINFO@
-MATH_CFLAGS = @MATH_CFLAGS@
-MATH_LIBS = @MATH_LIBS@
-MKDIR_P = @MKDIR_P@
-NFACCT_CFLAGS = @NFACCT_CFLAGS@
-NFACCT_LIBS = @NFACCT_LIBS@
-OBJEXT = @OBJEXT@
-OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@
-OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@
-OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@
-OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@
-OPTIONAL_MATH_CLFAGS = @OPTIONAL_MATH_CLFAGS@
-OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@
-OPTIONAL_NFACCT_CLFAGS = @OPTIONAL_NFACCT_CLFAGS@
-OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@
-OPTIONAL_UUID_CLFAGS = @OPTIONAL_UUID_CLFAGS@
-OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@
-OPTIONAL_ZLIB_CLFAGS = @OPTIONAL_ZLIB_CLFAGS@
-OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@
-PACKAGE = @PACKAGE@
-PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@
-PACKAGE_NAME = @PACKAGE_NAME@
-PACKAGE_RPM_RELEASE = @PACKAGE_RPM_RELEASE@
-PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@
-PACKAGE_STRING = @PACKAGE_STRING@
-PACKAGE_TARNAME = @PACKAGE_TARNAME@
-PACKAGE_URL = @PACKAGE_URL@
-PACKAGE_VERSION = @PACKAGE_VERSION@
-PATH_SEPARATOR = @PATH_SEPARATOR@
-PKG_CONFIG = @PKG_CONFIG@
-PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@
-PKG_CONFIG_PATH = @PKG_CONFIG_PATH@
-PTHREAD_CC = @PTHREAD_CC@
-PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
-PTHREAD_LIBS = @PTHREAD_LIBS@
-SET_MAKE = @SET_MAKE@
-SHELL = @SHELL@
-SSE_CANDIDATE = @SSE_CANDIDATE@
-STRIP = @STRIP@
-UUID_CFLAGS = @UUID_CFLAGS@
-UUID_LIBS = @UUID_LIBS@
-VERSION = @VERSION@
-ZLIB_CFLAGS = @ZLIB_CFLAGS@
-ZLIB_LIBS = @ZLIB_LIBS@
-abs_builddir = @abs_builddir@
-abs_srcdir = @abs_srcdir@
-abs_top_builddir = @abs_top_builddir@
-abs_top_srcdir = @abs_top_srcdir@
-ac_ct_CC = @ac_ct_CC@
-am__include = @am__include@
-am__leading_dot = @am__leading_dot@
-am__quote = @am__quote@
-am__tar = @am__tar@
-am__untar = @am__untar@
-ax_pthread_config = @ax_pthread_config@
-bindir = @bindir@
-build = @build@
-build_alias = @build_alias@
-build_cpu = @build_cpu@
-build_os = @build_os@
-build_target = @build_target@
-build_vendor = @build_vendor@
-builddir = @builddir@
-cachedir = @cachedir@
-chartsdir = @chartsdir@
-configdir = @configdir@
-datadir = @datadir@
-datarootdir = @datarootdir@
-docdir = @docdir@
-dvidir = @dvidir@
-exec_prefix = @exec_prefix@
-has_jemalloc = @has_jemalloc@
-has_tcmalloc = @has_tcmalloc@
-host = @host@
-host_alias = @host_alias@
-host_cpu = @host_cpu@
-host_os = @host_os@
-host_vendor = @host_vendor@
-htmldir = @htmldir@
-includedir = @includedir@
-infodir = @infodir@
-install_sh = @install_sh@
-libconfigdir = @libconfigdir@
-libdir = @libdir@
-libexecdir = @libexecdir@
-localedir = @localedir@
-localstatedir = @localstatedir@
-logdir = @logdir@
-mandir = @mandir@
-mkdir_p = @mkdir_p@
-nodedir = @nodedir@
-oldincludedir = @oldincludedir@
-pdfdir = @pdfdir@
-pluginsdir = @pluginsdir@
-prefix = @prefix@
-program_transform_name = @program_transform_name@
-psdir = @psdir@
-pythondir = @pythondir@
-registrydir = @registrydir@
-sbindir = @sbindir@
-sharedstatedir = @sharedstatedir@
-srcdir = @srcdir@
-sysconfdir = @sysconfdir@
-target_alias = @target_alias@
-top_build_prefix = @top_build_prefix@
-top_builddir = @top_builddir@
-top_srcdir = @top_srcdir@
-varlibdir = @varlibdir@
-webdir = @webdir@
-AUTOMAKE_OPTIONS = subdir-objects
-MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
-SUBDIRS = \
- $(NULL)
-
-dist_noinst_DATA = \
- README.md \
- $(NULL)
-
-all: all-recursive
-
-.SUFFIXES:
-$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps)
- @for dep in $?; do \
- case '$(am__configure_deps)' in \
- *$$dep*) \
- ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \
- && { if test -f $@; then exit 0; else break; fi; }; \
- exit 1;; \
- esac; \
- done; \
- echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu collectors/plugins.d/Makefile'; \
- $(am__cd) $(top_srcdir) && \
- $(AUTOMAKE) --gnu collectors/plugins.d/Makefile
-.PRECIOUS: Makefile
-Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
- @case '$?' in \
- *config.status*) \
- cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \
- *) \
- echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \
- cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \
- esac;
-
-$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES)
- cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
-
-$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps)
- cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
-$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps)
- cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
-$(am__aclocal_m4_deps):
-
-# This directory's subdirectories are mostly independent; you can cd
-# into them and run 'make' without going through this Makefile.
-# To change the values of 'make' variables: instead of editing Makefiles,
-# (1) if the variable is set in 'config.status', edit 'config.status'
-# (which will cause the Makefiles to be regenerated when you run 'make');
-# (2) otherwise, pass the desired values on the 'make' command line.
-$(am__recursive_targets):
- @fail=; \
- if $(am__make_keepgoing); then \
- failcom='fail=yes'; \
- else \
- failcom='exit 1'; \
- fi; \
- dot_seen=no; \
- target=`echo $@ | sed s/-recursive//`; \
- case "$@" in \
- distclean-* | maintainer-clean-*) list='$(DIST_SUBDIRS)' ;; \
- *) list='$(SUBDIRS)' ;; \
- esac; \
- for subdir in $$list; do \
- echo "Making $$target in $$subdir"; \
- if test "$$subdir" = "."; then \
- dot_seen=yes; \
- local_target="$$target-am"; \
- else \
- local_target="$$target"; \
- fi; \
- ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \
- || eval $$failcom; \
- done; \
- if test "$$dot_seen" = "no"; then \
- $(MAKE) $(AM_MAKEFLAGS) "$$target-am" || exit 1; \
- fi; test -z "$$fail"
-
-ID: $(am__tagged_files)
- $(am__define_uniq_tagged_files); mkid -fID $$unique
-tags: tags-recursive
-TAGS: tags
-
-tags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files)
- set x; \
- here=`pwd`; \
- if ($(ETAGS) --etags-include --version) >/dev/null 2>&1; then \
- include_option=--etags-include; \
- empty_fix=.; \
- else \
- include_option=--include; \
- empty_fix=; \
- fi; \
- list='$(SUBDIRS)'; for subdir in $$list; do \
- if test "$$subdir" = .; then :; else \
- test ! -f $$subdir/TAGS || \
- set "$$@" "$$include_option=$$here/$$subdir/TAGS"; \
- fi; \
- done; \
- $(am__define_uniq_tagged_files); \
- shift; \
- if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \
- test -n "$$unique" || unique=$$empty_fix; \
- if test $$# -gt 0; then \
- $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \
- "$$@" $$unique; \
- else \
- $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \
- $$unique; \
- fi; \
- fi
-ctags: ctags-recursive
-
-CTAGS: ctags
-ctags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files)
- $(am__define_uniq_tagged_files); \
- test -z "$(CTAGS_ARGS)$$unique" \
- || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \
- $$unique
-
-GTAGS:
- here=`$(am__cd) $(top_builddir) && pwd` \
- && $(am__cd) $(top_srcdir) \
- && gtags -i $(GTAGS_ARGS) "$$here"
-cscopelist: cscopelist-recursive
-
-cscopelist-am: $(am__tagged_files)
- list='$(am__tagged_files)'; \
- case "$(srcdir)" in \
- [\\/]* | ?:[\\/]*) sdir="$(srcdir)" ;; \
- *) sdir=$(subdir)/$(srcdir) ;; \
- esac; \
- for i in $$list; do \
- if test -f "$$i"; then \
- echo "$(subdir)/$$i"; \
- else \
- echo "$$sdir/$$i"; \
- fi; \
- done >> $(top_builddir)/cscope.files
-
-distclean-tags:
- -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags
-
-distdir: $(DISTFILES)
- @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
- topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
- list='$(DISTFILES)'; \
- dist_files=`for file in $$list; do echo $$file; done | \
- sed -e "s|^$$srcdirstrip/||;t" \
- -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \
- case $$dist_files in \
- */*) $(MKDIR_P) `echo "$$dist_files" | \
- sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \
- sort -u` ;; \
- esac; \
- for file in $$dist_files; do \
- if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \
- if test -d $$d/$$file; then \
- dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \
- if test -d "$(distdir)/$$file"; then \
- find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
- fi; \
- if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \
- cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \
- find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
- fi; \
- cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \
- else \
- test -f "$(distdir)/$$file" \
- || cp -p $$d/$$file "$(distdir)/$$file" \
- || exit 1; \
- fi; \
- done
- @list='$(DIST_SUBDIRS)'; for subdir in $$list; do \
- if test "$$subdir" = .; then :; else \
- $(am__make_dryrun) \
- || test -d "$(distdir)/$$subdir" \
- || $(MKDIR_P) "$(distdir)/$$subdir" \
- || exit 1; \
- dir1=$$subdir; dir2="$(distdir)/$$subdir"; \
- $(am__relativize); \
- new_distdir=$$reldir; \
- dir1=$$subdir; dir2="$(top_distdir)"; \
- $(am__relativize); \
- new_top_distdir=$$reldir; \
- echo " (cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) top_distdir="$$new_top_distdir" distdir="$$new_distdir" \\"; \
- echo " am__remove_distdir=: am__skip_length_check=: am__skip_mode_fix=: distdir)"; \
- ($(am__cd) $$subdir && \
- $(MAKE) $(AM_MAKEFLAGS) \
- top_distdir="$$new_top_distdir" \
- distdir="$$new_distdir" \
- am__remove_distdir=: \
- am__skip_length_check=: \
- am__skip_mode_fix=: \
- distdir) \
- || exit 1; \
- fi; \
- done
-check-am: all-am
-check: check-recursive
-all-am: Makefile $(DATA)
-installdirs: installdirs-recursive
-installdirs-am:
-install: install-recursive
-install-exec: install-exec-recursive
-install-data: install-data-recursive
-uninstall: uninstall-recursive
-
-install-am: all-am
- @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
-
-installcheck: installcheck-recursive
-install-strip:
- if test -z '$(STRIP)'; then \
- $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
- install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
- install; \
- else \
- $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
- install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
- "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \
- fi
-mostlyclean-generic:
-
-clean-generic:
-
-distclean-generic:
- -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
- -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES)
-
-maintainer-clean-generic:
- @echo "This command is intended for maintainers to use"
- @echo "it deletes files that may require special tools to rebuild."
- -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES)
-clean: clean-recursive
-
-clean-am: clean-generic mostlyclean-am
-
-distclean: distclean-recursive
- -rm -f Makefile
-distclean-am: clean-am distclean-generic distclean-tags
-
-dvi: dvi-recursive
-
-dvi-am:
-
-html: html-recursive
-
-html-am:
-
-info: info-recursive
-
-info-am:
-
-install-data-am:
-
-install-dvi: install-dvi-recursive
-
-install-dvi-am:
-
-install-exec-am:
-
-install-html: install-html-recursive
-
-install-html-am:
-
-install-info: install-info-recursive
-
-install-info-am:
-
-install-man:
-
-install-pdf: install-pdf-recursive
-
-install-pdf-am:
-
-install-ps: install-ps-recursive
-
-install-ps-am:
-
-installcheck-am:
-
-maintainer-clean: maintainer-clean-recursive
- -rm -f Makefile
-maintainer-clean-am: distclean-am maintainer-clean-generic
-
-mostlyclean: mostlyclean-recursive
-
-mostlyclean-am: mostlyclean-generic
-
-pdf: pdf-recursive
-
-pdf-am:
-
-ps: ps-recursive
-
-ps-am:
-
-uninstall-am:
-
-.MAKE: $(am__recursive_targets) install-am install-strip
-
-.PHONY: $(am__recursive_targets) CTAGS GTAGS TAGS all all-am check \
- check-am clean clean-generic cscopelist-am ctags ctags-am \
- distclean distclean-generic distclean-tags distdir dvi dvi-am \
- html html-am info info-am install install-am install-data \
- install-data-am install-dvi install-dvi-am install-exec \
- install-exec-am install-html install-html-am install-info \
- install-info-am install-man install-pdf install-pdf-am \
- install-ps install-ps-am install-strip installcheck \
- installcheck-am installdirs installdirs-am maintainer-clean \
- maintainer-clean-generic mostlyclean mostlyclean-generic pdf \
- pdf-am ps ps-am tags tags-am uninstall uninstall-am
-
-
-# Tell versions [3.59,3.63) of GNU make to not export all variables.
-# Otherwise a system limit (for SysV at least) may be exceeded.
-.NOEXPORT:
diff --git a/collectors/plugins.d/README.md b/collectors/plugins.d/README.md
index c5981803..6f5294cd 100644
--- a/collectors/plugins.d/README.md
+++ b/collectors/plugins.d/README.md
@@ -1,4 +1,4 @@
-# Netdata External Plugins
+# External plugins overview
`plugins.d` is the netdata internal plugin that collects metrics
from external processes, thus allowing netdata to use **external plugins**.
@@ -9,6 +9,7 @@ plugin|language|O/S|description
:---:|:---:|:---:|:---
[apps.plugin](../apps.plugin/)|`C`|linux, freebsd|monitors the whole process tree on Linux and FreeBSD and breaks down system resource usage by **process**, **user** and **user group**.
[charts.d.plugin](../charts.d.plugin/)|`BASH`|all|a **plugin orchestrator** for data collection modules written in `BASH` v4+.
+[cups.plugin](../cups.plugin/)|`C`|all|monitors **CUPS**.
[fping.plugin](../fping.plugin/)|`C`|all|measures network latency, jitter and packet loss between the monitored node and any number of remote network end points.
[freeipmi.plugin](../freeipmi.plugin/)|`C`|linux|collects metrics from enterprise hardware sensors, on Linux servers.
[node.d.plugin](../node.d.plugin/)|`node.js`|all|a **plugin orchestrator** for data collection modules written in `node.js`.
@@ -88,7 +89,7 @@ For example, for `apps.plugin` the following section is available:
- `command options` allows giving additional command line options to the plugin.
-Netdata will provide to the extrenal plugins the environment variable `NETDATA_UPDATE_EVERY`, in seconds (the default is 1). This is the **minimum update frequency** for all charts. A plugin that is updating values more frequently than this, is just wasting resources.
+Netdata will provide the environment variable `NETDATA_UPDATE_EVERY` to the external plugins, in seconds (the default is 1). This is the **minimum update frequency** for all charts. A plugin that updates values more frequently than this is just wasting resources.
 Netdata will call the plugin with just one command line parameter: the number of seconds the user requested this plugin to update its data (by default this is also 1).
@@ -390,8 +391,6 @@ or do not output the line at all.
Of course, C is the most efficient way of collecting data. This is why netdata itself is written in C.
-## Properly Writing Plugins
-
## Writing Plugins Properly
There are a few rules for writing plugins properly:
@@ -403,7 +402,7 @@ There are a few rules for writing plugins properly:
- Initialize everything once, at the beginning. Initialization is not an expensive operation. Your plugin will most probably be started once and run forever. So, do whatever heavy operation is needed at the beginning, just once.
 - Do the absolute minimum while iterating to collect values repeatedly.
- If you need to connect to another server to collect values, avoid re-connects if possible. Connect just once, with keep-alive (for HTTP) enabled and collect values using the same connection.
- - Avoid any CPU or memory heavy operation while collecting data. If you control memory allocation, avoid any memory allocation white iterating to collect values.
+ - Avoid any CPU or memory heavy operation while collecting data. If you control memory allocation, avoid any memory allocation while iterating to collect values.
 - Avoid running external commands when possible. If you are writing shell scripts, especially avoid pipes (each pipe is another fork, a very expensive operation).
2. The best way to iterate at a constant pace is this pseudo code:
@@ -471,3 +470,5 @@ There are a few rules for writing plugins properly:
4. If possible, try to autodetect if your plugin should be enabled, without any configuration.
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fplugins.d%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/collectors/proc.plugin/Makefile.in b/collectors/proc.plugin/Makefile.in
deleted file mode 100644
index f6db90c8..00000000
--- a/collectors/proc.plugin/Makefile.in
+++ /dev/null
@@ -1,464 +0,0 @@
-# Makefile.in generated by automake 1.14.1 from Makefile.am.
-# @configure_input@
-
-# Copyright (C) 1994-2013 Free Software Foundation, Inc.
-
-# This Makefile.in is free software; the Free Software Foundation
-# gives unlimited permission to copy and/or distribute it,
-# with or without modifications, as long as this notice is preserved.
-
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
-# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
-# PARTICULAR PURPOSE.
-
-@SET_MAKE@
-
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-VPATH = @srcdir@
-am__is_gnu_make = test -n '$(MAKEFILE_LIST)' && test -n '$(MAKELEVEL)'
-am__make_running_with_option = \
- case $${target_option-} in \
- ?) ;; \
- *) echo "am__make_running_with_option: internal error: invalid" \
- "target option '$${target_option-}' specified" >&2; \
- exit 1;; \
- esac; \
- has_opt=no; \
- sane_makeflags=$$MAKEFLAGS; \
- if $(am__is_gnu_make); then \
- sane_makeflags=$$MFLAGS; \
- else \
- case $$MAKEFLAGS in \
- *\\[\ \ ]*) \
- bs=\\; \
- sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \
- | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \
- esac; \
- fi; \
- skip_next=no; \
- strip_trailopt () \
- { \
- flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \
- }; \
- for flg in $$sane_makeflags; do \
- test $$skip_next = yes && { skip_next=no; continue; }; \
- case $$flg in \
- *=*|--*) continue;; \
- -*I) strip_trailopt 'I'; skip_next=yes;; \
- -*I?*) strip_trailopt 'I';; \
- -*O) strip_trailopt 'O'; skip_next=yes;; \
- -*O?*) strip_trailopt 'O';; \
- -*l) strip_trailopt 'l'; skip_next=yes;; \
- -*l?*) strip_trailopt 'l';; \
- -[dEDm]) skip_next=yes;; \
- -[JT]) skip_next=yes;; \
- esac; \
- case $$flg in \
- *$$target_option*) has_opt=yes; break;; \
- esac; \
- done; \
- test $$has_opt = yes
-am__make_dryrun = (target_option=n; $(am__make_running_with_option))
-am__make_keepgoing = (target_option=k; $(am__make_running_with_option))
-pkgdatadir = $(datadir)/@PACKAGE@
-pkgincludedir = $(includedir)/@PACKAGE@
-pkglibdir = $(libdir)/@PACKAGE@
-pkglibexecdir = $(libexecdir)/@PACKAGE@
-am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd
-install_sh_DATA = $(install_sh) -c -m 644
-install_sh_PROGRAM = $(install_sh) -c
-install_sh_SCRIPT = $(install_sh) -c
-INSTALL_HEADER = $(INSTALL_DATA)
-transform = $(program_transform_name)
-NORMAL_INSTALL = :
-PRE_INSTALL = :
-POST_INSTALL = :
-NORMAL_UNINSTALL = :
-PRE_UNINSTALL = :
-POST_UNINSTALL = :
-build_triplet = @build@
-host_triplet = @host@
-subdir = collectors/proc.plugin
-DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/Makefile.am \
- $(dist_noinst_DATA)
-ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
-am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \
- $(top_srcdir)/build/m4/ax_c__generic.m4 \
- $(top_srcdir)/build/m4/ax_c_lto.m4 \
- $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \
- $(top_srcdir)/build/m4/ax_c_mallopt.m4 \
- $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \
- $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \
- $(top_srcdir)/build/m4/ax_pthread.m4 \
- $(top_srcdir)/build/m4/jemalloc.m4 \
- $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac
-am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
- $(ACLOCAL_M4)
-mkinstalldirs = $(install_sh) -d
-CONFIG_HEADER = $(top_builddir)/config.h
-CONFIG_CLEAN_FILES =
-CONFIG_CLEAN_VPATH_FILES =
-AM_V_P = $(am__v_P_@AM_V@)
-am__v_P_ = $(am__v_P_@AM_DEFAULT_V@)
-am__v_P_0 = false
-am__v_P_1 = :
-AM_V_GEN = $(am__v_GEN_@AM_V@)
-am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@)
-am__v_GEN_0 = @echo " GEN " $@;
-am__v_GEN_1 =
-AM_V_at = $(am__v_at_@AM_V@)
-am__v_at_ = $(am__v_at_@AM_DEFAULT_V@)
-am__v_at_0 = @
-am__v_at_1 =
-SOURCES =
-DIST_SOURCES =
-am__can_run_installinfo = \
- case $$AM_UPDATE_INFO_DIR in \
- n|no|NO) false;; \
- *) (install-info --version) >/dev/null 2>&1;; \
- esac
-DATA = $(dist_noinst_DATA)
-am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP)
-DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
-ACLOCAL = @ACLOCAL@
-AMTAR = @AMTAR@
-AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@
-AUTOCONF = @AUTOCONF@
-AUTOHEADER = @AUTOHEADER@
-AUTOMAKE = @AUTOMAKE@
-AWK = @AWK@
-CC = @CC@
-CCDEPMODE = @CCDEPMODE@
-CFLAGS = @CFLAGS@
-CPP = @CPP@
-CPPFLAGS = @CPPFLAGS@
-CYGPATH_W = @CYGPATH_W@
-DEFS = @DEFS@
-DEPDIR = @DEPDIR@
-ECHO_C = @ECHO_C@
-ECHO_N = @ECHO_N@
-ECHO_T = @ECHO_T@
-EGREP = @EGREP@
-EXEEXT = @EXEEXT@
-GREP = @GREP@
-INSTALL = @INSTALL@
-INSTALL_DATA = @INSTALL_DATA@
-INSTALL_PROGRAM = @INSTALL_PROGRAM@
-INSTALL_SCRIPT = @INSTALL_SCRIPT@
-INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
-IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@
-IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@
-LDFLAGS = @LDFLAGS@
-LIBCAP_CFLAGS = @LIBCAP_CFLAGS@
-LIBCAP_LIBS = @LIBCAP_LIBS@
-LIBMNL_CFLAGS = @LIBMNL_CFLAGS@
-LIBMNL_LIBS = @LIBMNL_LIBS@
-LIBOBJS = @LIBOBJS@
-LIBS = @LIBS@
-LTLIBOBJS = @LTLIBOBJS@
-MAINT = @MAINT@
-MAKEINFO = @MAKEINFO@
-MATH_CFLAGS = @MATH_CFLAGS@
-MATH_LIBS = @MATH_LIBS@
-MKDIR_P = @MKDIR_P@
-NFACCT_CFLAGS = @NFACCT_CFLAGS@
-NFACCT_LIBS = @NFACCT_LIBS@
-OBJEXT = @OBJEXT@
-OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@
-OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@
-OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@
-OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@
-OPTIONAL_MATH_CLFAGS = @OPTIONAL_MATH_CLFAGS@
-OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@
-OPTIONAL_NFACCT_CLFAGS = @OPTIONAL_NFACCT_CLFAGS@
-OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@
-OPTIONAL_UUID_CLFAGS = @OPTIONAL_UUID_CLFAGS@
-OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@
-OPTIONAL_ZLIB_CLFAGS = @OPTIONAL_ZLIB_CLFAGS@
-OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@
-PACKAGE = @PACKAGE@
-PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@
-PACKAGE_NAME = @PACKAGE_NAME@
-PACKAGE_RPM_RELEASE = @PACKAGE_RPM_RELEASE@
-PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@
-PACKAGE_STRING = @PACKAGE_STRING@
-PACKAGE_TARNAME = @PACKAGE_TARNAME@
-PACKAGE_URL = @PACKAGE_URL@
-PACKAGE_VERSION = @PACKAGE_VERSION@
-PATH_SEPARATOR = @PATH_SEPARATOR@
-PKG_CONFIG = @PKG_CONFIG@
-PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@
-PKG_CONFIG_PATH = @PKG_CONFIG_PATH@
-PTHREAD_CC = @PTHREAD_CC@
-PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
-PTHREAD_LIBS = @PTHREAD_LIBS@
-SET_MAKE = @SET_MAKE@
-SHELL = @SHELL@
-SSE_CANDIDATE = @SSE_CANDIDATE@
-STRIP = @STRIP@
-UUID_CFLAGS = @UUID_CFLAGS@
-UUID_LIBS = @UUID_LIBS@
-VERSION = @VERSION@
-ZLIB_CFLAGS = @ZLIB_CFLAGS@
-ZLIB_LIBS = @ZLIB_LIBS@
-abs_builddir = @abs_builddir@
-abs_srcdir = @abs_srcdir@
-abs_top_builddir = @abs_top_builddir@
-abs_top_srcdir = @abs_top_srcdir@
-ac_ct_CC = @ac_ct_CC@
-am__include = @am__include@
-am__leading_dot = @am__leading_dot@
-am__quote = @am__quote@
-am__tar = @am__tar@
-am__untar = @am__untar@
-ax_pthread_config = @ax_pthread_config@
-bindir = @bindir@
-build = @build@
-build_alias = @build_alias@
-build_cpu = @build_cpu@
-build_os = @build_os@
-build_target = @build_target@
-build_vendor = @build_vendor@
-builddir = @builddir@
-cachedir = @cachedir@
-chartsdir = @chartsdir@
-configdir = @configdir@
-datadir = @datadir@
-datarootdir = @datarootdir@
-docdir = @docdir@
-dvidir = @dvidir@
-exec_prefix = @exec_prefix@
-has_jemalloc = @has_jemalloc@
-has_tcmalloc = @has_tcmalloc@
-host = @host@
-host_alias = @host_alias@
-host_cpu = @host_cpu@
-host_os = @host_os@
-host_vendor = @host_vendor@
-htmldir = @htmldir@
-includedir = @includedir@
-infodir = @infodir@
-install_sh = @install_sh@
-libconfigdir = @libconfigdir@
-libdir = @libdir@
-libexecdir = @libexecdir@
-localedir = @localedir@
-localstatedir = @localstatedir@
-logdir = @logdir@
-mandir = @mandir@
-mkdir_p = @mkdir_p@
-nodedir = @nodedir@
-oldincludedir = @oldincludedir@
-pdfdir = @pdfdir@
-pluginsdir = @pluginsdir@
-prefix = @prefix@
-program_transform_name = @program_transform_name@
-psdir = @psdir@
-pythondir = @pythondir@
-registrydir = @registrydir@
-sbindir = @sbindir@
-sharedstatedir = @sharedstatedir@
-srcdir = @srcdir@
-sysconfdir = @sysconfdir@
-target_alias = @target_alias@
-top_build_prefix = @top_build_prefix@
-top_builddir = @top_builddir@
-top_srcdir = @top_srcdir@
-varlibdir = @varlibdir@
-webdir = @webdir@
-AUTOMAKE_OPTIONS = subdir-objects
-MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
-dist_noinst_DATA = \
- README.md \
- $(NULL)
-
-all: all-am
-
-.SUFFIXES:
-$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps)
- @for dep in $?; do \
- case '$(am__configure_deps)' in \
- *$$dep*) \
- ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \
- && { if test -f $@; then exit 0; else break; fi; }; \
- exit 1;; \
- esac; \
- done; \
- echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu collectors/proc.plugin/Makefile'; \
- $(am__cd) $(top_srcdir) && \
- $(AUTOMAKE) --gnu collectors/proc.plugin/Makefile
-.PRECIOUS: Makefile
-Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
- @case '$?' in \
- *config.status*) \
- cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \
- *) \
- echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \
- cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \
- esac;
-
-$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES)
- cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
-
-$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps)
- cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
-$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps)
- cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
-$(am__aclocal_m4_deps):
-tags TAGS:
-
-ctags CTAGS:
-
-cscope cscopelist:
-
-
-distdir: $(DISTFILES)
- @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
- topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
- list='$(DISTFILES)'; \
- dist_files=`for file in $$list; do echo $$file; done | \
- sed -e "s|^$$srcdirstrip/||;t" \
- -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \
- case $$dist_files in \
- */*) $(MKDIR_P) `echo "$$dist_files" | \
- sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \
- sort -u` ;; \
- esac; \
- for file in $$dist_files; do \
- if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \
- if test -d $$d/$$file; then \
- dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \
- if test -d "$(distdir)/$$file"; then \
- find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
- fi; \
- if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \
- cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \
- find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
- fi; \
- cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \
- else \
- test -f "$(distdir)/$$file" \
- || cp -p $$d/$$file "$(distdir)/$$file" \
- || exit 1; \
- fi; \
- done
-check-am: all-am
-check: check-am
-all-am: Makefile $(DATA)
-installdirs:
-install: install-am
-install-exec: install-exec-am
-install-data: install-data-am
-uninstall: uninstall-am
-
-install-am: all-am
- @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
-
-installcheck: installcheck-am
-install-strip:
- if test -z '$(STRIP)'; then \
- $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
- install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
- install; \
- else \
- $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
- install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
- "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \
- fi
-mostlyclean-generic:
-
-clean-generic:
-
-distclean-generic:
- -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
- -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES)
-
-maintainer-clean-generic:
- @echo "This command is intended for maintainers to use"
- @echo "it deletes files that may require special tools to rebuild."
- -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES)
-clean: clean-am
-
-clean-am: clean-generic mostlyclean-am
-
-distclean: distclean-am
- -rm -f Makefile
-distclean-am: clean-am distclean-generic
-
-dvi: dvi-am
-
-dvi-am:
-
-html: html-am
-
-html-am:
-
-info: info-am
-
-info-am:
-
-install-data-am:
-
-install-dvi: install-dvi-am
-
-install-dvi-am:
-
-install-exec-am:
-
-install-html: install-html-am
-
-install-html-am:
-
-install-info: install-info-am
-
-install-info-am:
-
-install-man:
-
-install-pdf: install-pdf-am
-
-install-pdf-am:
-
-install-ps: install-ps-am
-
-install-ps-am:
-
-installcheck-am:
-
-maintainer-clean: maintainer-clean-am
- -rm -f Makefile
-maintainer-clean-am: distclean-am maintainer-clean-generic
-
-mostlyclean: mostlyclean-am
-
-mostlyclean-am: mostlyclean-generic
-
-pdf: pdf-am
-
-pdf-am:
-
-ps: ps-am
-
-ps-am:
-
-uninstall-am:
-
-.MAKE: install-am install-strip
-
-.PHONY: all all-am check check-am clean clean-generic cscopelist-am \
- ctags-am distclean distclean-generic distdir dvi dvi-am html \
- html-am info info-am install install-am install-data \
- install-data-am install-dvi install-dvi-am install-exec \
- install-exec-am install-html install-html-am install-info \
- install-info-am install-man install-pdf install-pdf-am \
- install-ps install-ps-am install-strip installcheck \
- installcheck-am installdirs maintainer-clean \
- maintainer-clean-generic mostlyclean mostlyclean-generic pdf \
- pdf-am ps ps-am tags-am uninstall uninstall-am
-
-
-# Tell versions [3.59,3.63) of GNU make to not export all variables.
-# Otherwise a system limit (for SysV at least) may be exceeded.
-.NOEXPORT:
diff --git a/collectors/proc.plugin/README.md b/collectors/proc.plugin/README.md
index 12306565..de62aeca 100755..100644
--- a/collectors/proc.plugin/README.md
+++ b/collectors/proc.plugin/README.md
@@ -2,6 +2,7 @@
- `/proc/net/dev` (all network interfaces for all their values)
- `/proc/diskstats` (all disks for all their values)
+ - `/proc/mdstat` (status of RAID arrays)
- `/proc/net/snmp` (total IPv4, TCP and UDP usage)
- `/proc/net/snmp6` (total IPv6 usage)
- `/proc/net/netstat` (more IPv4 usage)
@@ -18,6 +19,7 @@
- `/proc/softirqs` (total and per core software interrupts)
- `/proc/loadavg` (system load and total processes running)
- `/proc/sys/kernel/random/entropy_avail` (random numbers pool availability - used in cryptography)
+ - `/sys/class/power_supply` (power supply properties)
- `ksm` Kernel Same-Page Merging performance (several files under `/sys/kernel/mm/ksm`).
- `netdata` (internal netdata resources utilization)
@@ -117,7 +119,7 @@ Then edit `netdata.conf` and find the following section. This is the basic plugi
# path to get h/w sector size = /sys/block/%s/queue/hw_sector_size
# path to get h/w sector size for partitions = /sys/dev/block/%lu:%lu/subsystem/%s/../queue
/hw_sector_size
-
+
```
For each virtual disk, physical disk and partition you will have a section like this:
@@ -160,13 +162,52 @@ But sometimes you need disable performance metrics for all devices with the same
251 2 zram2 27487 0 219896 188 79953 0 639624 1640 0 1828 1828
251 3 zram3 27348 0 218784 152 79952 0 639616 1960 0 2060 2104
```
-All zram devices starts with `251` number and all loop devices starts with `7`.
+All zram devices start with major number `251` and all loop devices start with major number `7`.
 So, to disable performance metrics for all loop devices you could add `performance metrics for disks with major 7 = no` to the `[plugin:proc:/proc/diskstats]` section.
```
[plugin:proc:/proc/diskstats]
performance metrics for disks with major 7 = no
```
+## Monitoring RAID arrays
+
+### Monitored RAID array metrics
+
+1. **Health**: the number of failed disks in every array (aggregate chart).
+
+2. **Disks stats**
+ * total (number of devices the array would ideally have)
+ * inuse (number of devices currently in use)
+
+3. **Mismatch count**
+ * unsynchronized blocks
+
+4. **Current status**
+ * resync in percent
+ * recovery in percent
+ * reshape in percent
+ * check in percent
+
+5. **Operation status** (if resync/recovery/reshape/check is active)
+ * finish in minutes
+ * speed in megabytes/s
+
+6. **Nonredundant array availability**
+
+#### configuration
+
+```
+[plugin:proc:/proc/mdstat]
+ # faulty devices = yes
+ # nonredundant arrays availability = yes
+ # mismatch count = auto
+ # disk stats = yes
+ # operation status = yes
+ # make charts obsolete = yes
+ # filename to monitor = /proc/mdstat
+ # mismatch_cnt filename to monitor = /sys/block/%s/md/mismatch_cnt
+```
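+
+For reference, a healthy two-disk RAID1 array appears in `/proc/mdstat` like this (device names are just an example):
+
+```
+Personalities : [raid1]
+md0 : active raid1 sdb1[1] sda1[0]
+      104320 blocks [2/2] [UU]
+
+unused devices: <none>
+```
+
+The `[2/2]` field is the source of the `total` and `inuse` values above, and `[UU]` shows that no device in the array has failed.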
+
## Monitoring CPUs
The `/proc/stat` module monitors CPU utilization, interrupts, context switches, processes started/running, thermal throttling, frequency, and idle states. It gathers this information from multiple files.
@@ -219,7 +260,7 @@ SYNPROXY is a netfilter module, in the Linux kernel (since version 3.12). It is
 The net effect of this is that the real servers will not notice any change during the attack. The valid TCP connections will pass through and be served, while the attack will be stopped at the firewall.
-To use SYNPROXY on your firewall, please follow our setup guides:
+Netdata does not enable SYNPROXY. It just uses the SYNPROXY metrics exposed by your kernel, so you will first need to configure it. The hard way is to run iptables SYNPROXY commands directly on the console. An easier way is to use [FireHOL](https://firehol.org/), which is a firewall manager for iptables. FireHOL can configure SYNPROXY using the following setup guides:
- **[Working with SYNPROXY](https://github.com/firehol/firehol/wiki/Working-with-SYNPROXY)**
- **[Working with SYNPROXY and traps](https://github.com/firehol/firehol/wiki/Working-with-SYNPROXY-and-traps)**
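+
+If you prefer the manual route, the canonical iptables recipe looks like the sketch below (shown for TCP port 80 only, as an illustration; it is not netdata configuration and must be adapted to your services):
+
+```
+# divert new SYNs away from conntrack, then let SYNPROXY validate them
+iptables -t raw -A PREROUTING -p tcp --dport 80 --syn -j CT --notrack
+iptables -A INPUT -p tcp --dport 80 -m conntrack --ctstate INVALID,UNTRACKED \
+         -j SYNPROXY --sack-perm --timestamp --wscale 7 --mss 1460
+iptables -A INPUT -m conntrack --ctstate INVALID -j DROP
+```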
@@ -239,4 +280,67 @@ Example image:
![ddos](https://cloud.githubusercontent.com/assets/2662304/14398891/6016e3fc-fdf0-11e5-942b-55de6a52cb66.gif)
-See Linux Anti-DDoS in action at: **[netdata demo site (with SYNPROXY enabled)](https://registry.my-netdata.io/#menu_netfilter_submenu_synproxy)**
+See Linux Anti-DDoS in action at: **[netdata demo site (with SYNPROXY enabled)](https://registry.my-netdata.io/#menu_netfilter_submenu_synproxy)**
+
+## Linux power supply
+
+This module monitors various metrics reported by power supply drivers
+on Linux. This allows tracking and alerting on things like remaining
+battery capacity.
+
+Depending on the underlying driver, it may provide the following charts
+and metrics:
+
+1. Capacity: The power supply capacity expressed as a percentage.
+ * capacity\_now
+
+2. Charge: The charge for the power supply, expressed as amp-hours.
+ * charge\_full\_design
+ * charge\_full
+ * charge\_now
+ * charge\_empty
+ * charge\_empty\_design
+
+3. Energy: The energy for the power supply, expressed as watt-hours.
+ * energy\_full\_design
+ * energy\_full
+ * energy\_now
+ * energy\_empty
+ * energy\_empty\_design
+
+4. Voltage: The voltage for the power supply, expressed as volts.
+ * voltage\_max\_design
+ * voltage\_max
+ * voltage\_now
+ * voltage\_min
+ * voltage\_min\_design
+
+#### configuration
+
+```
+[plugin:proc:/sys/class/power_supply]
+ # battery capacity = yes
+ # battery charge = no
+ # battery energy = no
+ # power supply voltage = no
+ # keep files open = auto
+ # directory to monitor = /sys/class/power_supply
+```
+
+#### notes
+
+* Most drivers provide at least the first chart. Battery-powered,
+ACPI-compliant systems (like most laptops) provide all but the third,
+but do not provide all of the metrics for each chart.
+
+* Current, energy, and voltages are reported with a _very_ high precision
+by the power\_supply framework. Usually, this is far higher than the
+actual hardware supports reporting, so expect the values in these
+charts to jump instead of changing smoothly.
+
+* If a `max` or `full` attribute is defined by the driver, but not a
+corresponding `min` or `empty` attribute, then Netdata will still provide
+the corresponding `min` or `empty` dimension, which will then always read
+as zero. This way, alerts that match on these will still work.
+
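+To cross-check the values Netdata reports, you can read the same sysfs
+attributes directly. An illustrative session (device names and values will
+differ on your system):
+
+```
+$ cat /sys/class/power_supply/BAT0/capacity
+87
+$ cat /sys/class/power_supply/BAT0/energy_now
+32850000
+```
+
+The raw `charge_*`, `energy_*` and `voltage_*` attributes are exposed in
+µAh, µWh and µV respectively, which is why the collector divides them by
+1,000,000 before charting them as Ah, Wh and V.
+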
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fproc.plugin%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/collectors/proc.plugin/plugin_proc.c b/collectors/proc.plugin/plugin_proc.c
index 0c3244d6..343acfa3 100644
--- a/collectors/proc.plugin/plugin_proc.c
+++ b/collectors/proc.plugin/plugin_proc.c
@@ -49,6 +49,7 @@ static struct proc_module {
// disk metrics
{ .name = "/proc/diskstats", .dim = "diskstats", .func = do_proc_diskstats },
+ { .name = "/proc/mdstat", .dim = "mdstat", .func = do_proc_mdstat },
// NFS metrics
{ .name = "/proc/net/rpc/nfsd", .dim = "nfsd", .func = do_proc_net_rpc_nfsd },
@@ -63,6 +64,9 @@ static struct proc_module {
// IPC metrics
{ .name = "ipc", .dim = "ipc", .func = do_ipc },
+ // linux power supply metrics
+ { .name = "/sys/class/power_supply", .dim = "power_supply", .func = do_sys_class_power_supply },
+
// the terminator of this array
{ .name = NULL, .dim = NULL, .func = NULL }
};
diff --git a/collectors/proc.plugin/plugin_proc.h b/collectors/proc.plugin/plugin_proc.h
index bfefe1ad..0c2afe77 100644
--- a/collectors/proc.plugin/plugin_proc.h
+++ b/collectors/proc.plugin/plugin_proc.h
@@ -26,6 +26,7 @@ extern void *proc_main(void *ptr);
extern int do_proc_net_dev(int update_every, usec_t dt);
extern int do_proc_diskstats(int update_every, usec_t dt);
+extern int do_proc_mdstat(int update_every, usec_t dt);
extern int do_proc_net_snmp(int update_every, usec_t dt);
extern int do_proc_net_snmp6(int update_every, usec_t dt);
extern int do_proc_net_netstat(int update_every, usec_t dt);
@@ -52,6 +53,7 @@ extern int do_proc_net_sockstat(int update_every, usec_t dt);
extern int do_proc_net_sockstat6(int update_every, usec_t dt);
extern int do_proc_net_sctp_snmp(int update_every, usec_t dt);
extern int do_ipc(int update_every, usec_t dt);
+extern int do_sys_class_power_supply(int update_every, usec_t dt);
extern int get_numa_node_count(void);
// metrics that need to be shared among data collectors
diff --git a/collectors/proc.plugin/proc_diskstats.c b/collectors/proc.plugin/proc_diskstats.c
index 387b395a..51fe7f46 100644
--- a/collectors/proc.plugin/proc_diskstats.c
+++ b/collectors/proc.plugin/proc_diskstats.c
@@ -798,7 +798,7 @@ int do_proc_diskstats(int update_every, usec_t dt) {
global_bcache_priority_stats_update_every = (int)config_get_number(CONFIG_SECTION_PLUGIN_PROC_DISKSTATS, "bcache priority stats update every", global_bcache_priority_stats_update_every);
global_cleanup_removed_disks = config_get_boolean(CONFIG_SECTION_PLUGIN_PROC_DISKSTATS, "remove charts of removed disks" , global_cleanup_removed_disks);
-
+
char buffer[FILENAME_MAX + 1];
snprintfz(buffer, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/sys/block/%s");
@@ -960,7 +960,7 @@ int do_proc_diskstats(int update_every, usec_t dt) {
, family
, "disk.io"
, "Disk I/O Bandwidth"
- , "kilobytes/s"
+ , "KiB/s"
, PLUGIN_PROC_NAME
, PLUGIN_PROC_MODULE_DISKSTATS_NAME
, NETDATA_CHART_PRIO_DISK_IO
@@ -1055,7 +1055,7 @@ int do_proc_diskstats(int update_every, usec_t dt) {
, family
, "disk.backlog"
, "Disk Backlog"
- , "backlog (ms)"
+ , "milliseconds"
, PLUGIN_PROC_NAME
, PLUGIN_PROC_MODULE_DISKSTATS_NAME
, NETDATA_CHART_PRIO_DISK_BACKLOG
@@ -1186,7 +1186,7 @@ int do_proc_diskstats(int update_every, usec_t dt) {
, family
, "disk.await"
, "Average Completed I/O Operation Time"
- , "ms per operation"
+ , "milliseconds/operation"
, PLUGIN_PROC_NAME
, PLUGIN_PROC_MODULE_DISKSTATS_NAME
, NETDATA_CHART_PRIO_DISK_AWAIT
@@ -1217,7 +1217,7 @@ int do_proc_diskstats(int update_every, usec_t dt) {
, family
, "disk.avgsz"
, "Average Completed I/O Operation Bandwidth"
- , "kilobytes per operation"
+ , "KiB/operation"
, PLUGIN_PROC_NAME
, PLUGIN_PROC_MODULE_DISKSTATS_NAME
, NETDATA_CHART_PRIO_DISK_AVGSZ
@@ -1248,7 +1248,7 @@ int do_proc_diskstats(int update_every, usec_t dt) {
, family
, "disk.svctm"
, "Average Service Time"
- , "ms per operation"
+ , "milliseconds/operation"
, PLUGIN_PROC_NAME
, PLUGIN_PROC_MODULE_DISKSTATS_NAME
, NETDATA_CHART_PRIO_DISK_SVCTM
@@ -1385,7 +1385,7 @@ int do_proc_diskstats(int update_every, usec_t dt) {
, family
, "disk.bcache_rates"
, "BCache Rates"
- , "KB/s"
+ , "KiB/s"
, PLUGIN_PROC_NAME
, PLUGIN_PROC_MODULE_DISKSTATS_NAME
, NETDATA_CHART_PRIO_BCACHE_RATES
@@ -1412,7 +1412,7 @@ int do_proc_diskstats(int update_every, usec_t dt) {
, family
, "disk.bcache_size"
, "BCache Cache Sizes"
- , "MB"
+ , "MiB"
, PLUGIN_PROC_NAME
, PLUGIN_PROC_MODULE_DISKSTATS_NAME
, NETDATA_CHART_PRIO_BCACHE_SIZE
@@ -1437,7 +1437,7 @@ int do_proc_diskstats(int update_every, usec_t dt) {
, family
, "disk.bcache_usage"
, "BCache Cache Usage"
- , "percent"
+ , "percentage"
, PLUGIN_PROC_NAME
, PLUGIN_PROC_MODULE_DISKSTATS_NAME
, NETDATA_CHART_PRIO_BCACHE_USAGE
@@ -1563,7 +1563,7 @@ int do_proc_diskstats(int update_every, usec_t dt) {
, "disk"
, NULL
, "Disk I/O"
- , "kilobytes/s"
+ , "KiB/s"
, PLUGIN_PROC_NAME
, PLUGIN_PROC_MODULE_DISKSTATS_NAME
, NETDATA_CHART_PRIO_SYSTEM_IO
diff --git a/collectors/proc.plugin/proc_mdstat.c b/collectors/proc.plugin/proc_mdstat.c
new file mode 100644
index 00000000..d0925ec3
--- /dev/null
+++ b/collectors/proc.plugin/proc_mdstat.c
@@ -0,0 +1,641 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "plugin_proc.h"
+
+#define PLUGIN_PROC_MODULE_MDSTAT_NAME "/proc/mdstat"
+
+struct raid {
+ int redundant;
+ char *name;
+ uint32_t hash;
+
+ RRDDIM *rd_health;
+ unsigned long long failed_disks;
+
+ RRDSET *st_disks;
+ RRDDIM *rd_total;
+ RRDDIM *rd_inuse;
+ unsigned long long total_disks;
+ unsigned long long inuse_disks;
+
+ RRDSET *st_operation;
+ RRDDIM *rd_check;
+ RRDDIM *rd_resync;
+ RRDDIM *rd_recovery;
+ RRDDIM *rd_reshape;
+ unsigned long long check;
+ unsigned long long resync;
+ unsigned long long recovery;
+ unsigned long long reshape;
+
+ RRDSET *st_finish;
+ RRDDIM *rd_finish_in;
+ unsigned long long finish_in;
+
+ RRDSET *st_speed;
+ RRDDIM *rd_speed;
+ unsigned long long speed;
+
+ char *mismatch_cnt_filename;
+ RRDSET *st_mismatch_cnt;
+ RRDDIM *rd_mismatch_cnt;
+ unsigned long long mismatch_cnt;
+
+ RRDSET *st_nonredundant;
+ RRDDIM *rd_nonredundant;
+};
+
+struct old_raid {
+ int redundant;
+ char *name;
+ uint32_t hash;
+ int found;
+};
+
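+// despite its name, this replaces *every* occurrence of 'c' with '\0'
+// (effectively truncating the string at the first one) and returns a
+// pointer to the original terminator, which callers use as an
+// end-of-string marker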
+static inline char *remove_trailing_chars(char *s, char c) {
+ while(*s) {
+ if(unlikely(*s == c)) {
+ *s = '\0';
+ }
+ s++;
+ }
+ return s;
+}
+
+static inline void make_chart_obsolete(char *name, const char *id_modifier) {
+ char id[50 + 1];
+ RRDSET *st = NULL;
+
+ if(likely(name && id_modifier)) {
+ snprintfz(id, 50, "mdstat.%s_%s", name, id_modifier);
+ st = rrdset_find_byname_localhost(id);
+ if(likely(st)) rrdset_is_obsolete(st);
+ }
+}
+
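+// the parser below expects the canonical /proc/mdstat layout; an
+// illustrative sample (the values are made up):
+//
+//    Personalities : [raid1]
+//    md0 : active raid1 sdb1[1] sda1[0]
+//          104792064 blocks super 1.2 [2/2] [UU]
+//          [=>...................]  check =  8.3% (8686528/104792064) finish=7.7min speed=206120K/sec
+//
+//    unused devices: <none>
+//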
+int do_proc_mdstat(int update_every, usec_t dt) {
+ (void)dt;
+ static procfile *ff = NULL;
+ static int do_health = -1, do_nonredundant = -1, do_disks = -1, do_operations = -1, do_mismatch = -1, do_mismatch_config = -1;
+ static int make_charts_obsolete = -1;
+ static char *mdstat_filename = NULL, *mismatch_cnt_filename = NULL;
+ static struct raid *raids = NULL;
+ static size_t raids_allocated = 0;
+ size_t raids_num = 0, raid_idx = 0, redundant_num = 0;
+ static struct old_raid *old_raids = NULL;
+ static size_t old_raids_allocated = 0;
+ size_t old_raid_idx = 0;
+
+ if(unlikely(do_health == -1)){
+ do_health = config_get_boolean("plugin:proc:/proc/mdstat", "faulty devices", CONFIG_BOOLEAN_YES);
+ do_nonredundant = config_get_boolean("plugin:proc:/proc/mdstat", "nonredundant arrays availability", CONFIG_BOOLEAN_YES);
+ do_mismatch_config = config_get_boolean_ondemand("plugin:proc:/proc/mdstat", "mismatch count", CONFIG_BOOLEAN_AUTO);
+ do_disks = config_get_boolean("plugin:proc:/proc/mdstat", "disk stats", CONFIG_BOOLEAN_YES);
+ do_operations = config_get_boolean("plugin:proc:/proc/mdstat", "operation status", CONFIG_BOOLEAN_YES);
+
+ make_charts_obsolete = config_get_boolean("plugin:proc:/proc/mdstat", "make charts obsolete", CONFIG_BOOLEAN_YES);
+
+ char filename[FILENAME_MAX + 1];
+
+ snprintfz(filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/proc/mdstat");
+ mdstat_filename = config_get("plugin:proc:/proc/mdstat", "filename to monitor", filename);
+
+ snprintfz(filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/sys/block/%s/md/mismatch_cnt");
+ mismatch_cnt_filename = config_get("plugin:proc:/proc/mdstat", "mismatch_cnt filename to monitor", filename);
+ }
+
+ if(unlikely(!ff)) {
+ ff = procfile_open(mdstat_filename, " \t:", PROCFILE_FLAG_DEFAULT);
+ if(unlikely(!ff)) return 1;
+ }
+
+ ff = procfile_readall(ff);
+ if(unlikely(!ff)) return 0; // we return 0, so that we will retry opening it next time
+
+ size_t lines = procfile_lines(ff);
+ size_t words = 0;
+
+ if(unlikely(lines < 2)) {
+ error("Cannot read /proc/mdstat. Expected 2 or more lines, read %zu.", lines);
+ return 1;
+ }
+
+    // find out how many raids there are
+ size_t l;
+ raids_num = 0;
+ for(l = 1; l < lines - 2 ; l++) {
+ if(unlikely(procfile_lineword(ff, l, 1)[0] == 'a')) // check if the raid is active
+ raids_num++;
+ }
+
+ if(unlikely(!raids_num && !old_raids_allocated)) return 0; // we return 0, so that we will retry searching for raids next time
+
+    // allocate the memory we need
+ if(unlikely(raids_num != raids_allocated)) {
+ for(raid_idx = 0; raid_idx < raids_allocated; raid_idx++) {
+ struct raid *raid = &raids[raid_idx];
+ freez(raid->name);
+ freez(raid->mismatch_cnt_filename);
+ }
+ if(raids_num) {
+ raids = (struct raid *)reallocz(raids, raids_num * sizeof(struct raid));
+ memset(raids, 0, raids_num * sizeof(struct raid));
+ }
+ else {
+ freez(raids);
+ raids = NULL;
+ }
+ raids_allocated = raids_num;
+ }
+
+ // loop through all lines except the first and the last ones
+ for(l = 1, raid_idx = 0; l < (lines - 2) && raid_idx < raids_num; l++) {
+ struct raid *raid = &raids[raid_idx];
+ raid->redundant = 0;
+
+ words = procfile_linewords(ff, l);
+ if(unlikely(words < 2)) continue;
+
+ if(unlikely(procfile_lineword(ff, l, 1)[0] != 'a')) continue;
+ if(unlikely(!raid->name)) {
+ raid->name = strdupz(procfile_lineword(ff, l, 0));
+ raid->hash = simple_hash(raid->name);
+ }
+ else if(unlikely(strcmp(raid->name, procfile_lineword(ff, l, 0)))) {
+ freez(raid->name);
+ freez(raid->mismatch_cnt_filename);
+ memset(raid, 0, sizeof(struct raid));
+ raid->name = strdupz(procfile_lineword(ff, l, 0));
+ raid->hash = simple_hash(raid->name);
+ }
+ if(unlikely(!raid->name || !raid->name[0])) continue;
+ raid_idx++;
+
+ // check if raid has disk status
+ l++;
+ words = procfile_linewords(ff, l);
+ if(words < 2 || procfile_lineword(ff, l, words - 1)[0] != '[') continue;
+
+ // split inuse and total number of disks
+ if(likely(do_health || do_disks)) {
+ char *s = NULL, *str_total = NULL, *str_inuse = NULL;
+
+ s = procfile_lineword(ff, l, words - 2);
+ if(unlikely(s[0] != '[')) {
+ error("Cannot read /proc/mdstat raid health status. Unexpected format: missing opening bracket.");
+ continue;
+ }
+ str_total = ++s;
+ while(*s) {
+ if(unlikely(*s == '/')) {
+ *s = '\0';
+ str_inuse = s + 1;
+ }
+ else if(unlikely(*s == ']')) {
+ *s = '\0';
+ break;
+ }
+ s++;
+ }
+            if(unlikely(!str_inuse || str_total[0] == '\0' || str_inuse[0] == '\0')) {
+ error("Cannot read /proc/mdstat raid health status. Unexpected format.");
+ continue;
+ }
+
+ raid->inuse_disks = str2ull(str_inuse);
+ raid->total_disks = str2ull(str_total);
+ raid->failed_disks = raid->total_disks - raid->inuse_disks;
+ }
+
+ raid->redundant = 1;
+ redundant_num++;
+ l++;
+
+ // check if any operation is performed on the raid
+ if(likely(do_operations)) {
+ char *s = NULL;
+
+ raid->check = 0;
+ raid->resync = 0;
+ raid->recovery = 0;
+ raid->reshape = 0;
+ raid->finish_in = 0;
+ raid->speed = 0;
+
+ words = procfile_linewords(ff, l);
+ if(likely(words < 2)) continue;
+ if(unlikely(procfile_lineword(ff, l, 0)[0] != '[')) continue;
+ if(unlikely(words < 7)) {
+ error("Cannot read /proc/mdstat line. Expected 7 params, read %zu.", words);
+ continue;
+ }
+
+ char *word;
+ word = procfile_lineword(ff, l, 3);
+ remove_trailing_chars(word, '%');
+
+ unsigned long long percentage = (unsigned long long)(str2ld(word, NULL) * 100);
+ // possible operations: check, resync, recovery, reshape
+            // the 4th character is unique to each operation, so that is what gets checked
+ switch(procfile_lineword(ff, l, 1)[3]) {
+ case 'c': // check
+ raid->check = percentage;
+ break;
+ case 'y': // resync
+ raid->resync = percentage;
+ break;
+ case 'o': // recovery
+ raid->recovery = percentage;
+ break;
+ case 'h': // reshape
+ raid->reshape = percentage;
+ break;
+ }
+
+ word = procfile_lineword(ff, l, 5);
+ s = remove_trailing_chars(word, 'm'); // remove trailing "min"
+
+ word += 7; // skip leading "finish="
+
+ if(likely(s > word))
+ raid->finish_in = (unsigned long long)(str2ld(word, NULL) * 60);
+
+ word = procfile_lineword(ff, l, 6);
+ s = remove_trailing_chars(word, 'K'); // remove trailing "K/sec"
+
+ word += 6; // skip leading "speed="
+
+ if(likely(s > word))
+ raid->speed = str2ull(word);
+ }
+ }
+
+ // read mismatch_cnt files
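+    // in auto mode decide once: reading one sysfs mismatch_cnt file per
+    // raid on every iteration gets expensive, so it is skipped on systems
+    // with an unusually large number of raids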
+ if(do_mismatch == -1) {
+ if(do_mismatch_config == CONFIG_BOOLEAN_AUTO) {
+ if(raids_num > 50)
+ do_mismatch = CONFIG_BOOLEAN_NO;
+ else
+ do_mismatch = CONFIG_BOOLEAN_YES;
+ }
+ else
+ do_mismatch = do_mismatch_config;
+ }
+
+ if(likely(do_mismatch)) {
+ for(raid_idx = 0; raid_idx < raids_num ; raid_idx++) {
+ char filename[FILENAME_MAX + 1];
+ struct raid *raid = &raids[raid_idx];
+
+ if(likely(raid->redundant)) {
+ if(unlikely(!raid->mismatch_cnt_filename)) {
+ snprintfz(filename, FILENAME_MAX, mismatch_cnt_filename, raid->name);
+ raid->mismatch_cnt_filename = strdupz(filename);
+ }
+ if(unlikely(read_single_number_file(raid->mismatch_cnt_filename, &raid->mismatch_cnt))) {
+ error("Cannot read file '%s'", raid->mismatch_cnt_filename);
+ do_mismatch = CONFIG_BOOLEAN_NO;
+ error("Monitoring for mismatch count has been disabled");
+ break;
+ }
+ }
+ }
+ }
+
+ // check for disappeared raids
+ for(old_raid_idx = 0; old_raid_idx < old_raids_allocated; old_raid_idx++) {
+ struct old_raid *old_raid = &old_raids[old_raid_idx];
+ int found = 0;
+
+ for(raid_idx = 0; raid_idx < raids_num ; raid_idx++) {
+ struct raid *raid = &raids[raid_idx];
+
+ if(unlikely(raid->hash == old_raid->hash
+ && !strcmp(raid->name, old_raid->name)
+ && raid->redundant == old_raid->redundant)) found = 1;
+ }
+
+ old_raid->found = found;
+ }
+
+ int raid_disappeared = 0;
+ for(old_raid_idx = 0; old_raid_idx < old_raids_allocated; old_raid_idx++) {
+ struct old_raid *old_raid = &old_raids[old_raid_idx];
+
+ if(unlikely(!old_raid->found)) {
+ if(likely(make_charts_obsolete)) {
+ make_chart_obsolete(old_raid->name, "disks");
+ make_chart_obsolete(old_raid->name, "mismatch");
+ make_chart_obsolete(old_raid->name, "operation");
+ make_chart_obsolete(old_raid->name, "finish");
+ make_chart_obsolete(old_raid->name, "speed");
+ make_chart_obsolete(old_raid->name, "availability");
+ }
+ raid_disappeared = 1;
+ }
+ }
+
+ // allocate memory for nonredundant arrays
+ if(unlikely(raid_disappeared || old_raids_allocated != raids_num)) {
+ for(old_raid_idx = 0; old_raid_idx < old_raids_allocated; old_raid_idx++) {
+ freez(old_raids[old_raid_idx].name);
+ }
+ if(likely(raids_num)) {
+ old_raids = reallocz(old_raids, sizeof(struct old_raid) * raids_num);
+ memset(old_raids, 0, sizeof(struct old_raid) * raids_num);
+ }
+ else {
+ freez(old_raids);
+ old_raids = NULL;
+ }
+ old_raids_allocated = raids_num;
+ for(old_raid_idx = 0; old_raid_idx < old_raids_allocated; old_raid_idx++) {
+ struct old_raid *old_raid = &old_raids[old_raid_idx];
+ struct raid *raid = &raids[old_raid_idx];
+
+ old_raid->name = strdupz(raid->name);
+ old_raid->hash = raid->hash;
+ old_raid->redundant = raid->redundant;
+ }
+ }
+
+ // --------------------------------------------------------------------
+
+ if(likely(do_health && redundant_num)) {
+ static RRDSET *st_mdstat_health = NULL;
+ if(unlikely(!st_mdstat_health)) {
+ st_mdstat_health = rrdset_create_localhost(
+ "mdstat"
+ , "mdstat_health"
+ , NULL
+ , "health"
+ , "md.health"
+ , "Faulty Devices In MD"
+ , "failed disks"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_MDSTAT_NAME
+ , NETDATA_CHART_PRIO_MDSTAT_HEALTH
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ rrdset_isnot_obsolete(st_mdstat_health);
+ }
+ else
+ rrdset_next(st_mdstat_health);
+
+ if(!redundant_num) {
+ if(likely(make_charts_obsolete)) make_chart_obsolete("mdstat", "health");
+ }
+ else {
+ for(raid_idx = 0; raid_idx < raids_num; raid_idx++) {
+ struct raid *raid = &raids[raid_idx];
+
+ if(likely(raid->redundant)) {
+ if(unlikely(!raid->rd_health && !(raid->rd_health = rrddim_find(st_mdstat_health, raid->name))))
+ raid->rd_health = rrddim_add(st_mdstat_health, raid->name, NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+
+ rrddim_set_by_pointer(st_mdstat_health, raid->rd_health, raid->failed_disks);
+ }
+ }
+
+ rrdset_done(st_mdstat_health);
+ }
+ }
+
+ // --------------------------------------------------------------------
+
+ for(raid_idx = 0; raid_idx < raids_num ; raid_idx++) {
+ struct raid *raid = &raids[raid_idx];
+ char id[50 + 1];
+ char family[50 + 1];
+
+ if(likely(raid->redundant)) {
+ if(likely(do_disks)) {
+ snprintfz(id, 50, "%s_disks", raid->name);
+
+ if(unlikely(!raid->st_disks && !(raid->st_disks = rrdset_find_byname_localhost(id)))) {
+ snprintfz(family, 50, "%s", raid->name);
+
+ raid->st_disks = rrdset_create_localhost(
+ "mdstat"
+ , id
+ , NULL
+ , family
+ , "md.disks"
+ , "Disks Stats"
+ , "disks"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_MDSTAT_NAME
+ , NETDATA_CHART_PRIO_MDSTAT_DISKS + raid_idx * 10
+ , update_every
+ , RRDSET_TYPE_STACKED
+ );
+
+ rrdset_isnot_obsolete(raid->st_disks);
+ }
+ else
+ rrdset_next(raid->st_disks);
+
+ if(unlikely(!raid->rd_inuse && !(raid->rd_inuse = rrddim_find(raid->st_disks, "inuse"))))
+ raid->rd_inuse = rrddim_add(raid->st_disks, "inuse", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ if(unlikely(!raid->rd_total && !(raid->rd_total = rrddim_find(raid->st_disks, "total"))))
+ raid->rd_total = rrddim_add(raid->st_disks, "total", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+
+ rrddim_set_by_pointer(raid->st_disks, raid->rd_inuse, raid->inuse_disks);
+ rrddim_set_by_pointer(raid->st_disks, raid->rd_total, raid->total_disks);
+
+ rrdset_done(raid->st_disks);
+ }
+
+ // --------------------------------------------------------------------
+
+ if(likely(do_mismatch)) {
+ snprintfz(id, 50, "%s_mismatch", raid->name);
+
+ if(unlikely(!raid->st_mismatch_cnt && !(raid->st_mismatch_cnt = rrdset_find_byname_localhost(id)))) {
+ snprintfz(family, 50, "%s", raid->name);
+
+ raid->st_mismatch_cnt = rrdset_create_localhost(
+ "mdstat"
+ , id
+ , NULL
+ , family
+ , "md.mismatch_cnt"
+ , "Mismatch Count"
+ , "unsynchronized blocks"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_MDSTAT_NAME
+ , NETDATA_CHART_PRIO_MDSTAT_MISMATCH + raid_idx * 10
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ rrdset_isnot_obsolete(raid->st_mismatch_cnt);
+ }
+ else
+ rrdset_next(raid->st_mismatch_cnt);
+
+ if(unlikely(!raid->rd_mismatch_cnt && !(raid->rd_mismatch_cnt = rrddim_find(raid->st_mismatch_cnt, "count"))))
+ raid->rd_mismatch_cnt = rrddim_add(raid->st_mismatch_cnt, "count", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+
+ rrddim_set_by_pointer(raid->st_mismatch_cnt, raid->rd_mismatch_cnt, raid->mismatch_cnt);
+
+ rrdset_done(raid->st_mismatch_cnt);
+ }
+
+ // --------------------------------------------------------------------
+
+ if(likely(do_operations)) {
+ snprintfz(id, 50, "%s_operation", raid->name);
+
+ if(unlikely(!raid->st_operation && !(raid->st_operation = rrdset_find_byname_localhost(id)))) {
+ snprintfz(family, 50, "%s", raid->name);
+
+ raid->st_operation = rrdset_create_localhost(
+ "mdstat"
+ , id
+ , NULL
+ , family
+ , "md.status"
+ , "Current Status"
+ , "percent"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_MDSTAT_NAME
+ , NETDATA_CHART_PRIO_MDSTAT_OPERATION + raid_idx * 10
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ rrdset_isnot_obsolete(raid->st_operation);
+ }
+ else
+ rrdset_next(raid->st_operation);
+
+ if(unlikely(!raid->rd_check && !(raid->rd_check = rrddim_find(raid->st_operation, "check"))))
+ raid->rd_check = rrddim_add(raid->st_operation, "check", NULL, 1, 100, RRD_ALGORITHM_ABSOLUTE);
+ if(unlikely(!raid->rd_resync && !(raid->rd_resync = rrddim_find(raid->st_operation, "resync"))))
+ raid->rd_resync = rrddim_add(raid->st_operation, "resync", NULL, 1, 100, RRD_ALGORITHM_ABSOLUTE);
+ if(unlikely(!raid->rd_recovery && !(raid->rd_recovery = rrddim_find(raid->st_operation, "recovery"))))
+ raid->rd_recovery = rrddim_add(raid->st_operation, "recovery", NULL, 1, 100, RRD_ALGORITHM_ABSOLUTE);
+ if(unlikely(!raid->rd_reshape && !(raid->rd_reshape = rrddim_find(raid->st_operation, "reshape"))))
+ raid->rd_reshape = rrddim_add(raid->st_operation, "reshape", NULL, 1, 100, RRD_ALGORITHM_ABSOLUTE);
+
+ rrddim_set_by_pointer(raid->st_operation, raid->rd_check, raid->check);
+ rrddim_set_by_pointer(raid->st_operation, raid->rd_resync, raid->resync);
+ rrddim_set_by_pointer(raid->st_operation, raid->rd_recovery, raid->recovery);
+ rrddim_set_by_pointer(raid->st_operation, raid->rd_reshape, raid->reshape);
+
+ rrdset_done(raid->st_operation);
+
+ // --------------------------------------------------------------------
+
+ snprintfz(id, 50, "%s_finish", raid->name);
+
+ if(unlikely(!raid->st_finish && !(raid->st_finish = rrdset_find_byname_localhost(id)))) {
+ snprintfz(family, 50, "%s", raid->name);
+
+ raid->st_finish = rrdset_create_localhost(
+ "mdstat"
+ , id
+ , NULL
+ , family
+ , "md.rate"
+ , "Approximate Time Unit Finish"
+ , "seconds"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_MDSTAT_NAME
+ , NETDATA_CHART_PRIO_MDSTAT_FINISH + raid_idx * 10
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ rrdset_isnot_obsolete(raid->st_finish);
+ }
+ else
+ rrdset_next(raid->st_finish);
+
+ if(unlikely(!raid->rd_finish_in && !(raid->rd_finish_in = rrddim_find(raid->st_finish, "finish_in"))))
+ raid->rd_finish_in = rrddim_add(raid->st_finish, "finish_in", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+
+ rrddim_set_by_pointer(raid->st_finish, raid->rd_finish_in, raid->finish_in);
+
+ rrdset_done(raid->st_finish);
+
+ // --------------------------------------------------------------------
+
+ snprintfz(id, 50, "%s_speed", raid->name);
+
+ if(unlikely(!raid->st_speed && !(raid->st_speed = rrdset_find_byname_localhost(id)))) {
+ snprintfz(family, 50, "%s", raid->name);
+
+ raid->st_speed = rrdset_create_localhost(
+ "mdstat"
+ , id
+ , NULL
+ , family
+ , "md.rate"
+ , "Operation Speed"
+ , "KiB/s"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_MDSTAT_NAME
+ , NETDATA_CHART_PRIO_MDSTAT_SPEED + raid_idx * 10
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ rrdset_isnot_obsolete(raid->st_speed);
+ }
+ else
+ rrdset_next(raid->st_speed);
+
+ if(unlikely(!raid->rd_speed && !(raid->rd_speed = rrddim_find(raid->st_speed, "speed"))))
+ raid->rd_speed = rrddim_add(raid->st_speed, "speed", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+
+ rrddim_set_by_pointer(raid->st_speed, raid->rd_speed, raid->speed);
+
+ rrdset_done(raid->st_speed);
+ }
+ }
+ else {
+
+ // --------------------------------------------------------------------
+
+ if(likely(do_nonredundant)) {
+ snprintfz(id, 50, "%s_availability", raid->name);
+
+ if(unlikely(!raid->st_nonredundant && !(raid->st_nonredundant = rrdset_find_localhost(id)))) {
+ snprintfz(family, 50, "%s", raid->name);
+
+ raid->st_nonredundant = rrdset_create_localhost(
+ "mdstat"
+ , id
+ , NULL
+ , family
+ , "md.nonredundant"
+ , "Nonredundant Array Availability"
+ , "boolean"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_MDSTAT_NAME
+ , NETDATA_CHART_PRIO_MDSTAT_NONREDUNDANT + raid_idx * 10
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ rrdset_isnot_obsolete(raid->st_nonredundant);
+ }
+ else
+ rrdset_next(raid->st_nonredundant);
+
+ if(unlikely(!raid->rd_nonredundant && !(raid->rd_nonredundant = rrddim_find(raid->st_nonredundant, "available"))))
+ raid->rd_nonredundant = rrddim_add(raid->st_nonredundant, "available", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+
+ rrddim_set_by_pointer(raid->st_nonredundant, raid->rd_nonredundant, 1);
+
+ rrdset_done(raid->st_nonredundant);
+ }
+ }
+ }
+
+ return 0;
+}
diff --git a/collectors/proc.plugin/proc_meminfo.c b/collectors/proc.plugin/proc_meminfo.c
index f77159eb..ae399c44 100644
--- a/collectors/proc.plugin/proc_meminfo.c
+++ b/collectors/proc.plugin/proc_meminfo.c
@@ -146,7 +146,7 @@ int do_proc_meminfo(int update_every, usec_t dt) {
// --------------------------------------------------------------------
// http://stackoverflow.com/questions/3019748/how-to-reliably-measure-available-memory-in-linux
- unsigned long long MemCached = Cached + Slab;
+ unsigned long long MemCached = Cached + SReclaimable;
unsigned long long MemUsed = MemTotal - MemFree - MemCached - Buffers;
if(do_ram) {
@@ -162,7 +162,7 @@ int do_proc_meminfo(int update_every, usec_t dt) {
, "ram"
, NULL
, "System RAM"
- , "MB"
+ , "MiB"
, PLUGIN_PROC_NAME
, PLUGIN_PROC_MODULE_MEMINFO_NAME
, NETDATA_CHART_PRIO_SYSTEM_RAM
@@ -197,7 +197,7 @@ int do_proc_meminfo(int update_every, usec_t dt) {
, "system"
, NULL
, "Available RAM for applications"
- , "MB"
+ , "MiB"
, PLUGIN_PROC_NAME
, PLUGIN_PROC_MODULE_MEMINFO_NAME
, NETDATA_CHART_PRIO_MEM_SYSTEM_AVAILABLE
@@ -233,7 +233,7 @@ int do_proc_meminfo(int update_every, usec_t dt) {
, "swap"
, NULL
, "System Swap"
- , "MB"
+ , "MiB"
, PLUGIN_PROC_NAME
, PLUGIN_PROC_MODULE_MEMINFO_NAME
, NETDATA_CHART_PRIO_SYSTEM_SWAP
@@ -270,7 +270,7 @@ int do_proc_meminfo(int update_every, usec_t dt) {
, "ecc"
, NULL
, "Corrupted Memory, detected by ECC"
- , "MB"
+ , "MiB"
, PLUGIN_PROC_NAME
, PLUGIN_PROC_MODULE_MEMINFO_NAME
, NETDATA_CHART_PRIO_MEM_HW
@@ -303,7 +303,7 @@ int do_proc_meminfo(int update_every, usec_t dt) {
, "system"
, NULL
, "Committed (Allocated) Memory"
- , "MB"
+ , "MiB"
, PLUGIN_PROC_NAME
, PLUGIN_PROC_MODULE_MEMINFO_NAME
, NETDATA_CHART_PRIO_MEM_SYSTEM_COMMITTED
@@ -336,7 +336,7 @@ int do_proc_meminfo(int update_every, usec_t dt) {
, "kernel"
, NULL
, "Writeback Memory"
- , "MB"
+ , "MiB"
, PLUGIN_PROC_NAME
, PLUGIN_PROC_MODULE_MEMINFO_NAME
, NETDATA_CHART_PRIO_MEM_KERNEL
@@ -376,7 +376,7 @@ int do_proc_meminfo(int update_every, usec_t dt) {
, "kernel"
, NULL
, "Memory Used by Kernel"
- , "MB"
+ , "MiB"
, PLUGIN_PROC_NAME
, PLUGIN_PROC_MODULE_MEMINFO_NAME
, NETDATA_CHART_PRIO_MEM_KERNEL + 1
@@ -415,7 +415,7 @@ int do_proc_meminfo(int update_every, usec_t dt) {
, "slab"
, NULL
, "Reclaimable Kernel Memory"
- , "MB"
+ , "MiB"
, PLUGIN_PROC_NAME
, PLUGIN_PROC_MODULE_MEMINFO_NAME
, NETDATA_CHART_PRIO_MEM_SLAB
@@ -452,7 +452,7 @@ int do_proc_meminfo(int update_every, usec_t dt) {
, "hugepages"
, NULL
, "Dedicated HugePages Memory"
- , "MB"
+ , "MiB"
, PLUGIN_PROC_NAME
, PLUGIN_PROC_MODULE_MEMINFO_NAME
, NETDATA_CHART_PRIO_MEM_HUGEPAGES + 1
@@ -493,7 +493,7 @@ int do_proc_meminfo(int update_every, usec_t dt) {
, "hugepages"
, NULL
, "Transparent HugePages Memory"
- , "MB"
+ , "MiB"
, PLUGIN_PROC_NAME
, PLUGIN_PROC_MODULE_MEMINFO_NAME
, NETDATA_CHART_PRIO_MEM_HUGEPAGES
diff --git a/collectors/proc.plugin/proc_net_rpc_nfsd.c b/collectors/proc.plugin/proc_net_rpc_nfsd.c
index 20b87e9d..29ef7a39 100644
--- a/collectors/proc.plugin/proc_net_rpc_nfsd.c
+++ b/collectors/proc.plugin/proc_net_rpc_nfsd.c
@@ -657,7 +657,7 @@ int do_proc_net_rpc_nfsd(int update_every, usec_t dt) {
, "threads"
, NULL
, "NFS Server Threads Full Count"
- , "ops/s"
+ , "events"
, PLUGIN_PROC_NAME
, PLUGIN_PROC_MODULE_NFSD_NAME
, NETDATA_CHART_PRIO_NFSD_THREADS_FULLCNT
diff --git a/collectors/proc.plugin/proc_net_sockstat.c b/collectors/proc.plugin/proc_net_sockstat.c
index 0c3b6e19..ff9cc523 100644
--- a/collectors/proc.plugin/proc_net_sockstat.c
+++ b/collectors/proc.plugin/proc_net_sockstat.c
@@ -305,7 +305,7 @@ int do_proc_net_sockstat(int update_every, usec_t dt) {
, "tcp"
, NULL
, "IPv4 TCP Sockets Memory"
- , "KB"
+ , "KiB"
, PLUGIN_PROC_NAME
, PLUGIN_PROC_MODULE_NET_SOCKSTAT_NAME
, NETDATA_CHART_PRIO_IPV4_TCP_MEM
@@ -369,7 +369,7 @@ int do_proc_net_sockstat(int update_every, usec_t dt) {
, "udp"
, NULL
, "IPv4 UDP Sockets Memory"
- , "KB"
+ , "KiB"
, PLUGIN_PROC_NAME
, PLUGIN_PROC_MODULE_NET_SOCKSTAT_NAME
, NETDATA_CHART_PRIO_IPV4_UDP_MEM
@@ -497,7 +497,7 @@ int do_proc_net_sockstat(int update_every, usec_t dt) {
, "fragments"
, NULL
, "IPv4 FRAG Sockets Memory"
- , "KB"
+ , "KiB"
, PLUGIN_PROC_NAME
, PLUGIN_PROC_MODULE_NET_SOCKSTAT_NAME
, NETDATA_CHART_PRIO_IPV4_FRAGMENTS_MEM
diff --git a/collectors/proc.plugin/proc_net_stat_synproxy.c b/collectors/proc.plugin/proc_net_stat_synproxy.c
index f0c1f47c..312ded5b 100644
--- a/collectors/proc.plugin/proc_net_stat_synproxy.c
+++ b/collectors/proc.plugin/proc_net_stat_synproxy.c
@@ -101,7 +101,7 @@ int do_proc_net_stat_synproxy(int update_every, usec_t dt) {
, RRD_TYPE_NET_STAT_SYNPROXY
, NULL
, "SYNPROXY SYN Packets received"
- , "SYN/s"
+ , "packets/s"
, PLUGIN_PROC_NAME
, PLUGIN_PROC_MODULE_SYNPROXY_NAME
, NETDATA_CHART_PRIO_SYNPROXY_SYN_RECEIVED
diff --git a/collectors/proc.plugin/proc_spl_kstat_zfs.c b/collectors/proc.plugin/proc_spl_kstat_zfs.c
index a96b236c..c6557289 100644
--- a/collectors/proc.plugin/proc_spl_kstat_zfs.c
+++ b/collectors/proc.plugin/proc_spl_kstat_zfs.c
@@ -10,7 +10,9 @@ extern struct arcstats arcstats;
int do_proc_spl_kstat_zfs_arcstats(int update_every, usec_t dt) {
(void)dt;
+ static int show_zero_charts = 0, do_zfs_stats = 0;
static procfile *ff = NULL;
+ static char *dirname = NULL;
static ARL_BASE *arl_base = NULL;
arcstats.l2exist = -1;
@@ -117,8 +119,45 @@ int do_proc_spl_kstat_zfs_arcstats(int update_every, usec_t dt) {
ff = procfile_open(config_get("plugin:proc:" ZFS_PROC_ARCSTATS, "filename to monitor", filename), " \t:", PROCFILE_FLAG_DEFAULT);
if(unlikely(!ff))
return 1;
+
+ snprintfz(filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/proc/spl/kstat/zfs");
+ dirname = config_get("plugin:proc:" ZFS_PROC_ARCSTATS, "directory to monitor", filename);
+
+ show_zero_charts = config_get_boolean_ondemand("plugin:proc:" ZFS_PROC_ARCSTATS, "show zero charts", CONFIG_BOOLEAN_NO);
+ if(unlikely(show_zero_charts == CONFIG_BOOLEAN_YES))
+ do_zfs_stats = 1;
+ }
+
+ // check if any pools exist
+ if(likely(!do_zfs_stats)) {
+ DIR *dir = opendir(dirname);
+ if(unlikely(!dir)) {
+ error("Cannot read directory '%s'", dirname);
+ return 1;
+ }
+
+ struct dirent *de = NULL;
+ while(likely(de = readdir(dir))) {
+ if(likely(de->d_type == DT_DIR
+ && (
+ (de->d_name[0] == '.' && de->d_name[1] == '\0')
+ || (de->d_name[0] == '.' && de->d_name[1] == '.' && de->d_name[2] == '\0')
+ )))
+ continue;
+
+ if(unlikely(de->d_type == DT_LNK || de->d_type == DT_DIR)) {
+ do_zfs_stats = 1;
+ break;
+ }
+ }
+
+ closedir(dir);
}
+ // do not show ZFS filesystem metrics if there haven't been any pools in the system yet
+ if(unlikely(!do_zfs_stats))
+ return 0;
+
ff = procfile_readall(ff);
if(unlikely(!ff))
return 0; // we return 0, so that we will retry to open it next time
@@ -148,8 +187,8 @@ int do_proc_spl_kstat_zfs_arcstats(int update_every, usec_t dt) {
if(unlikely(arcstats.l2exist == -1))
arcstats.l2exist = 0;
- generate_charts_arcstats(PLUGIN_PROC_NAME, ZFS_PROC_ARCSTATS, update_every);
- generate_charts_arc_summary(PLUGIN_PROC_NAME, ZFS_PROC_ARCSTATS, update_every);
+ generate_charts_arcstats(PLUGIN_PROC_NAME, ZFS_PROC_ARCSTATS, show_zero_charts, update_every);
+ generate_charts_arc_summary(PLUGIN_PROC_NAME, ZFS_PROC_ARCSTATS, show_zero_charts, update_every);
return 0;
}
diff --git a/collectors/proc.plugin/proc_stat.c b/collectors/proc.plugin/proc_stat.c
index 931b415a..f345a39d 100755..100644
--- a/collectors/proc.plugin/proc_stat.c
+++ b/collectors/proc.plugin/proc_stat.c
@@ -52,6 +52,7 @@ struct cpu_chart {
};
static int keep_per_core_fds_open = CONFIG_BOOLEAN_YES;
+static int keep_cpuidle_fds_open = CONFIG_BOOLEAN_YES;
static int read_per_core_files(struct cpu_chart *all_cpu_charts, size_t len, size_t index) {
char buf[50 + 1];
@@ -161,7 +162,7 @@ static int read_per_core_time_in_state_files(struct cpu_chart *all_cpu_charts, s
// the whole period under schedutil governor?
// freez(tsf->last_ticks);
// tsf->last_ticks = NULL;
- // tsf->last_ticks_len = 0;
+ // tsf->last_ticks_len = 0;
continue;
}
@@ -237,15 +238,249 @@ static void chart_per_core_files(struct cpu_chart *all_cpu_charts, size_t len, s
}
}
+struct cpuidle_state {
+ char *name;
+
+ char *time_filename;
+ int time_fd;
+
+ collected_number value;
+
+ RRDDIM *rd;
+};
+
+struct per_core_cpuidle_chart {
+ RRDSET *st;
+
+ RRDDIM *active_time_rd;
+ collected_number active_time;
+ collected_number last_active_time;
+
+ struct cpuidle_state *cpuidle_state;
+ size_t cpuidle_state_len;
+ int rescan_cpu_states;
+};
+
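+// the kernel refreshes the cpuidle time counters in sysfs only when a core
+// exits an idle state; if schedstat shows no active time on a core since
+// the last iteration, a short-lived thread is pinned to that core purely to
+// wake it up, so that the counters read afterwards are current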
+static void* wake_cpu_thread(void* core) {
+ pthread_t thread;
+ cpu_set_t cpu_set;
+ static size_t cpu_wakeups = 0;
+ static int errors = 0;
+
+ CPU_ZERO(&cpu_set);
+ CPU_SET(*(int*)core, &cpu_set);
+
+ thread = pthread_self();
+ if(unlikely(pthread_setaffinity_np(thread, sizeof(cpu_set_t), &cpu_set))) {
+ if(unlikely(errors < 8)) {
+ error("Cannot set CPU affinity for core %d", *(int*)core);
+ errors++;
+ }
+ else if(unlikely(errors < 9)) {
+ error("CPU affinity errors are disabled");
+ errors++;
+ }
+ }
+
+ // Make the CPU core do something to force it to update its idle counters
+ cpu_wakeups++;
+
+ return 0;
+}
+
+static int read_schedstat(char *schedstat_filename, struct per_core_cpuidle_chart **cpuidle_charts_address, size_t *schedstat_cores_found) {
+ static size_t cpuidle_charts_len = 0;
+ static procfile *ff = NULL;
+ struct per_core_cpuidle_chart *cpuidle_charts = *cpuidle_charts_address;
+ size_t cores_found = 0;
+
+ if(unlikely(!ff)) {
+ ff = procfile_open(schedstat_filename, " \t:", PROCFILE_FLAG_DEFAULT);
+ if(unlikely(!ff)) return 1;
+ }
+
+ ff = procfile_readall(ff);
+ if(unlikely(!ff)) return 1;
+
+ size_t lines = procfile_lines(ff), l;
+ size_t words;
+
+ for(l = 0; l < lines ;l++) {
+ char *row_key = procfile_lineword(ff, l, 0);
+
+ // faster strncmp(row_key, "cpu", 3) == 0
+ if(likely(row_key[0] == 'c' && row_key[1] == 'p' && row_key[2] == 'u')) {
+ words = procfile_linewords(ff, l);
+ if(unlikely(words < 10)) {
+ error("Cannot read /proc/schedstat cpu line. Expected 9 params, read %zu.", words);
+ return 1;
+ }
+ cores_found++;
+
+ size_t core = str2ul(&row_key[3]);
+ if(unlikely(core >= cores_found)) {
+ error("Core %zu found but no more than %zu cores were expected.", core, cores_found);
+ return 1;
+ }
+
+ if(unlikely(cpuidle_charts_len < cores_found)) {
+ cpuidle_charts = reallocz(cpuidle_charts, sizeof(struct per_core_cpuidle_chart) * cores_found);
+ *cpuidle_charts_address = cpuidle_charts;
+ memset(cpuidle_charts + cpuidle_charts_len, 0, sizeof(struct per_core_cpuidle_chart) * (cores_found - cpuidle_charts_len));
+ cpuidle_charts_len = cores_found;
+ }
+
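+            // field 7 of a schedstat cpu line is the total time spent
+            // running by tasks on this core (in nanoseconds on modern
+            // kernels); divide by 1000 to get microseconds, matching the
+            // units of the cpuidle time files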
+ cpuidle_charts[core].active_time = str2ull(procfile_lineword(ff, l, 7)) / 1000;
+ }
+ }
+
+ *schedstat_cores_found = cores_found;
+ return 0;
+}
+
+static int read_one_state(char *buf, const char *filename, int *fd) {
+ ssize_t ret = read(*fd, buf, 50);
+
+ if(unlikely(ret <= 0)) {
+ // cannot read that file
+ error("Cannot read file '%s'", filename);
+ close(*fd);
+ *fd = -1;
+ return 0;
+ }
+ else {
+ // successful read
+
+ // terminate the buffer
+ buf[ret - 1] = '\0';
+
+ if(unlikely(keep_cpuidle_fds_open != CONFIG_BOOLEAN_YES)) {
+ close(*fd);
+ *fd = -1;
+ }
+ else if(lseek(*fd, 0, SEEK_SET) == -1) {
+ error("Cannot seek in file '%s'", filename);
+ close(*fd);
+ *fd = -1;
+ }
+ }
+
+ return 1;
+}
+
+static int read_cpuidle_states(char *cpuidle_name_filename , char *cpuidle_time_filename, struct per_core_cpuidle_chart *cpuidle_charts, size_t core) {
+ char filename[FILENAME_MAX + 1];
+ static char next_state_filename[FILENAME_MAX + 1];
+ struct stat stbuf;
+ struct per_core_cpuidle_chart *cc = &cpuidle_charts[core];
+ size_t state;
+
+ if(unlikely(!cc->cpuidle_state_len || cc->rescan_cpu_states)) {
+ int state_file_found = 1; // check at least one state
+
+ if(cc->cpuidle_state_len) {
+ for(state = 0; state < cc->cpuidle_state_len; state++) {
+ freez(cc->cpuidle_state[state].name);
+
+ freez(cc->cpuidle_state[state].time_filename);
+ close(cc->cpuidle_state[state].time_fd);
+ cc->cpuidle_state[state].time_fd = -1;
+ }
+
+ freez(cc->cpuidle_state);
+ cc->cpuidle_state = NULL;
+ cc->cpuidle_state_len = 0;
+
+ cc->active_time_rd = NULL;
+ cc->st = NULL;
+ }
+
+ while(likely(state_file_found)) {
+ snprintfz(filename, FILENAME_MAX, cpuidle_name_filename, core, cc->cpuidle_state_len);
+ if (stat(filename, &stbuf) == 0)
+ cc->cpuidle_state_len++;
+ else
+ state_file_found = 0;
+ }
+ snprintfz(next_state_filename, FILENAME_MAX, cpuidle_name_filename, core, cc->cpuidle_state_len);
+
+ cc->cpuidle_state = callocz(cc->cpuidle_state_len, sizeof(struct cpuidle_state));
+ memset(cc->cpuidle_state, 0, sizeof(struct cpuidle_state) * cc->cpuidle_state_len);
+
+ for(state = 0; state < cc->cpuidle_state_len; state++) {
+ char name_buf[50 + 1];
+ snprintfz(filename, FILENAME_MAX, cpuidle_name_filename, core, state);
+
+ int fd = open(filename, O_RDONLY, 0666);
+ if(unlikely(fd == -1)) {
+ error("Cannot open file '%s'", filename);
+ cc->rescan_cpu_states = 1;
+ return 1;
+ }
+
+ ssize_t r = read(fd, name_buf, 50);
+ if(unlikely(r < 1)) {
+ error("Cannot read file '%s'", filename);
+ close(fd);
+ cc->rescan_cpu_states = 1;
+ return 1;
+ }
+
+ name_buf[r - 1] = '\0'; // erase extra character
+ cc->cpuidle_state[state].name = strdupz(name_buf);
+ close(fd);
+
+ snprintfz(filename, FILENAME_MAX, cpuidle_time_filename, core, state);
+ cc->cpuidle_state[state].time_filename = strdupz(filename);
+ cc->cpuidle_state[state].time_fd = -1;
+ }
+
+ cc->rescan_cpu_states = 0;
+ }
+
+ for(state = 0; state < cc->cpuidle_state_len; state++) {
+
+ struct cpuidle_state *cs = &cc->cpuidle_state[state];
+
+ if(unlikely(cs->time_fd == -1)) {
+ cs->time_fd = open(cs->time_filename, O_RDONLY);
+ if (unlikely(cs->time_fd == -1)) {
+ error("Cannot open file '%s'", cs->time_filename);
+ cc->rescan_cpu_states = 1;
+ return 1;
+ }
+ }
+
+ char time_buf[50 + 1];
+ if(likely(read_one_state(time_buf, cs->time_filename, &cs->time_fd))) {
+ cs->value = str2ll(time_buf, NULL);
+ }
+ else {
+ cc->rescan_cpu_states = 1;
+ return 1;
+ }
+ }
+
+ // check if the number of states was increased
+ if(unlikely(stat(next_state_filename, &stbuf) == 0)) {
+ cc->rescan_cpu_states = 1;
+ return 1;
+ }
+
+ return 0;
+}
+
int do_proc_stat(int update_every, usec_t dt) {
(void)dt;
static struct cpu_chart *all_cpu_charts = NULL;
static size_t all_cpu_charts_size = 0;
static procfile *ff = NULL;
- static int do_cpu = -1, do_cpu_cores = -1, do_interrupts = -1, do_context = -1, do_forks = -1, do_processes = -1, do_core_throttle_count = -1, do_package_throttle_count = -1, do_cpu_freq = -1;
+ static int do_cpu = -1, do_cpu_cores = -1, do_interrupts = -1, do_context = -1, do_forks = -1, do_processes = -1,
+ do_core_throttle_count = -1, do_package_throttle_count = -1, do_cpu_freq = -1, do_cpuidle = -1;
static uint32_t hash_intr, hash_ctxt, hash_processes, hash_procs_running, hash_procs_blocked;
- static char *core_throttle_count_filename = NULL, *package_throttle_count_filename = NULL, *scaling_cur_freq_filename = NULL, *time_in_state_filename = NULL;
+ static char *core_throttle_count_filename = NULL, *package_throttle_count_filename = NULL, *scaling_cur_freq_filename = NULL,
+ *time_in_state_filename = NULL, *schedstat_filename = NULL, *cpuidle_name_filename = NULL, *cpuidle_time_filename = NULL;
static RRDVAR *cpus_var = NULL;
static int accurate_freq_avail = 0, accurate_freq_is_used = 0;
size_t cores_found = (size_t)processors;
@@ -265,6 +500,7 @@ int do_proc_stat(int update_every, usec_t dt) {
do_core_throttle_count = CONFIG_BOOLEAN_NO;
do_package_throttle_count = CONFIG_BOOLEAN_NO;
do_cpu_freq = CONFIG_BOOLEAN_NO;
+ do_cpuidle = CONFIG_BOOLEAN_NO;
}
else {
// the system has a reasonable number of processors
@@ -272,12 +508,23 @@ int do_proc_stat(int update_every, usec_t dt) {
do_core_throttle_count = CONFIG_BOOLEAN_AUTO;
do_package_throttle_count = CONFIG_BOOLEAN_NO;
do_cpu_freq = CONFIG_BOOLEAN_YES;
+ do_cpuidle = CONFIG_BOOLEAN_YES;
+ }
+ if(unlikely(processors > 24)) {
+ // the system has too many processors
+ keep_cpuidle_fds_open = CONFIG_BOOLEAN_NO;
+ }
+ else {
+ // the system has a reasonable number of processors
+ keep_cpuidle_fds_open = CONFIG_BOOLEAN_YES;
}
keep_per_core_fds_open = config_get_boolean("plugin:proc:/proc/stat", "keep per core files open", keep_per_core_fds_open);
+ keep_cpuidle_fds_open = config_get_boolean("plugin:proc:/proc/stat", "keep cpuidle files open", keep_cpuidle_fds_open);
do_core_throttle_count = config_get_boolean_ondemand("plugin:proc:/proc/stat", "core_throttle_count", do_core_throttle_count);
do_package_throttle_count = config_get_boolean_ondemand("plugin:proc:/proc/stat", "package_throttle_count", do_package_throttle_count);
do_cpu_freq = config_get_boolean_ondemand("plugin:proc:/proc/stat", "cpu frequency", do_cpu_freq);
+ do_cpuidle = config_get_boolean_ondemand("plugin:proc:/proc/stat", "cpu idle states", do_cpuidle);
hash_intr = simple_hash("intr");
hash_ctxt = simple_hash("ctxt");
@@ -297,6 +544,22 @@ int do_proc_stat(int update_every, usec_t dt) {
snprintfz(filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/sys/devices/system/cpu/%s/cpufreq/stats/time_in_state");
time_in_state_filename = config_get("plugin:proc:/proc/stat", "time_in_state filename to monitor", filename);
+
+ snprintfz(filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/proc/schedstat");
+ schedstat_filename = config_get("plugin:proc:/proc/stat", "schedstat filename to monitor", filename);
+
+ if(do_cpuidle != CONFIG_BOOLEAN_NO) {
+ struct stat stbuf;
+
+ if (stat(schedstat_filename, &stbuf))
+ do_cpuidle = CONFIG_BOOLEAN_NO;
+ }
+
+ snprintfz(filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/sys/devices/system/cpu/cpu%zu/cpuidle/state%zu/name");
+ cpuidle_name_filename = config_get("plugin:proc:/proc/stat", "cpuidle name filename to monitor", filename);
+
+ snprintfz(filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/sys/devices/system/cpu/cpu%zu/cpuidle/state%zu/time");
+ cpuidle_time_filename = config_get("plugin:proc:/proc/stat", "cpuidle time filename to monitor", filename);
}
if(unlikely(!ff)) {
@@ -407,7 +670,7 @@ int do_proc_stat(int update_every, usec_t dt) {
cpu_chart->files[CPU_FREQ_INDEX].fd = -1;
do_cpu_freq = CONFIG_BOOLEAN_YES;
}
-
+
snprintfz(filename, FILENAME_MAX, time_in_state_filename, id);
if (stat(filename, &stbuf) == 0) {
@@ -702,7 +965,7 @@ int do_proc_stat(int update_every, usec_t dt) {
, "MHz"
, PLUGIN_PROC_NAME
, PLUGIN_PROC_MODULE_STAT_NAME
- , 5003
+ , NETDATA_CHART_PRIO_CPUFREQ_SCALING_CUR_FREQ
, update_every
, RRDSET_TYPE_LINE
);
@@ -715,6 +978,80 @@ int do_proc_stat(int update_every, usec_t dt) {
}
}
+ // --------------------------------------------------------------------
+
+ static struct per_core_cpuidle_chart *cpuidle_charts = NULL;
+ size_t schedstat_cores_found = 0;
+
+ if(likely(do_cpuidle != CONFIG_BOOLEAN_NO && !read_schedstat(schedstat_filename, &cpuidle_charts, &schedstat_cores_found))) {
+ int cpu_states_updated = 0;
+ size_t core, state;
+
+ // proc.plugin runs on Linux systems only. Multi-platform compatibility is not needed here,
+ // so bare pthread functions are used to avoid unneeded overheads.
+ for(core = 0; core < schedstat_cores_found; core++) {
+ if(unlikely(!(cpuidle_charts[core].active_time - cpuidle_charts[core].last_active_time))) {
+ pthread_t thread;
+
+ if(unlikely(pthread_create(&thread, NULL, wake_cpu_thread, (void *)&core)))
+ error("Cannot create wake_cpu_thread");
+ else if(unlikely(pthread_join(thread, NULL)))
+ error("Cannot join wake_cpu_thread");
+ cpu_states_updated = 1;
+ }
+ }
+
+ if(unlikely(!cpu_states_updated || !read_schedstat(schedstat_filename, &cpuidle_charts, &schedstat_cores_found))) {
+ for(core = 0; core < schedstat_cores_found; core++) {
+ cpuidle_charts[core].last_active_time = cpuidle_charts[core].active_time;
+
+ int r = read_cpuidle_states(cpuidle_name_filename, cpuidle_time_filename, cpuidle_charts, core);
+ if(likely(r != -1 && (do_cpuidle == CONFIG_BOOLEAN_YES || r > 0))) {
+ do_cpuidle = CONFIG_BOOLEAN_YES;
+
+ char cpuidle_chart_id[RRD_ID_LENGTH_MAX + 1];
+ snprintfz(cpuidle_chart_id, RRD_ID_LENGTH_MAX, "cpu%zu_cpuidle", core);
+
+ if(unlikely(!cpuidle_charts[core].st)) {
+ cpuidle_charts[core].st = rrdset_create_localhost(
+ "cpu"
+ , cpuidle_chart_id
+ , NULL
+ , "cpuidle"
+ , "cpuidle.cpuidle"
+ , "C-state residency time"
+ , "percentage"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_STAT_NAME
+ , NETDATA_CHART_PRIO_CPUIDLE + core
+ , update_every
+ , RRDSET_TYPE_STACKED
+ );
+
+ char cpuidle_dim_id[RRD_ID_LENGTH_MAX + 1];
+ snprintfz(cpuidle_dim_id, RRD_ID_LENGTH_MAX, "cpu%zu_active_time", core);
+ cpuidle_charts[core].active_time_rd = rrddim_add(cpuidle_charts[core].st, cpuidle_dim_id, "C0 (active)", 1, 1, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL);
+ for(state = 0; state < cpuidle_charts[core].cpuidle_state_len; state++) {
+ snprintfz(cpuidle_dim_id, RRD_ID_LENGTH_MAX, "cpu%zu_cpuidle_state%zu_time", core, state);
+ cpuidle_charts[core].cpuidle_state[state].rd = rrddim_add(cpuidle_charts[core].st, cpuidle_dim_id,
+ cpuidle_charts[core].cpuidle_state[state].name,
+ 1, 1, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL);
+ }
+ }
+ else
+ rrdset_next(cpuidle_charts[core].st);
+
+ rrddim_set_by_pointer(cpuidle_charts[core].st, cpuidle_charts[core].active_time_rd, cpuidle_charts[core].active_time);
+ for(state = 0; state < cpuidle_charts[core].cpuidle_state_len; state++) {
+ rrddim_set_by_pointer(cpuidle_charts[core].st, cpuidle_charts[core].cpuidle_state[state].rd, cpuidle_charts[core].cpuidle_state[state].value);
+ }
+ rrdset_done(cpuidle_charts[core].st);
+ }
+ }
+ }
+ }
+
if(cpus_var)
rrdvar_custom_host_variable_set(localhost, cpus_var, cores_found);
diff --git a/collectors/proc.plugin/proc_vmstat.c b/collectors/proc.plugin/proc_vmstat.c
index f7c93c20..a9712b24 100644
--- a/collectors/proc.plugin/proc_vmstat.c
+++ b/collectors/proc.plugin/proc_vmstat.c
@@ -105,7 +105,7 @@ int do_proc_vmstat(int update_every, usec_t dt) {
, "swap"
, NULL
, "Swap I/O"
- , "kilobytes/s"
+ , "KiB/s"
, PLUGIN_PROC_NAME
, PLUGIN_PROC_MODULE_VMSTAT_NAME
, NETDATA_CHART_PRIO_SYSTEM_SWAPIO
@@ -137,7 +137,7 @@ int do_proc_vmstat(int update_every, usec_t dt) {
, "disk"
, NULL
, "Memory Paged from/to disk"
- , "kilobytes/s"
+ , "KiB/s"
, PLUGIN_PROC_NAME
, PLUGIN_PROC_MODULE_VMSTAT_NAME
, NETDATA_CHART_PRIO_SYSTEM_PGPGIO
@@ -169,7 +169,7 @@ int do_proc_vmstat(int update_every, usec_t dt) {
, "system"
, NULL
, "Memory Page Faults"
- , "page faults/s"
+ , "faults/s"
, PLUGIN_PROC_NAME
, PLUGIN_PROC_MODULE_VMSTAT_NAME
, NETDATA_CHART_PRIO_MEM_SYSTEM_PGFAULTS
diff --git a/collectors/proc.plugin/sys_class_power_supply.c b/collectors/proc.plugin/sys_class_power_supply.c
new file mode 100644
index 00000000..09cdc7c0
--- /dev/null
+++ b/collectors/proc.plugin/sys_class_power_supply.c
@@ -0,0 +1,383 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "plugin_proc.h"
+
+#define PLUGIN_PROC_MODULE_POWER_SUPPLY_NAME "/sys/class/power_supply"
+
+const char *ps_property_names[] = { "charge", "energy", "voltage"};
+const char *ps_property_titles[] = {"Battery charge", "Battery energy", "Power supply voltage"};
+const char *ps_property_units[] = { "Ah", "Wh", "V"};
+
+const char *ps_property_dim_names[] = {"empty_design", "empty", "now", "full", "full_design",
+ "empty_design", "empty", "now", "full", "full_design",
+ "min_design", "min", "now", "max", "max_design"};
+
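+// each power supply is a directory under /sys/class/power_supply with one
+// file per attribute; an illustrative layout (values are reported in
+// micro-units, hence the 1000000 divider used when dimensions are added):
+//
+//    /sys/class/power_supply/BAT0/capacity    -> 87        (percent)
+//    /sys/class/power_supply/BAT0/charge_now  -> 2800000   (uAh)
+//    /sys/class/power_supply/BAT0/voltage_now -> 12100000  (uV)
+//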
+struct ps_property_dim {
+ char *name;
+ char *filename;
+ int fd;
+
+ RRDDIM *rd;
+ unsigned long long value;
+
+ struct ps_property_dim *next;
+};
+
+struct ps_property {
+ char *name;
+ char *title;
+ char *units;
+
+ RRDSET *st;
+
+ struct ps_property_dim *property_dim_root;
+
+ struct ps_property *next;
+};
+
+struct capacity {
+ char *filename;
+ int fd;
+
+ RRDSET *st;
+ RRDDIM *rd;
+ unsigned long long value;
+};
+
+struct power_supply {
+ char *name;
+ uint32_t hash;
+ int found;
+
+ struct capacity *capacity;
+
+ struct ps_property *property_root;
+
+ struct power_supply *next;
+};
+
+static struct power_supply *power_supply_root = NULL;
+static int files_num = 0;
+
+void power_supply_free(struct power_supply *ps) {
+ if(likely(ps)) {
+
+ // free capacity structure
+ if(likely(ps->capacity)) {
+ if(likely(ps->capacity->st)) rrdset_is_obsolete(ps->capacity->st);
+ freez(ps->capacity->filename);
+ if(likely(ps->capacity->fd != -1)) close(ps->capacity->fd);
+ files_num--;
+ freez(ps->capacity);
+ }
+ freez(ps->name);
+
+ struct ps_property *pr = ps->property_root;
+ while(likely(pr)) {
+
+ // free dimensions
+ struct ps_property_dim *pd = pr->property_dim_root;
+ while(likely(pd)) {
+ freez(pd->name);
+ freez(pd->filename);
+ if(likely(pd->fd != -1)) close(pd->fd);
+ files_num--;
+ struct ps_property_dim *d = pd;
+ pd = pd->next;
+ freez(d);
+ }
+
+ // free properties
+ if(likely(pr->st)) rrdset_is_obsolete(pr->st);
+ freez(pr->name);
+ freez(pr->title);
+ freez(pr->units);
+ struct ps_property *p = pr;
+ pr = pr->next;
+ freez(p);
+ }
+
+ // remove power supply from linked list
+ if(likely(ps == power_supply_root)) {
+ power_supply_root = ps->next;
+ }
+ else {
+ struct power_supply *last;
+ for(last = power_supply_root; last && last->next != ps; last = last->next);
+ if(likely(last)) last->next = ps->next;
+ }
+
+ freez(ps);
+ }
+}
+
+int do_sys_class_power_supply(int update_every, usec_t dt) {
+ (void)dt;
+ static int do_capacity = -1, do_property[3] = {-1};
+ static int keep_fds_open = CONFIG_BOOLEAN_NO, keep_fds_open_config = -1;
+ static char *dirname = NULL;
+
+ if(unlikely(do_capacity == -1)) {
+ do_capacity = config_get_boolean("plugin:proc:/sys/class/power_supply", "battery capacity", CONFIG_BOOLEAN_YES);
+ do_property[0] = config_get_boolean("plugin:proc:/sys/class/power_supply", "battery charge", CONFIG_BOOLEAN_NO);
+ do_property[1] = config_get_boolean("plugin:proc:/sys/class/power_supply", "battery energy", CONFIG_BOOLEAN_NO);
+ do_property[2] = config_get_boolean("plugin:proc:/sys/class/power_supply", "power supply voltage", CONFIG_BOOLEAN_NO);
+
+ keep_fds_open_config = config_get_boolean_ondemand("plugin:proc:/sys/class/power_supply", "keep files open", CONFIG_BOOLEAN_AUTO);
+
+ char filename[FILENAME_MAX + 1];
+ snprintfz(filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/sys/class/power_supply");
+ dirname = config_get("plugin:proc:/sys/class/power_supply", "directory to monitor", filename);
+ }
+
+ DIR *dir = opendir(dirname);
+ if(unlikely(!dir)) {
+ error("Cannot read directory '%s'", dirname);
+ return 1;
+ }
+
+ struct dirent *de = NULL;
+ while(likely(de = readdir(dir))) {
+ if(likely(de->d_type == DT_DIR
+ && (
+ (de->d_name[0] == '.' && de->d_name[1] == '\0')
+ || (de->d_name[0] == '.' && de->d_name[1] == '.' && de->d_name[2] == '\0')
+ )))
+ continue;
+
+ if(likely(de->d_type == DT_LNK || de->d_type == DT_DIR)) {
+ uint32_t hash = simple_hash(de->d_name);
+
+ struct power_supply *ps;
+ for(ps = power_supply_root; ps; ps = ps->next) {
+ if(unlikely(ps->hash == hash && !strcmp(ps->name, de->d_name))) {
+ ps->found = 1;
+ break;
+ }
+ }
+
+ // allocate memory for power supply and initialize it
+ if(unlikely(!ps)) {
+ ps = callocz(sizeof(struct power_supply), 1);
+ ps->name = strdupz(de->d_name);
+ ps->hash = simple_hash(de->d_name);
+ ps->found = 1;
+ ps->next = power_supply_root;
+ power_supply_root = ps;
+
+ struct stat stbuf;
+ if(likely(do_capacity != CONFIG_BOOLEAN_NO)) {
+ char filename[FILENAME_MAX + 1];
+ snprintfz(filename, FILENAME_MAX, "%s/%s/%s", dirname, de->d_name, "capacity");
+ if (stat(filename, &stbuf) == 0) {
+ ps->capacity = callocz(sizeof(struct capacity), 1);
+ ps->capacity->filename = strdupz(filename);
+ ps->capacity->fd = -1;
+ files_num++;
+ }
+ }
+
+ // allocate memory and initialize structures for every property and file found
+ size_t pr_idx, pd_idx;
+ size_t prev_idx = 3; // there is no property with this index
+
+ for(pr_idx = 0; pr_idx < 3; pr_idx++) {
+ if(unlikely(do_property[pr_idx] != CONFIG_BOOLEAN_NO)) {
+ struct ps_property *pr = NULL;
+
+ for(pd_idx = pr_idx * 5; pd_idx < pr_idx * 5 + 5; pd_idx++) {
+
+ // check if file exists
+ char filename[FILENAME_MAX + 1];
+ snprintfz(filename, FILENAME_MAX, "%s/%s/%s_%s", dirname, de->d_name,
+ ps_property_names[pr_idx], ps_property_dim_names[pd_idx]);
+ if (stat(filename, &stbuf) == 0) {
+
+ // add chart
+ if(unlikely(prev_idx != pr_idx)) {
+ pr = callocz(sizeof(struct ps_property), 1);
+ pr->name = strdupz(ps_property_names[pr_idx]);
+ pr->title = strdupz(ps_property_titles[pr_idx]);
+ pr->units = strdupz(ps_property_units[pr_idx]);
+ prev_idx = pr_idx;
+ pr->next = ps->property_root;
+ ps->property_root = pr;
+ }
+
+ // add dimension
+ struct ps_property_dim *pd;
+                            pd = callocz(sizeof(struct ps_property_dim), 1);
+ pd->name = strdupz(ps_property_dim_names[pd_idx]);
+ pd->filename = strdupz(filename);
+ pd->fd = -1;
+ files_num++;
+ pd->next = pr->property_dim_root;
+ pr->property_dim_root = pd;
+ }
+ }
+ }
+ }
+ }
+
+ // read capacity file
+ if(likely(ps->capacity)) {
+ char buffer[30 + 1];
+
+ if(unlikely(ps->capacity->fd == -1)) {
+ ps->capacity->fd = open(ps->capacity->filename, O_RDONLY, 0666);
+                if(unlikely(ps->capacity->fd == -1)) {
+                    error("Cannot open file '%s'", ps->capacity->filename);
+                    power_supply_free(ps);
+                    continue; // 'ps' was freed, it must not be touched below
+                }
+ }
+
+ ssize_t r = read(ps->capacity->fd, buffer, 30);
+            if(unlikely(r < 1)) {
+                error("Cannot read file '%s'", ps->capacity->filename);
+                power_supply_free(ps);
+                continue; // 'ps' was freed, it must not be touched below
+            }
+ else {
+ buffer[r] = '\0';
+ ps->capacity->value = str2ull(buffer);
+ }
+
+ if(unlikely(!keep_fds_open)) {
+ close(ps->capacity->fd);
+ ps->capacity->fd = -1;
+ }
+ else if(unlikely(lseek(ps->capacity->fd, 0, SEEK_SET) == -1)) {
+ error("Cannot seek in file '%s'", ps->capacity->filename);
+ close(ps->capacity->fd);
+ ps->capacity->fd = -1;
+ }
+ }
+
+ // read property files
+ int read_error = 0;
+ struct ps_property *pr;
+        for(pr = ps->property_root; pr; pr = pr->next) {
+ struct ps_property_dim *pd;
+ for(pd = pr->property_dim_root; pd; pd = pd->next) {
+ char buffer[30 + 1];
+
+ if(unlikely(pd->fd == -1)) {
+ pd->fd = open(pd->filename, O_RDONLY, 0666);
+ if(unlikely(pd->fd == -1)) {
+ error("Cannot open file '%s'", pd->filename);
+ read_error = 1;
+ power_supply_free(ps);
+ break;
+ }
+ }
+
+ ssize_t r = read(pd->fd, buffer, 30);
+ if(unlikely(r < 1)) {
+ error("Cannot read file '%s'", pd->filename);
+ read_error = 1;
+ power_supply_free(ps);
+ break;
+ }
+ buffer[r] = '\0';
+ pd->value = str2ull(buffer);
+
+ if(unlikely(!keep_fds_open)) {
+ close(pd->fd);
+ pd->fd = -1;
+ }
+ else if(unlikely(lseek(pd->fd, 0, SEEK_SET) == -1)) {
+ error("Cannot seek in file '%s'", pd->filename);
+ close(pd->fd);
+ pd->fd = -1;
+ }
+            }
+            if(unlikely(read_error)) break; // 'ps' and its properties were freed above
+        }
+ }
+ }
+
+ closedir(dir);
+
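+    // in auto mode, keep the descriptors open only while the number of
+    // monitored files stays small, to avoid pinning a large number of file
+    // descriptors for the lifetime of the plugin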
+ keep_fds_open = keep_fds_open_config;
+ if(likely(keep_fds_open_config == CONFIG_BOOLEAN_AUTO)) {
+ if(unlikely(files_num > 32))
+ keep_fds_open = CONFIG_BOOLEAN_NO;
+ else
+ keep_fds_open = CONFIG_BOOLEAN_YES;
+ }
+
+ // --------------------------------------------------------------------
+
+ struct power_supply *ps = power_supply_root;
+ while(unlikely(ps)) {
+ if(unlikely(!ps->found)) {
+ struct power_supply *f = ps;
+ ps = ps->next;
+ power_supply_free(f);
+ continue;
+ }
+
+ if(likely(ps->capacity)) {
+ if(unlikely(!ps->capacity->st)) {
+ ps->capacity->st = rrdset_create_localhost(
+ "powersupply_capacity"
+ , ps->name
+ , NULL
+ , ps->name
+ , "powersupply.capacity"
+ , "Battery capacity"
+ , "percentage"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_POWER_SUPPLY_NAME
+ , NETDATA_CHART_PRIO_POWER_SUPPLY_CAPACITY
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+ }
+ else
+ rrdset_next(ps->capacity->st);
+
+ if(unlikely(!ps->capacity->rd)) ps->capacity->rd = rrddim_add(ps->capacity->st, "capacity", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ rrddim_set_by_pointer(ps->capacity->st, ps->capacity->rd, ps->capacity->value);
+
+ rrdset_done(ps->capacity->st);
+ }
+
+ struct ps_property *pr;
+ for(pr = ps->property_root; pr; pr = pr->next) {
+ if(unlikely(!pr->st)) {
+ char id[RRD_ID_LENGTH_MAX + 1], context[RRD_ID_LENGTH_MAX + 1];
+ snprintfz(id, RRD_ID_LENGTH_MAX, "powersupply_%s", pr->name);
+ snprintfz(context, RRD_ID_LENGTH_MAX, "powersupply.%s", pr->name);
+
+ pr->st = rrdset_create_localhost(
+ id
+ , ps->name
+ , NULL
+ , ps->name
+ , context
+ , pr->title
+ , pr->units
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_POWER_SUPPLY_NAME
+ , NETDATA_CHART_PRIO_POWER_SUPPLY_CAPACITY
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+ }
+ else
+ rrdset_next(pr->st);
+
+ struct ps_property_dim *pd;
+ for(pd = pr->property_dim_root; pd; pd = pd->next) {
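+ // sysfs reports these properties in micro-units (uAh, uWh, uV),
+ // hence the 1000000 divisor to chart base units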
+ if(unlikely(!pd->rd)) pd->rd = rrddim_add(pr->st, pd->name, NULL, 1, 1000000, RRD_ALGORITHM_ABSOLUTE);
+ rrddim_set_by_pointer(pr->st, pd->rd, pd->value);
+ }
+
+ rrdset_done(pr->st);
+ }
+
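+ // reset the flag for the next directory scan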
+ ps->found = 0;
+ ps = ps->next;
+ }
+
+ return 0;
+}
diff --git a/collectors/proc.plugin/sys_fs_btrfs.c b/collectors/proc.plugin/sys_fs_btrfs.c
index ed980cea..5aab24c1 100644
--- a/collectors/proc.plugin/sys_fs_btrfs.c
+++ b/collectors/proc.plugin/sys_fs_btrfs.c
@@ -558,7 +558,7 @@ int do_sys_fs_btrfs(int update_every, usec_t dt) {
, node->label
, "btrfs.disk"
, title
- , "MB"
+ , "MiB"
, PLUGIN_PROC_NAME
, PLUGIN_PROC_MODULE_BTRFS_NAME
, NETDATA_CHART_PRIO_BTRFS_DISK
@@ -614,7 +614,7 @@ int do_sys_fs_btrfs(int update_every, usec_t dt) {
, node->label
, "btrfs.data"
, title
- , "MB"
+ , "MiB"
, PLUGIN_PROC_NAME
, PLUGIN_PROC_MODULE_BTRFS_NAME
, NETDATA_CHART_PRIO_BTRFS_DATA
@@ -655,7 +655,7 @@ int do_sys_fs_btrfs(int update_every, usec_t dt) {
, node->label
, "btrfs.metadata"
, title
- , "MB"
+ , "MiB"
, PLUGIN_PROC_NAME
, PLUGIN_PROC_MODULE_BTRFS_NAME
, NETDATA_CHART_PRIO_BTRFS_METADATA
@@ -698,7 +698,7 @@ int do_sys_fs_btrfs(int update_every, usec_t dt) {
, node->label
, "btrfs.system"
, title
- , "MB"
+ , "MiB"
, PLUGIN_PROC_NAME
, PLUGIN_PROC_MODULE_BTRFS_NAME
, NETDATA_CHART_PRIO_BTRFS_SYSTEM
diff --git a/collectors/proc.plugin/sys_kernel_mm_ksm.c b/collectors/proc.plugin/sys_kernel_mm_ksm.c
index 0f5c79c4..0b64987c 100644
--- a/collectors/proc.plugin/sys_kernel_mm_ksm.c
+++ b/collectors/proc.plugin/sys_kernel_mm_ksm.c
@@ -105,7 +105,7 @@ int do_sys_kernel_mm_ksm(int update_every, usec_t dt) {
, "ksm"
, NULL
, "Kernel Same Page Merging"
- , "MB"
+ , "MiB"
, PLUGIN_PROC_NAME
, PLUGIN_PROC_MODULE_KSM_NAME
, NETDATA_CHART_PRIO_MEM_KSM
@@ -145,7 +145,7 @@ int do_sys_kernel_mm_ksm(int update_every, usec_t dt) {
, "ksm"
, NULL
, "Kernel Same Page Merging Savings"
- , "MB"
+ , "MiB"
, PLUGIN_PROC_NAME
, PLUGIN_PROC_MODULE_KSM_NAME
, NETDATA_CHART_PRIO_MEM_KSM_SAVINGS
diff --git a/collectors/proc.plugin/zfs_common.c b/collectors/proc.plugin/zfs_common.c
index 1aaceb90..330bcf18 100644
--- a/collectors/proc.plugin/zfs_common.c
+++ b/collectors/proc.plugin/zfs_common.c
@@ -4,7 +4,13 @@
struct arcstats arcstats = { 0 };
-void generate_charts_arcstats(const char *plugin, const char *module, int update_every) {
+void generate_charts_arcstats(const char *plugin, const char *module, int show_zero_charts, int update_every) {
+ static int do_arc_size = -1, do_l2_size = -1, do_reads = -1, do_l2bytes = -1, do_ahits = -1, do_dhits = -1, \
+ do_phits = -1, do_mhits = -1, do_l2hits = -1, do_list_hits = -1;
+
+ if(unlikely(do_arc_size == -1))
+ do_arc_size = do_l2_size = do_reads = do_l2bytes = do_ahits = do_dhits = do_phits = do_mhits \
+ = do_l2hits = do_list_hits = show_zero_charts;
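+ // each do_* flag starts as the configured show_zero_charts value and
+ // latches to CONFIG_BOOLEAN_YES the first time its chart sees data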
// ARC reads
unsigned long long aread = arcstats.hits + arcstats.misses;
@@ -31,7 +37,9 @@ void generate_charts_arcstats(const char *plugin, const char *module, int update
// --------------------------------------------------------------------
- {
+ if(do_arc_size == CONFIG_BOOLEAN_YES || arcstats.size || arcstats.c || arcstats.c_min || arcstats.c_max) {
+ do_arc_size = CONFIG_BOOLEAN_YES;
+
static RRDSET *st_arc_size = NULL;
static RRDDIM *rd_arc_size = NULL;
static RRDDIM *rd_arc_target_size = NULL;
@@ -46,7 +54,7 @@ void generate_charts_arcstats(const char *plugin, const char *module, int update
, ZFS_FAMILY_SIZE
, NULL
, "ZFS ARC Size"
- , "MB"
+ , "MiB"
, plugin
, module
, NETDATA_CHART_PRIO_ZFS_ARC_SIZE
@@ -71,7 +79,9 @@ void generate_charts_arcstats(const char *plugin, const char *module, int update
// --------------------------------------------------------------------
- if(likely(arcstats.l2exist)) {
+ if(likely(arcstats.l2exist) && (do_l2_size == CONFIG_BOOLEAN_YES || arcstats.l2_size || arcstats.l2_asize)) {
+ do_l2_size = CONFIG_BOOLEAN_YES;
+
static RRDSET *st_l2_size = NULL;
static RRDDIM *rd_l2_size = NULL;
static RRDDIM *rd_l2_asize = NULL;
@@ -84,7 +94,7 @@ void generate_charts_arcstats(const char *plugin, const char *module, int update
, ZFS_FAMILY_SIZE
, NULL
, "ZFS L2 ARC Size"
- , "MB"
+ , "MiB"
, plugin
, module
, NETDATA_CHART_PRIO_ZFS_L2_SIZE
@@ -105,7 +115,9 @@ void generate_charts_arcstats(const char *plugin, const char *module, int update
// --------------------------------------------------------------------
- {
+ if(likely(do_reads == CONFIG_BOOLEAN_YES || aread || dread || pread || mread || l2read)) {
+ do_reads = CONFIG_BOOLEAN_YES;
+
static RRDSET *st_reads = NULL;
static RRDDIM *rd_aread = NULL;
static RRDDIM *rd_dread = NULL;
@@ -153,7 +165,9 @@ void generate_charts_arcstats(const char *plugin, const char *module, int update
// --------------------------------------------------------------------
- if(likely(arcstats.l2exist)) {
+ if(likely(arcstats.l2exist && (do_l2bytes == CONFIG_BOOLEAN_YES || arcstats.l2_read_bytes || arcstats.l2_write_bytes))) {
+ do_l2bytes = CONFIG_BOOLEAN_YES;
+
static RRDSET *st_l2bytes = NULL;
static RRDDIM *rd_l2_read_bytes = NULL;
static RRDDIM *rd_l2_write_bytes = NULL;
@@ -166,7 +180,7 @@ void generate_charts_arcstats(const char *plugin, const char *module, int update
, ZFS_FAMILY_ACCESSES
, NULL
, "ZFS ARC L2 Read/Write Rate"
- , "kilobytes/s"
+ , "KiB/s"
, plugin
, module
, NETDATA_CHART_PRIO_ZFS_IO
@@ -187,7 +201,9 @@ void generate_charts_arcstats(const char *plugin, const char *module, int update
// --------------------------------------------------------------------
- {
+ if(likely(do_ahits == CONFIG_BOOLEAN_YES || arcstats.hits || arcstats.misses)) {
+ do_ahits = CONFIG_BOOLEAN_YES;
+
static RRDSET *st_ahits = NULL;
static RRDDIM *rd_ahits = NULL;
static RRDDIM *rd_amisses = NULL;
@@ -221,7 +237,9 @@ void generate_charts_arcstats(const char *plugin, const char *module, int update
// --------------------------------------------------------------------
- {
+ if(likely(do_dhits == CONFIG_BOOLEAN_YES || dhit || dmiss)) {
+ do_dhits = CONFIG_BOOLEAN_YES;
+
static RRDSET *st_dhits = NULL;
static RRDDIM *rd_dhits = NULL;
static RRDDIM *rd_dmisses = NULL;
@@ -255,7 +273,9 @@ void generate_charts_arcstats(const char *plugin, const char *module, int update
// --------------------------------------------------------------------
- {
+ if(likely(do_phits == CONFIG_BOOLEAN_YES || phit || pmiss)) {
+ do_phits = CONFIG_BOOLEAN_YES;
+
static RRDSET *st_phits = NULL;
static RRDDIM *rd_phits = NULL;
static RRDDIM *rd_pmisses = NULL;
@@ -289,7 +309,9 @@ void generate_charts_arcstats(const char *plugin, const char *module, int update
// --------------------------------------------------------------------
- {
+ if(likely(do_mhits == CONFIG_BOOLEAN_YES || mhit || mmiss)) {
+ do_mhits = CONFIG_BOOLEAN_YES;
+
static RRDSET *st_mhits = NULL;
static RRDDIM *rd_mhits = NULL;
static RRDDIM *rd_mmisses = NULL;
@@ -323,7 +345,9 @@ void generate_charts_arcstats(const char *plugin, const char *module, int update
// --------------------------------------------------------------------
- if(likely(arcstats.l2exist)) {
+ if(likely(arcstats.l2exist && (do_l2hits == CONFIG_BOOLEAN_YES || l2hit || l2miss))) {
+ do_l2hits = CONFIG_BOOLEAN_YES;
+
static RRDSET *st_l2hits = NULL;
static RRDDIM *rd_l2hits = NULL;
static RRDDIM *rd_l2misses = NULL;
@@ -357,7 +381,12 @@ void generate_charts_arcstats(const char *plugin, const char *module, int update
// --------------------------------------------------------------------
- {
+ if(likely(do_list_hits == CONFIG_BOOLEAN_YES || arcstats.mfu_hits \
+ || arcstats.mru_hits \
+ || arcstats.mfu_ghost_hits \
+ || arcstats.mru_ghost_hits)) {
+ do_list_hits = CONFIG_BOOLEAN_YES;
+
static RRDSET *st_list_hits = NULL;
static RRDDIM *rd_mfu = NULL;
static RRDDIM *rd_mru = NULL;
@@ -396,7 +425,14 @@ void generate_charts_arcstats(const char *plugin, const char *module, int update
}
}
-void generate_charts_arc_summary(const char *plugin, const char *module, int update_every) {
+void generate_charts_arc_summary(const char *plugin, const char *module, int show_zero_charts, int update_every) {
+ static int do_arc_size_breakdown = -1, do_memory = -1, do_important_ops = -1, do_actual_hits = -1, \
+ do_demand_data_hits = -1, do_prefetch_data_hits = -1, do_hash_elements = -1, do_hash_chains = -1;
+
+ if(unlikely(do_arc_size_breakdown == -1))
+ do_arc_size_breakdown = do_memory = do_important_ops = do_actual_hits = do_demand_data_hits \
+ = do_prefetch_data_hits = do_hash_elements = do_hash_chains = show_zero_charts;
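+ // same latching pattern as in generate_charts_arcstats() above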
+
unsigned long long arc_accesses_total = arcstats.hits + arcstats.misses;
unsigned long long real_hits = arcstats.mfu_hits + arcstats.mru_hits;
unsigned long long real_misses = arc_accesses_total - real_hits;
@@ -418,7 +454,9 @@ void generate_charts_arc_summary(const char *plugin, const char *module, int upd
// --------------------------------------------------------------------
- {
+ if(likely(do_arc_size_breakdown == CONFIG_BOOLEAN_YES || mru_size || mfu_size)) {
+ do_arc_size_breakdown = CONFIG_BOOLEAN_YES;
+
static RRDSET *st_arc_size_breakdown = NULL;
static RRDDIM *rd_most_recent = NULL;
static RRDDIM *rd_most_frequent = NULL;
@@ -452,7 +490,11 @@ void generate_charts_arc_summary(const char *plugin, const char *module, int upd
// --------------------------------------------------------------------
- {
+ if(likely(do_memory == CONFIG_BOOLEAN_YES || arcstats.memory_direct_count \
+ || arcstats.memory_throttle_count \
+ || arcstats.memory_indirect_count)) {
+ do_memory = CONFIG_BOOLEAN_YES;
+
static RRDSET *st_memory = NULL;
#ifndef __FreeBSD__
static RRDDIM *rd_direct = NULL;
@@ -501,7 +543,12 @@ void generate_charts_arc_summary(const char *plugin, const char *module, int upd
// --------------------------------------------------------------------
- {
+ if(likely(do_important_ops == CONFIG_BOOLEAN_YES || arcstats.deleted \
+ || arcstats.evict_skip \
+ || arcstats.mutex_miss \
+ || arcstats.hash_collisions)) {
+ do_important_ops = CONFIG_BOOLEAN_YES;
+
static RRDSET *st_important_ops = NULL;
static RRDDIM *rd_deleted = NULL;
static RRDDIM *rd_mutex_misses = NULL;
@@ -541,7 +588,9 @@ void generate_charts_arc_summary(const char *plugin, const char *module, int upd
// --------------------------------------------------------------------
- {
+ if(likely(do_actual_hits == CONFIG_BOOLEAN_YES || real_hits || real_misses)) {
+ do_actual_hits = CONFIG_BOOLEAN_YES;
+
static RRDSET *st_actual_hits = NULL;
static RRDDIM *rd_actual_hits = NULL;
static RRDDIM *rd_actual_misses = NULL;
@@ -575,7 +624,9 @@ void generate_charts_arc_summary(const char *plugin, const char *module, int upd
// --------------------------------------------------------------------
- {
+ if(likely(do_demand_data_hits == CONFIG_BOOLEAN_YES || arcstats.demand_data_hits || arcstats.demand_data_misses)) {
+ do_demand_data_hits = CONFIG_BOOLEAN_YES;
+
static RRDSET *st_demand_data_hits = NULL;
static RRDDIM *rd_demand_data_hits = NULL;
static RRDDIM *rd_demand_data_misses = NULL;
@@ -609,7 +660,10 @@ void generate_charts_arc_summary(const char *plugin, const char *module, int upd
// --------------------------------------------------------------------
- {
+ if(likely(do_prefetch_data_hits == CONFIG_BOOLEAN_YES || arcstats.prefetch_data_hits \
+ || arcstats.prefetch_data_misses)) {
+ do_prefetch_data_hits = CONFIG_BOOLEAN_YES;
+
static RRDSET *st_prefetch_data_hits = NULL;
static RRDDIM *rd_prefetch_data_hits = NULL;
static RRDDIM *rd_prefetch_data_misses = NULL;
@@ -643,7 +697,9 @@ void generate_charts_arc_summary(const char *plugin, const char *module, int upd
// --------------------------------------------------------------------
- {
+ if(likely(do_hash_elements == CONFIG_BOOLEAN_YES || arcstats.hash_elements || arcstats.hash_elements_max)) {
+ do_hash_elements = CONFIG_BOOLEAN_YES;
+
static RRDSET *st_hash_elements = NULL;
static RRDDIM *rd_hash_elements_current = NULL;
static RRDDIM *rd_hash_elements_max = NULL;
@@ -677,7 +733,9 @@ void generate_charts_arc_summary(const char *plugin, const char *module, int upd
// --------------------------------------------------------------------
- {
+ if(likely(do_hash_chains == CONFIG_BOOLEAN_YES || arcstats.hash_chains || arcstats.hash_chain_max)) {
+ do_hash_chains = CONFIG_BOOLEAN_YES;
+
static RRDSET *st_hash_chains = NULL;
static RRDDIM *rd_hash_chains_current = NULL;
static RRDDIM *rd_hash_chains_max = NULL;
diff --git a/collectors/proc.plugin/zfs_common.h b/collectors/proc.plugin/zfs_common.h
index fab54f59..148f9e47 100644
--- a/collectors/proc.plugin/zfs_common.h
+++ b/collectors/proc.plugin/zfs_common.h
@@ -109,7 +109,7 @@ struct arcstats {
int l2exist;
};
-void generate_charts_arcstats(const char *plugin, const char *module, int update_every);
-void generate_charts_arc_summary(const char *plugin, const char *module, int update_every);
+void generate_charts_arcstats(const char *plugin, const char *module, int show_zero_charts, int update_every);
+void generate_charts_arc_summary(const char *plugin, const char *module, int show_zero_charts, int update_every);
#endif //NETDATA_ZFS_COMMON_H
diff --git a/collectors/python.d.plugin/.keep b/collectors/python.d.plugin/.keep
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/collectors/python.d.plugin/.keep
diff --git a/collectors/python.d.plugin/Makefile.am b/collectors/python.d.plugin/Makefile.am
index 984050c4..3599d9c9 100644
--- a/collectors/python.d.plugin/Makefile.am
+++ b/collectors/python.d.plugin/Makefile.am
@@ -29,12 +29,11 @@ dist_python_DATA = \
userpythonconfigdir=$(configdir)/python.d
dist_userpythonconfig_DATA = \
- $(top_srcdir)/installer/.keep \
+ .keep \
$(NULL)
pythonconfigdir=$(libconfigdir)/python.d
dist_pythonconfig_DATA = \
- $(top_srcdir)/installer/.keep \
$(NULL)
include adaptec_raid/Makefile.inc
diff --git a/collectors/python.d.plugin/Makefile.in b/collectors/python.d.plugin/Makefile.in
deleted file mode 100644
index 49560689..00000000
--- a/collectors/python.d.plugin/Makefile.in
+++ /dev/null
@@ -1,2025 +0,0 @@
-# Makefile.in generated by automake 1.14.1 from Makefile.am.
-# @configure_input@
-
-# Copyright (C) 1994-2013 Free Software Foundation, Inc.
-
-# This Makefile.in is free software; the Free Software Foundation
-# gives unlimited permission to copy and/or distribute it,
-# with or without modifications, as long as this notice is preserved.
-
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
-# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
-# PARTICULAR PURPOSE.
-
-@SET_MAKE@
-
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-# THIS IS NOT A COMPLETE Makefile
-# IT IS INCLUDED BY ITS PARENT'S Makefile.am
-# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
-
-VPATH = @srcdir@
-am__is_gnu_make = test -n '$(MAKEFILE_LIST)' && test -n '$(MAKELEVEL)'
-am__make_running_with_option = \
- case $${target_option-} in \
- ?) ;; \
- *) echo "am__make_running_with_option: internal error: invalid" \
- "target option '$${target_option-}' specified" >&2; \
- exit 1;; \
- esac; \
- has_opt=no; \
- sane_makeflags=$$MAKEFLAGS; \
- if $(am__is_gnu_make); then \
- sane_makeflags=$$MFLAGS; \
- else \
- case $$MAKEFLAGS in \
- *\\[\ \ ]*) \
- bs=\\; \
- sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \
- | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \
- esac; \
- fi; \
- skip_next=no; \
- strip_trailopt () \
- { \
- flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \
- }; \
- for flg in $$sane_makeflags; do \
- test $$skip_next = yes && { skip_next=no; continue; }; \
- case $$flg in \
- *=*|--*) continue;; \
- -*I) strip_trailopt 'I'; skip_next=yes;; \
- -*I?*) strip_trailopt 'I';; \
- -*O) strip_trailopt 'O'; skip_next=yes;; \
- -*O?*) strip_trailopt 'O';; \
- -*l) strip_trailopt 'l'; skip_next=yes;; \
- -*l?*) strip_trailopt 'l';; \
- -[dEDm]) skip_next=yes;; \
- -[JT]) skip_next=yes;; \
- esac; \
- case $$flg in \
- *$$target_option*) has_opt=yes; break;; \
- esac; \
- done; \
- test $$has_opt = yes
-am__make_dryrun = (target_option=n; $(am__make_running_with_option))
-am__make_keepgoing = (target_option=k; $(am__make_running_with_option))
-pkgdatadir = $(datadir)/@PACKAGE@
-pkgincludedir = $(includedir)/@PACKAGE@
-pkglibdir = $(libdir)/@PACKAGE@
-pkglibexecdir = $(libexecdir)/@PACKAGE@
-am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd
-install_sh_DATA = $(install_sh) -c -m 644
-install_sh_PROGRAM = $(install_sh) -c
-install_sh_SCRIPT = $(install_sh) -c
-INSTALL_HEADER = $(INSTALL_DATA)
-transform = $(program_transform_name)
-NORMAL_INSTALL = :
-PRE_INSTALL = :
-POST_INSTALL = :
-NORMAL_UNINSTALL = :
-PRE_UNINSTALL = :
-POST_UNINSTALL = :
-build_triplet = @build@
-host_triplet = @host@
-DIST_COMMON = $(top_srcdir)/build/subst.inc \
- $(srcdir)/adaptec_raid/Makefile.inc \
- $(srcdir)/apache/Makefile.inc $(srcdir)/beanstalk/Makefile.inc \
- $(srcdir)/bind_rndc/Makefile.inc $(srcdir)/boinc/Makefile.inc \
- $(srcdir)/ceph/Makefile.inc $(srcdir)/chrony/Makefile.inc \
- $(srcdir)/couchdb/Makefile.inc $(srcdir)/cpufreq/Makefile.inc \
- $(srcdir)/cpuidle/Makefile.inc $(srcdir)/dnsdist/Makefile.inc \
- $(srcdir)/dns_query_time/Makefile.inc \
- $(srcdir)/dockerd/Makefile.inc $(srcdir)/dovecot/Makefile.inc \
- $(srcdir)/elasticsearch/Makefile.inc \
- $(srcdir)/example/Makefile.inc $(srcdir)/exim/Makefile.inc \
- $(srcdir)/fail2ban/Makefile.inc \
- $(srcdir)/freeradius/Makefile.inc \
- $(srcdir)/go_expvar/Makefile.inc \
- $(srcdir)/haproxy/Makefile.inc $(srcdir)/hddtemp/Makefile.inc \
- $(srcdir)/httpcheck/Makefile.inc \
- $(srcdir)/icecast/Makefile.inc $(srcdir)/ipfs/Makefile.inc \
- $(srcdir)/isc_dhcpd/Makefile.inc \
- $(srcdir)/linux_power_supply/Makefile.inc \
- $(srcdir)/litespeed/Makefile.inc $(srcdir)/logind/Makefile.inc \
- $(srcdir)/mdstat/Makefile.inc $(srcdir)/megacli/Makefile.inc \
- $(srcdir)/memcached/Makefile.inc \
- $(srcdir)/mongodb/Makefile.inc $(srcdir)/monit/Makefile.inc \
- $(srcdir)/mysql/Makefile.inc $(srcdir)/nginx/Makefile.inc \
- $(srcdir)/nginx_plus/Makefile.inc \
- $(srcdir)/nvidia_smi/Makefile.inc $(srcdir)/nsd/Makefile.inc \
- $(srcdir)/ntpd/Makefile.inc \
- $(srcdir)/ovpn_status_log/Makefile.inc \
- $(srcdir)/openldap/Makefile.inc $(srcdir)/phpfpm/Makefile.inc \
- $(srcdir)/portcheck/Makefile.inc \
- $(srcdir)/postfix/Makefile.inc $(srcdir)/postgres/Makefile.inc \
- $(srcdir)/powerdns/Makefile.inc \
- $(srcdir)/proxysql/Makefile.inc $(srcdir)/puppet/Makefile.inc \
- $(srcdir)/rabbitmq/Makefile.inc $(srcdir)/redis/Makefile.inc \
- $(srcdir)/rethinkdbs/Makefile.inc \
- $(srcdir)/retroshare/Makefile.inc $(srcdir)/samba/Makefile.inc \
- $(srcdir)/sensors/Makefile.inc \
- $(srcdir)/smartd_log/Makefile.inc \
- $(srcdir)/spigotmc/Makefile.inc \
- $(srcdir)/springboot/Makefile.inc $(srcdir)/squid/Makefile.inc \
- $(srcdir)/tomcat/Makefile.inc $(srcdir)/tor/Makefile.inc \
- $(srcdir)/traefik/Makefile.inc $(srcdir)/unbound/Makefile.inc \
- $(srcdir)/uwsgi/Makefile.inc $(srcdir)/varnish/Makefile.inc \
- $(srcdir)/w1sensor/Makefile.inc $(srcdir)/web_log/Makefile.inc \
- $(srcdir)/Makefile.in $(srcdir)/Makefile.am \
- $(dist_plugins_SCRIPTS) $(dist_python_SCRIPTS) \
- $(dist_bases_DATA) $(dist_bases_framework_services_DATA) \
- $(dist_libconfig_DATA) $(dist_noinst_DATA) $(dist_python_DATA) \
- $(dist_python_urllib3_DATA) \
- $(dist_python_urllib3_backports_DATA) \
- $(dist_python_urllib3_contrib_DATA) \
- $(dist_python_urllib3_packages_DATA) \
- $(dist_python_urllib3_securetransport_DATA) \
- $(dist_python_urllib3_ssl_match_hostname_DATA) \
- $(dist_python_urllib3_util_DATA) $(dist_pythonconfig_DATA) \
- $(dist_pythonmodules_DATA) $(dist_pythonyaml2_DATA) \
- $(dist_pythonyaml3_DATA) $(dist_third_party_DATA) \
- $(dist_userpythonconfig_DATA)
-subdir = collectors/python.d.plugin
-ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
-am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \
- $(top_srcdir)/build/m4/ax_c__generic.m4 \
- $(top_srcdir)/build/m4/ax_c_lto.m4 \
- $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \
- $(top_srcdir)/build/m4/ax_c_mallopt.m4 \
- $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \
- $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \
- $(top_srcdir)/build/m4/ax_pthread.m4 \
- $(top_srcdir)/build/m4/jemalloc.m4 \
- $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac
-am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
- $(ACLOCAL_M4)
-mkinstalldirs = $(install_sh) -d
-CONFIG_HEADER = $(top_builddir)/config.h
-CONFIG_CLEAN_FILES =
-CONFIG_CLEAN_VPATH_FILES =
-am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`;
-am__vpath_adj = case $$p in \
- $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \
- *) f=$$p;; \
- esac;
-am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`;
-am__install_max = 40
-am__nobase_strip_setup = \
- srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'`
-am__nobase_strip = \
- for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||"
-am__nobase_list = $(am__nobase_strip_setup); \
- for p in $$list; do echo "$$p $$p"; done | \
- sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \
- $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \
- if (++n[$$2] == $(am__install_max)) \
- { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \
- END { for (dir in files) print dir, files[dir] }'
-am__base_list = \
- sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \
- sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g'
-am__uninstall_files_from_dir = { \
- test -z "$$files" \
- || { test ! -d "$$dir" && test ! -f "$$dir" && test ! -r "$$dir"; } \
- || { echo " ( cd '$$dir' && rm -f" $$files ")"; \
- $(am__cd) "$$dir" && rm -f $$files; }; \
- }
-am__installdirs = "$(DESTDIR)$(pluginsdir)" "$(DESTDIR)$(pythondir)" \
- "$(DESTDIR)$(basesdir)" \
- "$(DESTDIR)$(bases_framework_servicesdir)" \
- "$(DESTDIR)$(libconfigdir)" "$(DESTDIR)$(pythondir)" \
- "$(DESTDIR)$(python_urllib3dir)" \
- "$(DESTDIR)$(python_urllib3_backportsdir)" \
- "$(DESTDIR)$(python_urllib3_contribdir)" \
- "$(DESTDIR)$(python_urllib3_packagesdir)" \
- "$(DESTDIR)$(python_urllib3_securetransportdir)" \
- "$(DESTDIR)$(python_urllib3_ssl_match_hostnamedir)" \
- "$(DESTDIR)$(python_urllib3_utildir)" \
- "$(DESTDIR)$(pythonconfigdir)" "$(DESTDIR)$(pythonmodulesdir)" \
- "$(DESTDIR)$(pythonyaml2dir)" "$(DESTDIR)$(pythonyaml3dir)" \
- "$(DESTDIR)$(third_partydir)" \
- "$(DESTDIR)$(userpythonconfigdir)"
-SCRIPTS = $(dist_plugins_SCRIPTS) $(dist_python_SCRIPTS)
-AM_V_P = $(am__v_P_@AM_V@)
-am__v_P_ = $(am__v_P_@AM_DEFAULT_V@)
-am__v_P_0 = false
-am__v_P_1 = :
-AM_V_GEN = $(am__v_GEN_@AM_V@)
-am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@)
-am__v_GEN_0 = @echo " GEN " $@;
-am__v_GEN_1 =
-AM_V_at = $(am__v_at_@AM_V@)
-am__v_at_ = $(am__v_at_@AM_DEFAULT_V@)
-am__v_at_0 = @
-am__v_at_1 =
-SOURCES =
-DIST_SOURCES =
-am__can_run_installinfo = \
- case $$AM_UPDATE_INFO_DIR in \
- n|no|NO) false;; \
- *) (install-info --version) >/dev/null 2>&1;; \
- esac
-DATA = $(dist_bases_DATA) $(dist_bases_framework_services_DATA) \
- $(dist_libconfig_DATA) $(dist_noinst_DATA) $(dist_python_DATA) \
- $(dist_python_urllib3_DATA) \
- $(dist_python_urllib3_backports_DATA) \
- $(dist_python_urllib3_contrib_DATA) \
- $(dist_python_urllib3_packages_DATA) \
- $(dist_python_urllib3_securetransport_DATA) \
- $(dist_python_urllib3_ssl_match_hostname_DATA) \
- $(dist_python_urllib3_util_DATA) $(dist_pythonconfig_DATA) \
- $(dist_pythonmodules_DATA) $(dist_pythonyaml2_DATA) \
- $(dist_pythonyaml3_DATA) $(dist_third_party_DATA) \
- $(dist_userpythonconfig_DATA)
-am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP)
-DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
-ACLOCAL = @ACLOCAL@
-AMTAR = @AMTAR@
-AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@
-AUTOCONF = @AUTOCONF@
-AUTOHEADER = @AUTOHEADER@
-AUTOMAKE = @AUTOMAKE@
-AWK = @AWK@
-CC = @CC@
-CCDEPMODE = @CCDEPMODE@
-CFLAGS = @CFLAGS@
-CPP = @CPP@
-CPPFLAGS = @CPPFLAGS@
-CYGPATH_W = @CYGPATH_W@
-DEFS = @DEFS@
-DEPDIR = @DEPDIR@
-ECHO_C = @ECHO_C@
-ECHO_N = @ECHO_N@
-ECHO_T = @ECHO_T@
-EGREP = @EGREP@
-EXEEXT = @EXEEXT@
-GREP = @GREP@
-INSTALL = @INSTALL@
-INSTALL_DATA = @INSTALL_DATA@
-INSTALL_PROGRAM = @INSTALL_PROGRAM@
-INSTALL_SCRIPT = @INSTALL_SCRIPT@
-INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
-IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@
-IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@
-LDFLAGS = @LDFLAGS@
-LIBCAP_CFLAGS = @LIBCAP_CFLAGS@
-LIBCAP_LIBS = @LIBCAP_LIBS@
-LIBMNL_CFLAGS = @LIBMNL_CFLAGS@
-LIBMNL_LIBS = @LIBMNL_LIBS@
-LIBOBJS = @LIBOBJS@
-LIBS = @LIBS@
-LTLIBOBJS = @LTLIBOBJS@
-MAINT = @MAINT@
-MAKEINFO = @MAKEINFO@
-MATH_CFLAGS = @MATH_CFLAGS@
-MATH_LIBS = @MATH_LIBS@
-MKDIR_P = @MKDIR_P@
-NFACCT_CFLAGS = @NFACCT_CFLAGS@
-NFACCT_LIBS = @NFACCT_LIBS@
-OBJEXT = @OBJEXT@
-OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@
-OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@
-OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@
-OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@
-OPTIONAL_MATH_CLFAGS = @OPTIONAL_MATH_CLFAGS@
-OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@
-OPTIONAL_NFACCT_CLFAGS = @OPTIONAL_NFACCT_CLFAGS@
-OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@
-OPTIONAL_UUID_CLFAGS = @OPTIONAL_UUID_CLFAGS@
-OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@
-OPTIONAL_ZLIB_CLFAGS = @OPTIONAL_ZLIB_CLFAGS@
-OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@
-PACKAGE = @PACKAGE@
-PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@
-PACKAGE_NAME = @PACKAGE_NAME@
-PACKAGE_RPM_RELEASE = @PACKAGE_RPM_RELEASE@
-PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@
-PACKAGE_STRING = @PACKAGE_STRING@
-PACKAGE_TARNAME = @PACKAGE_TARNAME@
-PACKAGE_URL = @PACKAGE_URL@
-PACKAGE_VERSION = @PACKAGE_VERSION@
-PATH_SEPARATOR = @PATH_SEPARATOR@
-PKG_CONFIG = @PKG_CONFIG@
-PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@
-PKG_CONFIG_PATH = @PKG_CONFIG_PATH@
-PTHREAD_CC = @PTHREAD_CC@
-PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
-PTHREAD_LIBS = @PTHREAD_LIBS@
-SET_MAKE = @SET_MAKE@
-SHELL = @SHELL@
-SSE_CANDIDATE = @SSE_CANDIDATE@
-STRIP = @STRIP@
-UUID_CFLAGS = @UUID_CFLAGS@
-UUID_LIBS = @UUID_LIBS@
-VERSION = @VERSION@
-ZLIB_CFLAGS = @ZLIB_CFLAGS@
-ZLIB_LIBS = @ZLIB_LIBS@
-abs_builddir = @abs_builddir@
-abs_srcdir = @abs_srcdir@
-abs_top_builddir = @abs_top_builddir@
-abs_top_srcdir = @abs_top_srcdir@
-ac_ct_CC = @ac_ct_CC@
-am__include = @am__include@
-am__leading_dot = @am__leading_dot@
-am__quote = @am__quote@
-am__tar = @am__tar@
-am__untar = @am__untar@
-ax_pthread_config = @ax_pthread_config@
-bindir = @bindir@
-build = @build@
-build_alias = @build_alias@
-build_cpu = @build_cpu@
-build_os = @build_os@
-build_target = @build_target@
-build_vendor = @build_vendor@
-builddir = @builddir@
-cachedir = @cachedir@
-chartsdir = @chartsdir@
-configdir = @configdir@
-datadir = @datadir@
-datarootdir = @datarootdir@
-docdir = @docdir@
-dvidir = @dvidir@
-exec_prefix = @exec_prefix@
-has_jemalloc = @has_jemalloc@
-has_tcmalloc = @has_tcmalloc@
-host = @host@
-host_alias = @host_alias@
-host_cpu = @host_cpu@
-host_os = @host_os@
-host_vendor = @host_vendor@
-htmldir = @htmldir@
-includedir = @includedir@
-infodir = @infodir@
-install_sh = @install_sh@
-libconfigdir = @libconfigdir@
-libdir = @libdir@
-libexecdir = @libexecdir@
-localedir = @localedir@
-localstatedir = @localstatedir@
-logdir = @logdir@
-mandir = @mandir@
-mkdir_p = @mkdir_p@
-nodedir = @nodedir@
-oldincludedir = @oldincludedir@
-pdfdir = @pdfdir@
-pluginsdir = @pluginsdir@
-prefix = @prefix@
-program_transform_name = @program_transform_name@
-psdir = @psdir@
-pythondir = @pythondir@
-registrydir = @registrydir@
-sbindir = @sbindir@
-sharedstatedir = @sharedstatedir@
-srcdir = @srcdir@
-sysconfdir = @sysconfdir@
-target_alias = @target_alias@
-top_build_prefix = @top_build_prefix@
-top_builddir = @top_builddir@
-top_srcdir = @top_srcdir@
-varlibdir = @varlibdir@
-webdir = @webdir@
-MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
-CLEANFILES = \
- python.d.plugin \
- $(NULL)
-
-SUFFIXES = .in
-dist_libconfig_DATA = \
- python.d.conf \
- $(NULL)
-
-dist_plugins_SCRIPTS = \
- python.d.plugin \
- $(NULL)
-
-
-# do not install these files, but include them in the distribution
-dist_noinst_DATA = python.d.plugin.in README.md $(NULL) \
- adaptec_raid/README.md adaptec_raid/Makefile.inc \
- apache/README.md apache/Makefile.inc beanstalk/README.md \
- beanstalk/Makefile.inc bind_rndc/README.md \
- bind_rndc/Makefile.inc boinc/README.md boinc/Makefile.inc \
- ceph/README.md ceph/Makefile.inc chrony/README.md \
- chrony/Makefile.inc couchdb/README.md couchdb/Makefile.inc \
- cpufreq/README.md cpufreq/Makefile.inc cpuidle/README.md \
- cpuidle/Makefile.inc dnsdist/README.md dnsdist/Makefile.inc \
- dns_query_time/README.md dns_query_time/Makefile.inc \
- dockerd/README.md dockerd/Makefile.inc dovecot/README.md \
- dovecot/Makefile.inc elasticsearch/README.md \
- elasticsearch/Makefile.inc example/README.md \
- example/Makefile.inc exim/README.md exim/Makefile.inc \
- fail2ban/README.md fail2ban/Makefile.inc freeradius/README.md \
- freeradius/Makefile.inc go_expvar/README.md \
- go_expvar/Makefile.inc haproxy/README.md haproxy/Makefile.inc \
- hddtemp/README.md hddtemp/Makefile.inc httpcheck/README.md \
- httpcheck/Makefile.inc icecast/README.md icecast/Makefile.inc \
- ipfs/README.md ipfs/Makefile.inc isc_dhcpd/README.md \
- isc_dhcpd/Makefile.inc linux_power_supply/README.md \
- linux_power_supply/Makefile.inc litespeed/README.md \
- litespeed/Makefile.inc logind/README.md logind/Makefile.inc \
- mdstat/README.md mdstat/Makefile.inc megacli/README.md \
- megacli/Makefile.inc memcached/README.md \
- memcached/Makefile.inc mongodb/README.md mongodb/Makefile.inc \
- monit/README.md monit/Makefile.inc mysql/README.md \
- mysql/Makefile.inc nginx/README.md nginx/Makefile.inc \
- nginx_plus/README.md nginx_plus/Makefile.inc \
- nvidia_smi/README.md nvidia_smi/Makefile.inc nsd/README.md \
- nsd/Makefile.inc ntpd/README.md ntpd/Makefile.inc \
- ovpn_status_log/README.md ovpn_status_log/Makefile.inc \
- openldap/README.md openldap/Makefile.inc phpfpm/README.md \
- phpfpm/Makefile.inc portcheck/README.md portcheck/Makefile.inc \
- postfix/README.md postfix/Makefile.inc postgres/README.md \
- postgres/Makefile.inc powerdns/README.md powerdns/Makefile.inc \
- proxysql/README.md proxysql/Makefile.inc puppet/README.md \
- puppet/Makefile.inc rabbitmq/README.md rabbitmq/Makefile.inc \
- redis/README.md redis/Makefile.inc rethinkdbs/README.md \
- rethinkdbs/Makefile.inc retroshare/README.md \
- retroshare/Makefile.inc samba/README.md samba/Makefile.inc \
- sensors/README.md sensors/Makefile.inc smartd_log/README.md \
- smartd_log/Makefile.inc spigotmc/README.md \
- spigotmc/Makefile.inc springboot/README.md \
- springboot/Makefile.inc squid/README.md squid/Makefile.inc \
- tomcat/README.md tomcat/Makefile.inc tor/README.md \
- tor/Makefile.inc traefik/README.md traefik/Makefile.inc \
- unbound/README.md unbound/Makefile.inc uwsgi/README.md \
- uwsgi/Makefile.inc varnish/README.md varnish/Makefile.inc \
- w1sensor/README.md w1sensor/Makefile.inc web_log/README.md \
- web_log/Makefile.inc
-dist_python_SCRIPTS = \
- $(NULL)
-
-
-# install these files
-dist_python_DATA = $(NULL) adaptec_raid/adaptec_raid.chart.py \
- apache/apache.chart.py beanstalk/beanstalk.chart.py \
- bind_rndc/bind_rndc.chart.py boinc/boinc.chart.py \
- ceph/ceph.chart.py chrony/chrony.chart.py \
- couchdb/couchdb.chart.py cpufreq/cpufreq.chart.py \
- cpuidle/cpuidle.chart.py dnsdist/dnsdist.chart.py \
- dns_query_time/dns_query_time.chart.py \
- dockerd/dockerd.chart.py dovecot/dovecot.chart.py \
- elasticsearch/elasticsearch.chart.py example/example.chart.py \
- exim/exim.chart.py fail2ban/fail2ban.chart.py \
- freeradius/freeradius.chart.py go_expvar/go_expvar.chart.py \
- haproxy/haproxy.chart.py hddtemp/hddtemp.chart.py \
- httpcheck/httpcheck.chart.py icecast/icecast.chart.py \
- ipfs/ipfs.chart.py isc_dhcpd/isc_dhcpd.chart.py \
- linux_power_supply/linux_power_supply.chart.py \
- litespeed/litespeed.chart.py logind/logind.chart.py \
- mdstat/mdstat.chart.py megacli/megacli.chart.py \
- memcached/memcached.chart.py mongodb/mongodb.chart.py \
- monit/monit.chart.py mysql/mysql.chart.py nginx/nginx.chart.py \
- nginx_plus/nginx_plus.chart.py nvidia_smi/nvidia_smi.chart.py \
- nsd/nsd.chart.py ntpd/ntpd.chart.py \
- ovpn_status_log/ovpn_status_log.chart.py \
- openldap/openldap.chart.py phpfpm/phpfpm.chart.py \
- portcheck/portcheck.chart.py postfix/postfix.chart.py \
- postgres/postgres.chart.py powerdns/powerdns.chart.py \
- proxysql/proxysql.chart.py puppet/puppet.chart.py \
- rabbitmq/rabbitmq.chart.py redis/redis.chart.py \
- rethinkdbs/rethinkdbs.chart.py retroshare/retroshare.chart.py \
- samba/samba.chart.py sensors/sensors.chart.py \
- smartd_log/smartd_log.chart.py spigotmc/spigotmc.chart.py \
- springboot/springboot.chart.py squid/squid.chart.py \
- tomcat/tomcat.chart.py tor/tor.chart.py \
- traefik/traefik.chart.py unbound/unbound.chart.py \
- uwsgi/uwsgi.chart.py varnish/varnish.chart.py \
- w1sensor/w1sensor.chart.py web_log/web_log.chart.py
-userpythonconfigdir = $(configdir)/python.d
-dist_userpythonconfig_DATA = \
- $(top_srcdir)/installer/.keep \
- $(NULL)
-
-pythonconfigdir = $(libconfigdir)/python.d
-dist_pythonconfig_DATA = $(top_srcdir)/installer/.keep $(NULL) \
- adaptec_raid/adaptec_raid.conf apache/apache.conf \
- beanstalk/beanstalk.conf bind_rndc/bind_rndc.conf \
- boinc/boinc.conf ceph/ceph.conf chrony/chrony.conf \
- couchdb/couchdb.conf cpufreq/cpufreq.conf cpuidle/cpuidle.conf \
- dnsdist/dnsdist.conf dns_query_time/dns_query_time.conf \
- dockerd/dockerd.conf dovecot/dovecot.conf \
- elasticsearch/elasticsearch.conf example/example.conf \
- exim/exim.conf fail2ban/fail2ban.conf \
- freeradius/freeradius.conf go_expvar/go_expvar.conf \
- haproxy/haproxy.conf hddtemp/hddtemp.conf \
- httpcheck/httpcheck.conf icecast/icecast.conf ipfs/ipfs.conf \
- isc_dhcpd/isc_dhcpd.conf \
- linux_power_supply/linux_power_supply.conf \
- litespeed/litespeed.conf logind/logind.conf mdstat/mdstat.conf \
- megacli/megacli.conf memcached/memcached.conf \
- mongodb/mongodb.conf monit/monit.conf mysql/mysql.conf \
- nginx/nginx.conf nginx_plus/nginx_plus.conf \
- nvidia_smi/nvidia_smi.conf nsd/nsd.conf ntpd/ntpd.conf \
- ovpn_status_log/ovpn_status_log.conf openldap/openldap.conf \
- phpfpm/phpfpm.conf portcheck/portcheck.conf \
- postfix/postfix.conf postgres/postgres.conf \
- powerdns/powerdns.conf proxysql/proxysql.conf \
- puppet/puppet.conf rabbitmq/rabbitmq.conf redis/redis.conf \
- rethinkdbs/rethinkdbs.conf retroshare/retroshare.conf \
- samba/samba.conf sensors/sensors.conf \
- smartd_log/smartd_log.conf spigotmc/spigotmc.conf \
- springboot/springboot.conf squid/squid.conf tomcat/tomcat.conf \
- tor/tor.conf traefik/traefik.conf unbound/unbound.conf \
- uwsgi/uwsgi.conf varnish/varnish.conf w1sensor/w1sensor.conf \
- web_log/web_log.conf
-pythonmodulesdir = $(pythondir)/python_modules
-dist_pythonmodules_DATA = \
- python_modules/__init__.py \
- $(NULL)
-
-basesdir = $(pythonmodulesdir)/bases
-dist_bases_DATA = \
- python_modules/bases/__init__.py \
- python_modules/bases/charts.py \
- python_modules/bases/collection.py \
- python_modules/bases/loaders.py \
- python_modules/bases/loggers.py \
- $(NULL)
-
-bases_framework_servicesdir = $(basesdir)/FrameworkServices
-dist_bases_framework_services_DATA = \
- python_modules/bases/FrameworkServices/__init__.py \
- python_modules/bases/FrameworkServices/ExecutableService.py \
- python_modules/bases/FrameworkServices/LogService.py \
- python_modules/bases/FrameworkServices/MySQLService.py \
- python_modules/bases/FrameworkServices/SimpleService.py \
- python_modules/bases/FrameworkServices/SocketService.py \
- python_modules/bases/FrameworkServices/UrlService.py \
- $(NULL)
-
-third_partydir = $(pythonmodulesdir)/third_party
-dist_third_party_DATA = \
- python_modules/third_party/__init__.py \
- python_modules/third_party/ordereddict.py \
- python_modules/third_party/lm_sensors.py \
- python_modules/third_party/mcrcon.py \
- python_modules/third_party/boinc_client.py \
- python_modules/third_party/monotonic.py \
- $(NULL)
-
-pythonyaml2dir = $(pythonmodulesdir)/pyyaml2
-dist_pythonyaml2_DATA = \
- python_modules/pyyaml2/__init__.py \
- python_modules/pyyaml2/composer.py \
- python_modules/pyyaml2/constructor.py \
- python_modules/pyyaml2/cyaml.py \
- python_modules/pyyaml2/dumper.py \
- python_modules/pyyaml2/emitter.py \
- python_modules/pyyaml2/error.py \
- python_modules/pyyaml2/events.py \
- python_modules/pyyaml2/loader.py \
- python_modules/pyyaml2/nodes.py \
- python_modules/pyyaml2/parser.py \
- python_modules/pyyaml2/reader.py \
- python_modules/pyyaml2/representer.py \
- python_modules/pyyaml2/resolver.py \
- python_modules/pyyaml2/scanner.py \
- python_modules/pyyaml2/serializer.py \
- python_modules/pyyaml2/tokens.py \
- $(NULL)
-
-pythonyaml3dir = $(pythonmodulesdir)/pyyaml3
-dist_pythonyaml3_DATA = \
- python_modules/pyyaml3/__init__.py \
- python_modules/pyyaml3/composer.py \
- python_modules/pyyaml3/constructor.py \
- python_modules/pyyaml3/cyaml.py \
- python_modules/pyyaml3/dumper.py \
- python_modules/pyyaml3/emitter.py \
- python_modules/pyyaml3/error.py \
- python_modules/pyyaml3/events.py \
- python_modules/pyyaml3/loader.py \
- python_modules/pyyaml3/nodes.py \
- python_modules/pyyaml3/parser.py \
- python_modules/pyyaml3/reader.py \
- python_modules/pyyaml3/representer.py \
- python_modules/pyyaml3/resolver.py \
- python_modules/pyyaml3/scanner.py \
- python_modules/pyyaml3/serializer.py \
- python_modules/pyyaml3/tokens.py \
- $(NULL)
-
-python_urllib3dir = $(pythonmodulesdir)/urllib3
-dist_python_urllib3_DATA = \
- python_modules/urllib3/__init__.py \
- python_modules/urllib3/_collections.py \
- python_modules/urllib3/connection.py \
- python_modules/urllib3/connectionpool.py \
- python_modules/urllib3/exceptions.py \
- python_modules/urllib3/fields.py \
- python_modules/urllib3/filepost.py \
- python_modules/urllib3/response.py \
- python_modules/urllib3/poolmanager.py \
- python_modules/urllib3/request.py \
- $(NULL)
-
-python_urllib3_utildir = $(python_urllib3dir)/util
-dist_python_urllib3_util_DATA = \
- python_modules/urllib3/util/__init__.py \
- python_modules/urllib3/util/connection.py \
- python_modules/urllib3/util/request.py \
- python_modules/urllib3/util/response.py \
- python_modules/urllib3/util/retry.py \
- python_modules/urllib3/util/selectors.py \
- python_modules/urllib3/util/ssl_.py \
- python_modules/urllib3/util/timeout.py \
- python_modules/urllib3/util/url.py \
- python_modules/urllib3/util/wait.py \
- $(NULL)
-
-python_urllib3_packagesdir = $(python_urllib3dir)/packages
-dist_python_urllib3_packages_DATA = \
- python_modules/urllib3/packages/__init__.py \
- python_modules/urllib3/packages/ordered_dict.py \
- python_modules/urllib3/packages/six.py \
- $(NULL)
-
-python_urllib3_backportsdir = $(python_urllib3_packagesdir)/backports
-dist_python_urllib3_backports_DATA = \
- python_modules/urllib3/packages/backports/__init__.py \
- python_modules/urllib3/packages/backports/makefile.py \
- $(NULL)
-
-python_urllib3_ssl_match_hostnamedir = $(python_urllib3_packagesdir)/ssl_match_hostname
-dist_python_urllib3_ssl_match_hostname_DATA = \
- python_modules/urllib3/packages/ssl_match_hostname/__init__.py \
- python_modules/urllib3/packages/ssl_match_hostname/_implementation.py \
- $(NULL)
-
-python_urllib3_contribdir = $(python_urllib3dir)/contrib
-dist_python_urllib3_contrib_DATA = \
- python_modules/urllib3/contrib/__init__.py \
- python_modules/urllib3/contrib/appengine.py \
- python_modules/urllib3/contrib/ntlmpool.py \
- python_modules/urllib3/contrib/pyopenssl.py \
- python_modules/urllib3/contrib/securetransport.py \
- python_modules/urllib3/contrib/socks.py \
- $(NULL)
-
-python_urllib3_securetransportdir = $(python_urllib3_contribdir)/_securetransport
-dist_python_urllib3_securetransport_DATA = \
- python_modules/urllib3/contrib/_securetransport/__init__.py \
- python_modules/urllib3/contrib/_securetransport/bindings.py \
- python_modules/urllib3/contrib/_securetransport/low_level.py \
- $(NULL)
-
-all: all-am
-
-.SUFFIXES:
-.SUFFIXES: .in
-$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(top_srcdir)/build/subst.inc $(srcdir)/adaptec_raid/Makefile.inc $(srcdir)/apache/Makefile.inc $(srcdir)/beanstalk/Makefile.inc $(srcdir)/bind_rndc/Makefile.inc $(srcdir)/boinc/Makefile.inc $(srcdir)/ceph/Makefile.inc $(srcdir)/chrony/Makefile.inc $(srcdir)/couchdb/Makefile.inc $(srcdir)/cpufreq/Makefile.inc $(srcdir)/cpuidle/Makefile.inc $(srcdir)/dnsdist/Makefile.inc $(srcdir)/dns_query_time/Makefile.inc $(srcdir)/dockerd/Makefile.inc $(srcdir)/dovecot/Makefile.inc $(srcdir)/elasticsearch/Makefile.inc $(srcdir)/example/Makefile.inc $(srcdir)/exim/Makefile.inc $(srcdir)/fail2ban/Makefile.inc $(srcdir)/freeradius/Makefile.inc $(srcdir)/go_expvar/Makefile.inc $(srcdir)/haproxy/Makefile.inc $(srcdir)/hddtemp/Makefile.inc $(srcdir)/httpcheck/Makefile.inc $(srcdir)/icecast/Makefile.inc $(srcdir)/ipfs/Makefile.inc $(srcdir)/isc_dhcpd/Makefile.inc $(srcdir)/linux_power_supply/Makefile.inc $(srcdir)/litespeed/Makefile.inc $(srcdir)/logind/Makefile.inc $(srcdir)/mdstat/Makefile.inc $(srcdir)/megacli/Makefile.inc $(srcdir)/memcached/Makefile.inc $(srcdir)/mongodb/Makefile.inc $(srcdir)/monit/Makefile.inc $(srcdir)/mysql/Makefile.inc $(srcdir)/nginx/Makefile.inc $(srcdir)/nginx_plus/Makefile.inc $(srcdir)/nvidia_smi/Makefile.inc $(srcdir)/nsd/Makefile.inc $(srcdir)/ntpd/Makefile.inc $(srcdir)/ovpn_status_log/Makefile.inc $(srcdir)/openldap/Makefile.inc $(srcdir)/phpfpm/Makefile.inc $(srcdir)/portcheck/Makefile.inc $(srcdir)/postfix/Makefile.inc $(srcdir)/postgres/Makefile.inc $(srcdir)/powerdns/Makefile.inc $(srcdir)/proxysql/Makefile.inc $(srcdir)/puppet/Makefile.inc $(srcdir)/rabbitmq/Makefile.inc $(srcdir)/redis/Makefile.inc $(srcdir)/rethinkdbs/Makefile.inc $(srcdir)/retroshare/Makefile.inc $(srcdir)/samba/Makefile.inc $(srcdir)/sensors/Makefile.inc $(srcdir)/smartd_log/Makefile.inc $(srcdir)/spigotmc/Makefile.inc $(srcdir)/springboot/Makefile.inc $(srcdir)/squid/Makefile.inc $(srcdir)/tomcat/Makefile.inc $(srcdir)/tor/Makefile.inc $(srcdir)/traefik/Makefile.inc $(srcdir)/unbound/Makefile.inc $(srcdir)/uwsgi/Makefile.inc $(srcdir)/varnish/Makefile.inc $(srcdir)/w1sensor/Makefile.inc $(srcdir)/web_log/Makefile.inc $(am__configure_deps)
- @for dep in $?; do \
- case '$(am__configure_deps)' in \
- *$$dep*) \
- ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \
- && { if test -f $@; then exit 0; else break; fi; }; \
- exit 1;; \
- esac; \
- done; \
- echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu collectors/python.d.plugin/Makefile'; \
- $(am__cd) $(top_srcdir) && \
- $(AUTOMAKE) --gnu collectors/python.d.plugin/Makefile
-.PRECIOUS: Makefile
-Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
- @case '$?' in \
- *config.status*) \
- cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \
- *) \
- echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \
- cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \
- esac;
-$(top_srcdir)/build/subst.inc $(srcdir)/adaptec_raid/Makefile.inc $(srcdir)/apache/Makefile.inc $(srcdir)/beanstalk/Makefile.inc $(srcdir)/bind_rndc/Makefile.inc $(srcdir)/boinc/Makefile.inc $(srcdir)/ceph/Makefile.inc $(srcdir)/chrony/Makefile.inc $(srcdir)/couchdb/Makefile.inc $(srcdir)/cpufreq/Makefile.inc $(srcdir)/cpuidle/Makefile.inc $(srcdir)/dnsdist/Makefile.inc $(srcdir)/dns_query_time/Makefile.inc $(srcdir)/dockerd/Makefile.inc $(srcdir)/dovecot/Makefile.inc $(srcdir)/elasticsearch/Makefile.inc $(srcdir)/example/Makefile.inc $(srcdir)/exim/Makefile.inc $(srcdir)/fail2ban/Makefile.inc $(srcdir)/freeradius/Makefile.inc $(srcdir)/go_expvar/Makefile.inc $(srcdir)/haproxy/Makefile.inc $(srcdir)/hddtemp/Makefile.inc $(srcdir)/httpcheck/Makefile.inc $(srcdir)/icecast/Makefile.inc $(srcdir)/ipfs/Makefile.inc $(srcdir)/isc_dhcpd/Makefile.inc $(srcdir)/linux_power_supply/Makefile.inc $(srcdir)/litespeed/Makefile.inc $(srcdir)/logind/Makefile.inc $(srcdir)/mdstat/Makefile.inc $(srcdir)/megacli/Makefile.inc $(srcdir)/memcached/Makefile.inc $(srcdir)/mongodb/Makefile.inc $(srcdir)/monit/Makefile.inc $(srcdir)/mysql/Makefile.inc $(srcdir)/nginx/Makefile.inc $(srcdir)/nginx_plus/Makefile.inc $(srcdir)/nvidia_smi/Makefile.inc $(srcdir)/nsd/Makefile.inc $(srcdir)/ntpd/Makefile.inc $(srcdir)/ovpn_status_log/Makefile.inc $(srcdir)/openldap/Makefile.inc $(srcdir)/phpfpm/Makefile.inc $(srcdir)/portcheck/Makefile.inc $(srcdir)/postfix/Makefile.inc $(srcdir)/postgres/Makefile.inc $(srcdir)/powerdns/Makefile.inc $(srcdir)/proxysql/Makefile.inc $(srcdir)/puppet/Makefile.inc $(srcdir)/rabbitmq/Makefile.inc $(srcdir)/redis/Makefile.inc $(srcdir)/rethinkdbs/Makefile.inc $(srcdir)/retroshare/Makefile.inc $(srcdir)/samba/Makefile.inc $(srcdir)/sensors/Makefile.inc $(srcdir)/smartd_log/Makefile.inc $(srcdir)/spigotmc/Makefile.inc $(srcdir)/springboot/Makefile.inc $(srcdir)/squid/Makefile.inc $(srcdir)/tomcat/Makefile.inc $(srcdir)/tor/Makefile.inc $(srcdir)/traefik/Makefile.inc $(srcdir)/unbound/Makefile.inc $(srcdir)/uwsgi/Makefile.inc $(srcdir)/varnish/Makefile.inc $(srcdir)/w1sensor/Makefile.inc $(srcdir)/web_log/Makefile.inc:
-
-$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES)
- cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
-
-$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps)
- cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
-$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps)
- cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
-$(am__aclocal_m4_deps):
-install-dist_pluginsSCRIPTS: $(dist_plugins_SCRIPTS)
- @$(NORMAL_INSTALL)
- @list='$(dist_plugins_SCRIPTS)'; test -n "$(pluginsdir)" || list=; \
- if test -n "$$list"; then \
- echo " $(MKDIR_P) '$(DESTDIR)$(pluginsdir)'"; \
- $(MKDIR_P) "$(DESTDIR)$(pluginsdir)" || exit 1; \
- fi; \
- for p in $$list; do \
- if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
- if test -f "$$d$$p"; then echo "$$d$$p"; echo "$$p"; else :; fi; \
- done | \
- sed -e 'p;s,.*/,,;n' \
- -e 'h;s|.*|.|' \
- -e 'p;x;s,.*/,,;$(transform)' | sed 'N;N;N;s,\n, ,g' | \
- $(AWK) 'BEGIN { files["."] = ""; dirs["."] = 1; } \
- { d=$$3; if (dirs[d] != 1) { print "d", d; dirs[d] = 1 } \
- if ($$2 == $$4) { files[d] = files[d] " " $$1; \
- if (++n[d] == $(am__install_max)) { \
- print "f", d, files[d]; n[d] = 0; files[d] = "" } } \
- else { print "f", d "/" $$4, $$1 } } \
- END { for (d in files) print "f", d, files[d] }' | \
- while read type dir files; do \
- if test "$$dir" = .; then dir=; else dir=/$$dir; fi; \
- test -z "$$files" || { \
- echo " $(INSTALL_SCRIPT) $$files '$(DESTDIR)$(pluginsdir)$$dir'"; \
- $(INSTALL_SCRIPT) $$files "$(DESTDIR)$(pluginsdir)$$dir" || exit $$?; \
- } \
- ; done
-
-uninstall-dist_pluginsSCRIPTS:
- @$(NORMAL_UNINSTALL)
- @list='$(dist_plugins_SCRIPTS)'; test -n "$(pluginsdir)" || exit 0; \
- files=`for p in $$list; do echo "$$p"; done | \
- sed -e 's,.*/,,;$(transform)'`; \
- dir='$(DESTDIR)$(pluginsdir)'; $(am__uninstall_files_from_dir)
-install-dist_pythonSCRIPTS: $(dist_python_SCRIPTS)
- @$(NORMAL_INSTALL)
- @list='$(dist_python_SCRIPTS)'; test -n "$(pythondir)" || list=; \
- if test -n "$$list"; then \
- echo " $(MKDIR_P) '$(DESTDIR)$(pythondir)'"; \
- $(MKDIR_P) "$(DESTDIR)$(pythondir)" || exit 1; \
- fi; \
- for p in $$list; do \
- if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
- if test -f "$$d$$p"; then echo "$$d$$p"; echo "$$p"; else :; fi; \
- done | \
- sed -e 'p;s,.*/,,;n' \
- -e 'h;s|.*|.|' \
- -e 'p;x;s,.*/,,;$(transform)' | sed 'N;N;N;s,\n, ,g' | \
- $(AWK) 'BEGIN { files["."] = ""; dirs["."] = 1; } \
- { d=$$3; if (dirs[d] != 1) { print "d", d; dirs[d] = 1 } \
- if ($$2 == $$4) { files[d] = files[d] " " $$1; \
- if (++n[d] == $(am__install_max)) { \
- print "f", d, files[d]; n[d] = 0; files[d] = "" } } \
- else { print "f", d "/" $$4, $$1 } } \
- END { for (d in files) print "f", d, files[d] }' | \
- while read type dir files; do \
- if test "$$dir" = .; then dir=; else dir=/$$dir; fi; \
- test -z "$$files" || { \
- echo " $(INSTALL_SCRIPT) $$files '$(DESTDIR)$(pythondir)$$dir'"; \
- $(INSTALL_SCRIPT) $$files "$(DESTDIR)$(pythondir)$$dir" || exit $$?; \
- } \
- ; done
-
-uninstall-dist_pythonSCRIPTS:
- @$(NORMAL_UNINSTALL)
- @list='$(dist_python_SCRIPTS)'; test -n "$(pythondir)" || exit 0; \
- files=`for p in $$list; do echo "$$p"; done | \
- sed -e 's,.*/,,;$(transform)'`; \
- dir='$(DESTDIR)$(pythondir)'; $(am__uninstall_files_from_dir)
-install-dist_basesDATA: $(dist_bases_DATA)
- @$(NORMAL_INSTALL)
- @list='$(dist_bases_DATA)'; test -n "$(basesdir)" || list=; \
- if test -n "$$list"; then \
- echo " $(MKDIR_P) '$(DESTDIR)$(basesdir)'"; \
- $(MKDIR_P) "$(DESTDIR)$(basesdir)" || exit 1; \
- fi; \
- for p in $$list; do \
- if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
- echo "$$d$$p"; \
- done | $(am__base_list) | \
- while read files; do \
- echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(basesdir)'"; \
- $(INSTALL_DATA) $$files "$(DESTDIR)$(basesdir)" || exit $$?; \
- done
-
-uninstall-dist_basesDATA:
- @$(NORMAL_UNINSTALL)
- @list='$(dist_bases_DATA)'; test -n "$(basesdir)" || list=; \
- files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
- dir='$(DESTDIR)$(basesdir)'; $(am__uninstall_files_from_dir)
-install-dist_bases_framework_servicesDATA: $(dist_bases_framework_services_DATA)
- @$(NORMAL_INSTALL)
- @list='$(dist_bases_framework_services_DATA)'; test -n "$(bases_framework_servicesdir)" || list=; \
- if test -n "$$list"; then \
- echo " $(MKDIR_P) '$(DESTDIR)$(bases_framework_servicesdir)'"; \
- $(MKDIR_P) "$(DESTDIR)$(bases_framework_servicesdir)" || exit 1; \
- fi; \
- for p in $$list; do \
- if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
- echo "$$d$$p"; \
- done | $(am__base_list) | \
- while read files; do \
- echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(bases_framework_servicesdir)'"; \
- $(INSTALL_DATA) $$files "$(DESTDIR)$(bases_framework_servicesdir)" || exit $$?; \
- done
-
-uninstall-dist_bases_framework_servicesDATA:
- @$(NORMAL_UNINSTALL)
- @list='$(dist_bases_framework_services_DATA)'; test -n "$(bases_framework_servicesdir)" || list=; \
- files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
- dir='$(DESTDIR)$(bases_framework_servicesdir)'; $(am__uninstall_files_from_dir)
-install-dist_libconfigDATA: $(dist_libconfig_DATA)
- @$(NORMAL_INSTALL)
- @list='$(dist_libconfig_DATA)'; test -n "$(libconfigdir)" || list=; \
- if test -n "$$list"; then \
- echo " $(MKDIR_P) '$(DESTDIR)$(libconfigdir)'"; \
- $(MKDIR_P) "$(DESTDIR)$(libconfigdir)" || exit 1; \
- fi; \
- for p in $$list; do \
- if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
- echo "$$d$$p"; \
- done | $(am__base_list) | \
- while read files; do \
- echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(libconfigdir)'"; \
- $(INSTALL_DATA) $$files "$(DESTDIR)$(libconfigdir)" || exit $$?; \
- done
-
-uninstall-dist_libconfigDATA:
- @$(NORMAL_UNINSTALL)
- @list='$(dist_libconfig_DATA)'; test -n "$(libconfigdir)" || list=; \
- files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
- dir='$(DESTDIR)$(libconfigdir)'; $(am__uninstall_files_from_dir)
-install-dist_pythonDATA: $(dist_python_DATA)
- @$(NORMAL_INSTALL)
- @list='$(dist_python_DATA)'; test -n "$(pythondir)" || list=; \
- if test -n "$$list"; then \
- echo " $(MKDIR_P) '$(DESTDIR)$(pythondir)'"; \
- $(MKDIR_P) "$(DESTDIR)$(pythondir)" || exit 1; \
- fi; \
- for p in $$list; do \
- if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
- echo "$$d$$p"; \
- done | $(am__base_list) | \
- while read files; do \
- echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(pythondir)'"; \
- $(INSTALL_DATA) $$files "$(DESTDIR)$(pythondir)" || exit $$?; \
- done
-
-uninstall-dist_pythonDATA:
- @$(NORMAL_UNINSTALL)
- @list='$(dist_python_DATA)'; test -n "$(pythondir)" || list=; \
- files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
- dir='$(DESTDIR)$(pythondir)'; $(am__uninstall_files_from_dir)
-install-dist_python_urllib3DATA: $(dist_python_urllib3_DATA)
- @$(NORMAL_INSTALL)
- @list='$(dist_python_urllib3_DATA)'; test -n "$(python_urllib3dir)" || list=; \
- if test -n "$$list"; then \
- echo " $(MKDIR_P) '$(DESTDIR)$(python_urllib3dir)'"; \
- $(MKDIR_P) "$(DESTDIR)$(python_urllib3dir)" || exit 1; \
- fi; \
- for p in $$list; do \
- if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
- echo "$$d$$p"; \
- done | $(am__base_list) | \
- while read files; do \
- echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(python_urllib3dir)'"; \
- $(INSTALL_DATA) $$files "$(DESTDIR)$(python_urllib3dir)" || exit $$?; \
- done
-
-uninstall-dist_python_urllib3DATA:
- @$(NORMAL_UNINSTALL)
- @list='$(dist_python_urllib3_DATA)'; test -n "$(python_urllib3dir)" || list=; \
- files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
- dir='$(DESTDIR)$(python_urllib3dir)'; $(am__uninstall_files_from_dir)
-install-dist_python_urllib3_backportsDATA: $(dist_python_urllib3_backports_DATA)
- @$(NORMAL_INSTALL)
- @list='$(dist_python_urllib3_backports_DATA)'; test -n "$(python_urllib3_backportsdir)" || list=; \
- if test -n "$$list"; then \
- echo " $(MKDIR_P) '$(DESTDIR)$(python_urllib3_backportsdir)'"; \
- $(MKDIR_P) "$(DESTDIR)$(python_urllib3_backportsdir)" || exit 1; \
- fi; \
- for p in $$list; do \
- if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
- echo "$$d$$p"; \
- done | $(am__base_list) | \
- while read files; do \
- echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(python_urllib3_backportsdir)'"; \
- $(INSTALL_DATA) $$files "$(DESTDIR)$(python_urllib3_backportsdir)" || exit $$?; \
- done
-
-uninstall-dist_python_urllib3_backportsDATA:
- @$(NORMAL_UNINSTALL)
- @list='$(dist_python_urllib3_backports_DATA)'; test -n "$(python_urllib3_backportsdir)" || list=; \
- files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
- dir='$(DESTDIR)$(python_urllib3_backportsdir)'; $(am__uninstall_files_from_dir)
-install-dist_python_urllib3_contribDATA: $(dist_python_urllib3_contrib_DATA)
- @$(NORMAL_INSTALL)
- @list='$(dist_python_urllib3_contrib_DATA)'; test -n "$(python_urllib3_contribdir)" || list=; \
- if test -n "$$list"; then \
- echo " $(MKDIR_P) '$(DESTDIR)$(python_urllib3_contribdir)'"; \
- $(MKDIR_P) "$(DESTDIR)$(python_urllib3_contribdir)" || exit 1; \
- fi; \
- for p in $$list; do \
- if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
- echo "$$d$$p"; \
- done | $(am__base_list) | \
- while read files; do \
- echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(python_urllib3_contribdir)'"; \
- $(INSTALL_DATA) $$files "$(DESTDIR)$(python_urllib3_contribdir)" || exit $$?; \
- done
-
-uninstall-dist_python_urllib3_contribDATA:
- @$(NORMAL_UNINSTALL)
- @list='$(dist_python_urllib3_contrib_DATA)'; test -n "$(python_urllib3_contribdir)" || list=; \
- files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
- dir='$(DESTDIR)$(python_urllib3_contribdir)'; $(am__uninstall_files_from_dir)
-install-dist_python_urllib3_packagesDATA: $(dist_python_urllib3_packages_DATA)
- @$(NORMAL_INSTALL)
- @list='$(dist_python_urllib3_packages_DATA)'; test -n "$(python_urllib3_packagesdir)" || list=; \
- if test -n "$$list"; then \
- echo " $(MKDIR_P) '$(DESTDIR)$(python_urllib3_packagesdir)'"; \
- $(MKDIR_P) "$(DESTDIR)$(python_urllib3_packagesdir)" || exit 1; \
- fi; \
- for p in $$list; do \
- if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
- echo "$$d$$p"; \
- done | $(am__base_list) | \
- while read files; do \
- echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(python_urllib3_packagesdir)'"; \
- $(INSTALL_DATA) $$files "$(DESTDIR)$(python_urllib3_packagesdir)" || exit $$?; \
- done
-
-uninstall-dist_python_urllib3_packagesDATA:
- @$(NORMAL_UNINSTALL)
- @list='$(dist_python_urllib3_packages_DATA)'; test -n "$(python_urllib3_packagesdir)" || list=; \
- files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
- dir='$(DESTDIR)$(python_urllib3_packagesdir)'; $(am__uninstall_files_from_dir)
-install-dist_python_urllib3_securetransportDATA: $(dist_python_urllib3_securetransport_DATA)
- @$(NORMAL_INSTALL)
- @list='$(dist_python_urllib3_securetransport_DATA)'; test -n "$(python_urllib3_securetransportdir)" || list=; \
- if test -n "$$list"; then \
- echo " $(MKDIR_P) '$(DESTDIR)$(python_urllib3_securetransportdir)'"; \
- $(MKDIR_P) "$(DESTDIR)$(python_urllib3_securetransportdir)" || exit 1; \
- fi; \
- for p in $$list; do \
- if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
- echo "$$d$$p"; \
- done | $(am__base_list) | \
- while read files; do \
- echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(python_urllib3_securetransportdir)'"; \
- $(INSTALL_DATA) $$files "$(DESTDIR)$(python_urllib3_securetransportdir)" || exit $$?; \
- done
-
-uninstall-dist_python_urllib3_securetransportDATA:
- @$(NORMAL_UNINSTALL)
- @list='$(dist_python_urllib3_securetransport_DATA)'; test -n "$(python_urllib3_securetransportdir)" || list=; \
- files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
- dir='$(DESTDIR)$(python_urllib3_securetransportdir)'; $(am__uninstall_files_from_dir)
-install-dist_python_urllib3_ssl_match_hostnameDATA: $(dist_python_urllib3_ssl_match_hostname_DATA)
- @$(NORMAL_INSTALL)
- @list='$(dist_python_urllib3_ssl_match_hostname_DATA)'; test -n "$(python_urllib3_ssl_match_hostnamedir)" || list=; \
- if test -n "$$list"; then \
- echo " $(MKDIR_P) '$(DESTDIR)$(python_urllib3_ssl_match_hostnamedir)'"; \
- $(MKDIR_P) "$(DESTDIR)$(python_urllib3_ssl_match_hostnamedir)" || exit 1; \
- fi; \
- for p in $$list; do \
- if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
- echo "$$d$$p"; \
- done | $(am__base_list) | \
- while read files; do \
- echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(python_urllib3_ssl_match_hostnamedir)'"; \
- $(INSTALL_DATA) $$files "$(DESTDIR)$(python_urllib3_ssl_match_hostnamedir)" || exit $$?; \
- done
-
-uninstall-dist_python_urllib3_ssl_match_hostnameDATA:
- @$(NORMAL_UNINSTALL)
- @list='$(dist_python_urllib3_ssl_match_hostname_DATA)'; test -n "$(python_urllib3_ssl_match_hostnamedir)" || list=; \
- files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
- dir='$(DESTDIR)$(python_urllib3_ssl_match_hostnamedir)'; $(am__uninstall_files_from_dir)
-install-dist_python_urllib3_utilDATA: $(dist_python_urllib3_util_DATA)
- @$(NORMAL_INSTALL)
- @list='$(dist_python_urllib3_util_DATA)'; test -n "$(python_urllib3_utildir)" || list=; \
- if test -n "$$list"; then \
- echo " $(MKDIR_P) '$(DESTDIR)$(python_urllib3_utildir)'"; \
- $(MKDIR_P) "$(DESTDIR)$(python_urllib3_utildir)" || exit 1; \
- fi; \
- for p in $$list; do \
- if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
- echo "$$d$$p"; \
- done | $(am__base_list) | \
- while read files; do \
- echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(python_urllib3_utildir)'"; \
- $(INSTALL_DATA) $$files "$(DESTDIR)$(python_urllib3_utildir)" || exit $$?; \
- done
-
-uninstall-dist_python_urllib3_utilDATA:
- @$(NORMAL_UNINSTALL)
- @list='$(dist_python_urllib3_util_DATA)'; test -n "$(python_urllib3_utildir)" || list=; \
- files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
- dir='$(DESTDIR)$(python_urllib3_utildir)'; $(am__uninstall_files_from_dir)
-install-dist_pythonconfigDATA: $(dist_pythonconfig_DATA)
- @$(NORMAL_INSTALL)
- @list='$(dist_pythonconfig_DATA)'; test -n "$(pythonconfigdir)" || list=; \
- if test -n "$$list"; then \
- echo " $(MKDIR_P) '$(DESTDIR)$(pythonconfigdir)'"; \
- $(MKDIR_P) "$(DESTDIR)$(pythonconfigdir)" || exit 1; \
- fi; \
- for p in $$list; do \
- if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
- echo "$$d$$p"; \
- done | $(am__base_list) | \
- while read files; do \
- echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(pythonconfigdir)'"; \
- $(INSTALL_DATA) $$files "$(DESTDIR)$(pythonconfigdir)" || exit $$?; \
- done
-
-uninstall-dist_pythonconfigDATA:
- @$(NORMAL_UNINSTALL)
- @list='$(dist_pythonconfig_DATA)'; test -n "$(pythonconfigdir)" || list=; \
- files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
- dir='$(DESTDIR)$(pythonconfigdir)'; $(am__uninstall_files_from_dir)
-install-dist_pythonmodulesDATA: $(dist_pythonmodules_DATA)
- @$(NORMAL_INSTALL)
- @list='$(dist_pythonmodules_DATA)'; test -n "$(pythonmodulesdir)" || list=; \
- if test -n "$$list"; then \
- echo " $(MKDIR_P) '$(DESTDIR)$(pythonmodulesdir)'"; \
- $(MKDIR_P) "$(DESTDIR)$(pythonmodulesdir)" || exit 1; \
- fi; \
- for p in $$list; do \
- if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
- echo "$$d$$p"; \
- done | $(am__base_list) | \
- while read files; do \
- echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(pythonmodulesdir)'"; \
- $(INSTALL_DATA) $$files "$(DESTDIR)$(pythonmodulesdir)" || exit $$?; \
- done
-
-uninstall-dist_pythonmodulesDATA:
- @$(NORMAL_UNINSTALL)
- @list='$(dist_pythonmodules_DATA)'; test -n "$(pythonmodulesdir)" || list=; \
- files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
- dir='$(DESTDIR)$(pythonmodulesdir)'; $(am__uninstall_files_from_dir)
-install-dist_pythonyaml2DATA: $(dist_pythonyaml2_DATA)
- @$(NORMAL_INSTALL)
- @list='$(dist_pythonyaml2_DATA)'; test -n "$(pythonyaml2dir)" || list=; \
- if test -n "$$list"; then \
- echo " $(MKDIR_P) '$(DESTDIR)$(pythonyaml2dir)'"; \
- $(MKDIR_P) "$(DESTDIR)$(pythonyaml2dir)" || exit 1; \
- fi; \
- for p in $$list; do \
- if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
- echo "$$d$$p"; \
- done | $(am__base_list) | \
- while read files; do \
- echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(pythonyaml2dir)'"; \
- $(INSTALL_DATA) $$files "$(DESTDIR)$(pythonyaml2dir)" || exit $$?; \
- done
-
-uninstall-dist_pythonyaml2DATA:
- @$(NORMAL_UNINSTALL)
- @list='$(dist_pythonyaml2_DATA)'; test -n "$(pythonyaml2dir)" || list=; \
- files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
- dir='$(DESTDIR)$(pythonyaml2dir)'; $(am__uninstall_files_from_dir)
-install-dist_pythonyaml3DATA: $(dist_pythonyaml3_DATA)
- @$(NORMAL_INSTALL)
- @list='$(dist_pythonyaml3_DATA)'; test -n "$(pythonyaml3dir)" || list=; \
- if test -n "$$list"; then \
- echo " $(MKDIR_P) '$(DESTDIR)$(pythonyaml3dir)'"; \
- $(MKDIR_P) "$(DESTDIR)$(pythonyaml3dir)" || exit 1; \
- fi; \
- for p in $$list; do \
- if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
- echo "$$d$$p"; \
- done | $(am__base_list) | \
- while read files; do \
- echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(pythonyaml3dir)'"; \
- $(INSTALL_DATA) $$files "$(DESTDIR)$(pythonyaml3dir)" || exit $$?; \
- done
-
-uninstall-dist_pythonyaml3DATA:
- @$(NORMAL_UNINSTALL)
- @list='$(dist_pythonyaml3_DATA)'; test -n "$(pythonyaml3dir)" || list=; \
- files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
- dir='$(DESTDIR)$(pythonyaml3dir)'; $(am__uninstall_files_from_dir)
-install-dist_third_partyDATA: $(dist_third_party_DATA)
- @$(NORMAL_INSTALL)
- @list='$(dist_third_party_DATA)'; test -n "$(third_partydir)" || list=; \
- if test -n "$$list"; then \
- echo " $(MKDIR_P) '$(DESTDIR)$(third_partydir)'"; \
- $(MKDIR_P) "$(DESTDIR)$(third_partydir)" || exit 1; \
- fi; \
- for p in $$list; do \
- if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
- echo "$$d$$p"; \
- done | $(am__base_list) | \
- while read files; do \
- echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(third_partydir)'"; \
- $(INSTALL_DATA) $$files "$(DESTDIR)$(third_partydir)" || exit $$?; \
- done
-
-uninstall-dist_third_partyDATA:
- @$(NORMAL_UNINSTALL)
- @list='$(dist_third_party_DATA)'; test -n "$(third_partydir)" || list=; \
- files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
- dir='$(DESTDIR)$(third_partydir)'; $(am__uninstall_files_from_dir)
-install-dist_userpythonconfigDATA: $(dist_userpythonconfig_DATA)
- @$(NORMAL_INSTALL)
- @list='$(dist_userpythonconfig_DATA)'; test -n "$(userpythonconfigdir)" || list=; \
- if test -n "$$list"; then \
- echo " $(MKDIR_P) '$(DESTDIR)$(userpythonconfigdir)'"; \
- $(MKDIR_P) "$(DESTDIR)$(userpythonconfigdir)" || exit 1; \
- fi; \
- for p in $$list; do \
- if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
- echo "$$d$$p"; \
- done | $(am__base_list) | \
- while read files; do \
- echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(userpythonconfigdir)'"; \
- $(INSTALL_DATA) $$files "$(DESTDIR)$(userpythonconfigdir)" || exit $$?; \
- done
-
-uninstall-dist_userpythonconfigDATA:
- @$(NORMAL_UNINSTALL)
- @list='$(dist_userpythonconfig_DATA)'; test -n "$(userpythonconfigdir)" || list=; \
- files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
- dir='$(DESTDIR)$(userpythonconfigdir)'; $(am__uninstall_files_from_dir)
-tags TAGS:
-
-ctags CTAGS:
-
-cscope cscopelist:
-
-
-distdir: $(DISTFILES)
- @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
- topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
- list='$(DISTFILES)'; \
- dist_files=`for file in $$list; do echo $$file; done | \
- sed -e "s|^$$srcdirstrip/||;t" \
- -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \
- case $$dist_files in \
- */*) $(MKDIR_P) `echo "$$dist_files" | \
- sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \
- sort -u` ;; \
- esac; \
- for file in $$dist_files; do \
- if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \
- if test -d $$d/$$file; then \
- dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \
- if test -d "$(distdir)/$$file"; then \
- find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
- fi; \
- if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \
- cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \
- find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
- fi; \
- cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \
- else \
- test -f "$(distdir)/$$file" \
- || cp -p $$d/$$file "$(distdir)/$$file" \
- || exit 1; \
- fi; \
- done
-check-am: all-am
-check: check-am
-all-am: Makefile $(SCRIPTS) $(DATA)
-installdirs:
- for dir in "$(DESTDIR)$(pluginsdir)" "$(DESTDIR)$(pythondir)" "$(DESTDIR)$(basesdir)" "$(DESTDIR)$(bases_framework_servicesdir)" "$(DESTDIR)$(libconfigdir)" "$(DESTDIR)$(pythondir)" "$(DESTDIR)$(python_urllib3dir)" "$(DESTDIR)$(python_urllib3_backportsdir)" "$(DESTDIR)$(python_urllib3_contribdir)" "$(DESTDIR)$(python_urllib3_packagesdir)" "$(DESTDIR)$(python_urllib3_securetransportdir)" "$(DESTDIR)$(python_urllib3_ssl_match_hostnamedir)" "$(DESTDIR)$(python_urllib3_utildir)" "$(DESTDIR)$(pythonconfigdir)" "$(DESTDIR)$(pythonmodulesdir)" "$(DESTDIR)$(pythonyaml2dir)" "$(DESTDIR)$(pythonyaml3dir)" "$(DESTDIR)$(third_partydir)" "$(DESTDIR)$(userpythonconfigdir)"; do \
- test -z "$$dir" || $(MKDIR_P) "$$dir"; \
- done
-install: install-am
-install-exec: install-exec-am
-install-data: install-data-am
-uninstall: uninstall-am
-
-install-am: all-am
- @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
-
-installcheck: installcheck-am
-install-strip:
- if test -z '$(STRIP)'; then \
- $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
- install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
- install; \
- else \
- $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
- install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
- "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \
- fi
-mostlyclean-generic:
-
-clean-generic:
- -test -z "$(CLEANFILES)" || rm -f $(CLEANFILES)
-
-distclean-generic:
- -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
- -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES)
-
-maintainer-clean-generic:
- @echo "This command is intended for maintainers to use"
- @echo "it deletes files that may require special tools to rebuild."
- -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES)
-clean: clean-am
-
-clean-am: clean-generic mostlyclean-am
-
-distclean: distclean-am
- -rm -f Makefile
-distclean-am: clean-am distclean-generic
-
-dvi: dvi-am
-
-dvi-am:
-
-html: html-am
-
-html-am:
-
-info: info-am
-
-info-am:
-
-install-data-am: install-dist_basesDATA \
- install-dist_bases_framework_servicesDATA \
- install-dist_libconfigDATA install-dist_pluginsSCRIPTS \
- install-dist_pythonDATA install-dist_pythonSCRIPTS \
- install-dist_python_urllib3DATA \
- install-dist_python_urllib3_backportsDATA \
- install-dist_python_urllib3_contribDATA \
- install-dist_python_urllib3_packagesDATA \
- install-dist_python_urllib3_securetransportDATA \
- install-dist_python_urllib3_ssl_match_hostnameDATA \
- install-dist_python_urllib3_utilDATA \
- install-dist_pythonconfigDATA install-dist_pythonmodulesDATA \
- install-dist_pythonyaml2DATA install-dist_pythonyaml3DATA \
- install-dist_third_partyDATA install-dist_userpythonconfigDATA
-
-install-dvi: install-dvi-am
-
-install-dvi-am:
-
-install-exec-am:
-
-install-html: install-html-am
-
-install-html-am:
-
-install-info: install-info-am
-
-install-info-am:
-
-install-man:
-
-install-pdf: install-pdf-am
-
-install-pdf-am:
-
-install-ps: install-ps-am
-
-install-ps-am:
-
-installcheck-am:
-
-maintainer-clean: maintainer-clean-am
- -rm -f Makefile
-maintainer-clean-am: distclean-am maintainer-clean-generic
-
-mostlyclean: mostlyclean-am
-
-mostlyclean-am: mostlyclean-generic
-
-pdf: pdf-am
-
-pdf-am:
-
-ps: ps-am
-
-ps-am:
-
-uninstall-am: uninstall-dist_basesDATA \
- uninstall-dist_bases_framework_servicesDATA \
- uninstall-dist_libconfigDATA uninstall-dist_pluginsSCRIPTS \
- uninstall-dist_pythonDATA uninstall-dist_pythonSCRIPTS \
- uninstall-dist_python_urllib3DATA \
- uninstall-dist_python_urllib3_backportsDATA \
- uninstall-dist_python_urllib3_contribDATA \
- uninstall-dist_python_urllib3_packagesDATA \
- uninstall-dist_python_urllib3_securetransportDATA \
- uninstall-dist_python_urllib3_ssl_match_hostnameDATA \
- uninstall-dist_python_urllib3_utilDATA \
- uninstall-dist_pythonconfigDATA \
- uninstall-dist_pythonmodulesDATA \
- uninstall-dist_pythonyaml2DATA uninstall-dist_pythonyaml3DATA \
- uninstall-dist_third_partyDATA \
- uninstall-dist_userpythonconfigDATA
-
-.MAKE: install-am install-strip
-
-.PHONY: all all-am check check-am clean clean-generic cscopelist-am \
- ctags-am distclean distclean-generic distdir dvi dvi-am html \
- html-am info info-am install install-am install-data \
- install-data-am install-dist_basesDATA \
- install-dist_bases_framework_servicesDATA \
- install-dist_libconfigDATA install-dist_pluginsSCRIPTS \
- install-dist_pythonDATA install-dist_pythonSCRIPTS \
- install-dist_python_urllib3DATA \
- install-dist_python_urllib3_backportsDATA \
- install-dist_python_urllib3_contribDATA \
- install-dist_python_urllib3_packagesDATA \
- install-dist_python_urllib3_securetransportDATA \
- install-dist_python_urllib3_ssl_match_hostnameDATA \
- install-dist_python_urllib3_utilDATA \
- install-dist_pythonconfigDATA install-dist_pythonmodulesDATA \
- install-dist_pythonyaml2DATA install-dist_pythonyaml3DATA \
- install-dist_third_partyDATA install-dist_userpythonconfigDATA \
- install-dvi install-dvi-am install-exec install-exec-am \
- install-html install-html-am install-info install-info-am \
- install-man install-pdf install-pdf-am install-ps \
- install-ps-am install-strip installcheck installcheck-am \
- installdirs maintainer-clean maintainer-clean-generic \
- mostlyclean mostlyclean-generic pdf pdf-am ps ps-am tags-am \
- uninstall uninstall-am uninstall-dist_basesDATA \
- uninstall-dist_bases_framework_servicesDATA \
- uninstall-dist_libconfigDATA uninstall-dist_pluginsSCRIPTS \
- uninstall-dist_pythonDATA uninstall-dist_pythonSCRIPTS \
- uninstall-dist_python_urllib3DATA \
- uninstall-dist_python_urllib3_backportsDATA \
- uninstall-dist_python_urllib3_contribDATA \
- uninstall-dist_python_urllib3_packagesDATA \
- uninstall-dist_python_urllib3_securetransportDATA \
- uninstall-dist_python_urllib3_ssl_match_hostnameDATA \
- uninstall-dist_python_urllib3_utilDATA \
- uninstall-dist_pythonconfigDATA \
- uninstall-dist_pythonmodulesDATA \
- uninstall-dist_pythonyaml2DATA uninstall-dist_pythonyaml3DATA \
- uninstall-dist_third_partyDATA \
- uninstall-dist_userpythonconfigDATA
-
-.in:
- if sed \
- -e 's#[@]localstatedir_POST@#$(localstatedir)#g' \
- -e 's#[@]sbindir_POST@#$(sbindir)#g' \
- -e 's#[@]sysconfdir_POST@#$(sysconfdir)#g' \
- -e 's#[@]pythondir_POST@#$(pythondir)#g' \
- -e 's#[@]configdir_POST@#$(configdir)#g' \
- -e 's#[@]libconfigdir_POST@#$(libconfigdir)#g' \
- -e 's#[@]cachedir_POST@#$(cachedir)#g' \
- $< > $@.tmp; then \
- mv "$@.tmp" "$@"; \
- else \
- rm -f "$@.tmp"; \
- false; \
- fi
-
-# Tell versions [3.59,3.63) of GNU make to not export all variables.
-# Otherwise a system limit (for SysV at least) may be exceeded.
-.NOEXPORT:
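
The `.in:` suffix rule deleted above is the template-rendering step of the old build: `sed` replaces each `@name_POST@` placeholder with the configured directory, writes to `$@.tmp`, and renames the temp file into place only if the substitution succeeded. A minimal Python sketch of the same substitute-then-rename pattern (the placeholder names come from the rule above; the concrete paths are hypothetical examples, not Netdata defaults):

```python
import os

# Placeholder -> value map, mirroring the sed expressions in the deleted rule.
# The paths on the right are hypothetical examples.
SUBSTITUTIONS = {
    '@localstatedir_POST@': '/var/lib/netdata',
    '@sbindir_POST@': '/usr/sbin',
    '@sysconfdir_POST@': '/etc',
    '@pythondir_POST@': '/usr/libexec/netdata/python.d',
    '@configdir_POST@': '/etc/netdata',
    '@libconfigdir_POST@': '/usr/lib/netdata/conf.d',
    '@cachedir_POST@': '/var/cache/netdata',
}


def render_template(src, dst):
    """Render dst from the src template; write to a temp file first and
    rename only on success, like the mv / rm -f pair in the Make rule."""
    tmp = dst + '.tmp'
    try:
        with open(src) as f:
            text = f.read()
        for placeholder, value in SUBSTITUTIONS.items():
            text = text.replace(placeholder, value)
        with open(tmp, 'w') as f:
            f.write(text)
        os.rename(tmp, dst)
    except OSError:
        if os.path.exists(tmp):
            os.remove(tmp)
        raise
```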
diff --git a/collectors/python.d.plugin/README.md b/collectors/python.d.plugin/README.md
index 673fc2c9..8955197a 100644
--- a/collectors/python.d.plugin/README.md
+++ b/collectors/python.d.plugin/README.md
@@ -9,21 +9,6 @@
5. Allows each **module** to have one or more data collection **jobs**
6. Each **job** is collecting one or more metrics from a single data source
-## Pull Request Checklist for Python Plugins
-
-This is a generic checklist for submitting a new Python plugin for Netdata. It is by no means comprehensive.
-
-At minimum, to be buildable and testable, the PR needs to include:
-
-* The module itself, following proper naming conventions: `python.d/<module_dir>/<module_name>.chart.py`
-* A README.md file for the plugin under `python.d/<module_dir>`.
-* The configuration file for the module: `conf.d/python.d/<module_name>.conf`. Python config files are in YAML format, and should include comments describing what options are present. The instructions are also needed in the configuration section of the README.md
-* A basic configuration for the plugin in the appropriate global config file: `conf.d/python.d.conf`, which is also in YAML format. Either add a line that reads `# <module_name>: yes` if the module is to be enabled by default, or one that reads `<module_name>: no` if it is to be disabled by default.
-* A line for the plugin in `python.d/Makefile.am` under `dist_python_DATA`.
-* A line for the plugin configuration file in `conf.d/Makefile.am`, under `dist_pythonconfig_DATA`
-* Optionally, chart information in `web/dashboard_info.js`. This generally involves specifying a name and icon for the section, and may include descriptions for the section or individual charts.
-
-
## Disclaimer
Every module should be compatible with python2 and python3.
@@ -36,7 +21,6 @@ Every configuration file must have one of two formats:
```yaml
update_every : 2 # update frequency
-retries : 1 # how many failures in update() is tolerated
priority : 20000 # where it is shown on dashboard
other_var1 : bla # variables passed to module
@@ -48,7 +32,6 @@ other_var2 : alb
```yaml
# module defaults:
update_every : 2
-retries : 1
priority : 20000
local: # job name
@@ -57,13 +40,25 @@ local: # job name
other_job:
priority : 5 # job position on dashboard
- retries : 20 # job retries
other_var2 : val # module specific variable
```
-`update_every`, `retries`, and `priority` are always optional.
+`update_every` and `priority` are always optional.
----
+## How to debug a python module
+
+```
+# become user netdata
+sudo su -s /bin/bash netdata
+```
+Depending on where Netdata was installed, execute one of the following commands to trace the execution of a python module:
+
+```
+# execute the plugin in debug mode, for a specific module
+/opt/netdata/usr/libexec/netdata/plugins.d/python.d.plugin <module> debug trace
+/usr/libexec/netdata/plugins.d/python.d.plugin <module> debug trace
+```
+Where `<module>` is the directory name under https://github.com/netdata/netdata/tree/master/collectors/python.d.plugin.
## How to write a new module
@@ -74,7 +69,9 @@ Writing new python module is simple. You just need to remember to include 5 majo
- **_get_data** method
- all code needs to be compatible with Python 2 (**≥ 2.7**) *and* 3 (**≥ 3.1**)
-If you plan to submit the module in a PR, make sure and go through the [PR checklist for new modules](#pull-request-checklist-for-python-plugins) beforehand to make sure you have updated all the files you need to.
+If you plan to submit the module in a PR, go through the [PR checklist for new modules](#pull-request-checklist-for-python-plugins) beforehand to make sure you have updated all the files you need to.
+
+For a quick start, you can look at the [example plugin](example/example.chart.py).
### Global variables `ORDER` and `CHART`
@@ -210,3 +207,19 @@ Sockets are accessed in non-blocking mode with 15 second timeout.
After every execution of `_get_raw_data` the socket is closed; to prevent this, the module needs to set the `_keep_alive` variable to `True` and implement a custom `_check_raw_data` method.
`_check_raw_data` should take the raw data and return `True` if all data has been received, otherwise `False`. It should also do this in a fast and efficient way.
+
+## Pull Request Checklist for Python Plugins
+
+This is a generic checklist for submitting a new Python plugin for Netdata. It is by no means comprehensive.
+
+At minimum, to be buildable and testable, the PR needs to include:
+
+* The module itself, following proper naming conventions: `python.d/<module_dir>/<module_name>.chart.py`
+* A README.md file for the plugin under `python.d/<module_dir>`.
+* The configuration file for the module: `conf.d/python.d/<module_name>.conf`. Python config files are in YAML format and should include comments describing the available options. These instructions should also appear in the configuration section of the README.md.
+* A basic configuration for the plugin in the appropriate global config file: `conf.d/python.d.conf`, which is also in YAML format. Either add a line that reads `# <module_name>: yes` if the module is to be enabled by default, or one that reads `<module_name>: no` if it is to be disabled by default.
+* A line for the plugin in `python.d/Makefile.am` under `dist_python_DATA`.
+* A line for the plugin configuration file in `conf.d/Makefile.am`, under `dist_pythonconfig_DATA`
+* Optionally, chart information in `web/dashboard_info.js`. This generally involves specifying a name and icon for the section, and may include descriptions for the section or individual charts.
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
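
The keep-alive note in the hunk above is terse, so here is a minimal sketch of a `SocketService`-based module that sets `_keep_alive` and implements a custom `_check_raw_data`. The framework class and the two hooks are real; the module name, request string, and the trailing-newline framing rule are assumptions invented for illustration:

```python
# Hypothetical module sketch; the line-oriented "stats\n" protocol is invented.
from bases.FrameworkServices.SocketService import SocketService

ORDER = ['requests']

CHARTS = {
    'requests': {
        'options': [None, 'Requests', 'requests/s', 'requests', 'example.requests', 'line'],
        'lines': [
            ['requests', None, 'incremental'],
        ]
    },
}


class Service(SocketService):
    def __init__(self, configuration=None, name=None):
        SocketService.__init__(self, configuration=configuration, name=name)
        self.order = ORDER
        self.definitions = CHARTS
        self.request = 'stats\n'
        self._keep_alive = True  # keep the socket open between collections

    def _check_raw_data(self, data):
        # Called while reading; return True only once the whole response
        # has arrived, so the framework stops reading without closing.
        return data.endswith('\n')

    def _get_data(self):
        raw = self._get_raw_data()
        if not raw:
            return None
        # hypothetical response format: "requests: <N>"
        try:
            return {'requests': int(raw.split(':')[1])}
        except (IndexError, ValueError):
            return None
```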
diff --git a/collectors/python.d.plugin/adaptec_raid/README.md b/collectors/python.d.plugin/adaptec_raid/README.md
index 499dc919..682280f2 100644
--- a/collectors/python.d.plugin/adaptec_raid/README.md
+++ b/collectors/python.d.plugin/adaptec_raid/README.md
@@ -44,3 +44,5 @@ adaptec_raid: yes
![image](https://user-images.githubusercontent.com/22274335/47278133-6d306680-d601-11e8-87c2-cc9c0f42d686.png)
---
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fadaptec_raid%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/collectors/python.d.plugin/adaptec_raid/adaptec_raid.conf b/collectors/python.d.plugin/adaptec_raid/adaptec_raid.conf
index 253cbf5a..fa462ec8 100644
--- a/collectors/python.d.plugin/adaptec_raid/adaptec_raid.conf
+++ b/collectors/python.d.plugin/adaptec_raid/adaptec_raid.conf
@@ -19,11 +19,9 @@
# If unset, the default for python.d.plugin is used.
# priority: 60000
-# retries sets the number of retries to be made in case of failures.
-# If unset, the default for python.d.plugin is used.
-# Attempts to restore the service are made once every update_every
-# and only if the module has collected values in the past.
-# retries: 60
+# penalty indicates whether to apply a penalty to update_every in case of failures.
+# The penalty increases after every 5 failed updates in a row; the maximum penalty is 10 minutes.
+# penalty: yes
# autodetection_retry sets the job re-check interval in seconds.
# The job is not deleted if check fails.
@@ -50,6 +48,6 @@
# # JOBs sharing a name are mutually exclusive
# update_every: 1 # the JOB's data collection frequency
# priority: 60000 # the JOB's order on the dashboard
-# retries: 60 # the JOB's number of restoration attempts
+# penalty: yes # the JOB's penalty
# autodetection_retry: 0 # the JOB's re-check interval in seconds
# ----------------------------------------------------------------------
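
This `retries` to `penalty` replacement repeats in every `.conf` diff below; the comments only describe the behaviour, so a small sketch may help. Only two facts come from the comments, namely that the penalty grows after every 5 consecutive failed updates and is capped at 10 minutes; the exact growth step in this function is an assumption for illustration:

```python
MAX_PENALTY = 600  # seconds: the documented 10-minute cap


def effective_update_every(update_every, consecutive_failures, penalty=True):
    """Collection interval after applying the failure penalty.

    Documented behaviour: the penalty increases after every 5 failed
    updates in a row, up to 10 minutes. The step size used here
    (5 * update_every per block of 5 failures) is a guess.
    """
    if not penalty or consecutive_failures < 5:
        return update_every
    blocks_of_five = consecutive_failures // 5
    extra = min(blocks_of_five * 5 * update_every, MAX_PENALTY)
    return update_every + extra


# e.g. update_every=1: unchanged for the first 4 failures, then the
# interval grows every 5th failure until it hits the 10-minute cap.
```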
diff --git a/collectors/python.d.plugin/apache/README.md b/collectors/python.d.plugin/apache/README.md
index c6d1d126..090feb07 100644
--- a/collectors/python.d.plugin/apache/README.md
+++ b/collectors/python.d.plugin/apache/README.md
@@ -46,14 +46,14 @@ priority : 90100
local:
url : 'http://localhost/server-status?auto'
- retries : 20
remote:
url : 'http://www.apache.org/server-status?auto'
update_every : 5
- retries : 4
```
Without configuration, the module attempts to connect to `http://localhost/server-status?auto`.
---
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fapache%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/collectors/python.d.plugin/apache/apache.chart.py b/collectors/python.d.plugin/apache/apache.chart.py
index d136274d..655616d0 100644
--- a/collectors/python.d.plugin/apache/apache.chart.py
+++ b/collectors/python.d.plugin/apache/apache.chart.py
@@ -5,64 +5,60 @@
from bases.FrameworkServices.UrlService import UrlService
-# default module values (can be overridden per job in `config`)
-# update_every = 2
-priority = 60000
-retries = 60
-
-# default job configuration (overridden by python.d.plugin)
-# config = {'local': {
-# 'update_every': update_every,
-# 'retries': retries,
-# 'priority': priority,
-# 'url': 'http://www.apache.org/server-status?auto'
-# }}
-
-# charts order (can be overridden if you want less charts, or different order)
-ORDER = ['requests', 'connections', 'conns_async', 'net', 'workers', 'reqpersec', 'bytespersec', 'bytesperreq']
+
+ORDER = [
+ 'requests',
+ 'connections',
+ 'conns_async',
+ 'net',
+ 'workers',
+ 'reqpersec',
+ 'bytespersec',
+ 'bytesperreq',
+]
CHARTS = {
'bytesperreq': {
- 'options': [None, 'apache Lifetime Avg. Response Size', 'bytes/request',
+ 'options': [None, 'Lifetime Avg. Request Size', 'KiB',
'statistics', 'apache.bytesperreq', 'area'],
'lines': [
- ['size_req']
+ ['size_req', 'size', 'absolute', 1, 1024 * 100000]
]},
'workers': {
- 'options': [None, 'apache Workers', 'workers', 'workers', 'apache.workers', 'stacked'],
+ 'options': [None, 'Workers', 'workers', 'workers', 'apache.workers', 'stacked'],
'lines': [
['idle'],
['busy'],
]},
'reqpersec': {
- 'options': [None, 'apache Lifetime Avg. Requests/s', 'requests/s', 'statistics',
+ 'options': [None, 'Lifetime Avg. Requests/s', 'requests/s', 'statistics',
'apache.reqpersec', 'area'],
'lines': [
- ['requests_sec']
+ ['requests_sec', 'requests', 'absolute', 1, 100000]
]},
'bytespersec': {
- 'options': [None, 'apache Lifetime Avg. Bandwidth/s', 'kilobits/s', 'statistics',
+ 'options': [None, 'Lifetime Avg. Bandwidth/s', 'kilobits/s', 'statistics',
'apache.bytespersec', 'area'],
'lines': [
- ['size_sec', None, 'absolute', 8, 1000]
+ ['size_sec', None, 'absolute', 8, 1000 * 100000]
]},
'requests': {
- 'options': [None, 'apache Requests', 'requests/s', 'requests', 'apache.requests', 'line'],
+ 'options': [None, 'Requests', 'requests/s', 'requests', 'apache.requests', 'line'],
'lines': [
['requests', None, 'incremental']
]},
'net': {
- 'options': [None, 'apache Bandwidth', 'kilobits/s', 'bandwidth', 'apache.net', 'area'],
+ 'options': [None, 'Bandwidth', 'kilobits/s', 'bandwidth', 'apache.net', 'area'],
'lines': [
['sent', None, 'incremental', 8, 1]
]},
'connections': {
- 'options': [None, 'apache Connections', 'connections', 'connections', 'apache.connections', 'line'],
+ 'options': [None, 'Connections', 'connections', 'connections', 'apache.connections', 'line'],
'lines': [
['connections']
]},
'conns_async': {
- 'options': [None, 'apache Async Connections', 'connections', 'connections', 'apache.conns_async', 'stacked'],
+ 'options': [None, 'Async Connections', 'connections', 'connections', 'apache.conns_async', 'stacked'],
'lines': [
['keepalive'],
['closing'],
@@ -86,6 +82,14 @@ ASSIGNMENT = {
'ConnsAsyncWriting': 'writing'
}
+FLOAT_VALUES = [
+ 'BytesPerReq',
+ 'ReqPerSec',
+ 'BytesPerSec',
+]
+
+LIGHTTPD_MARKER = 'idle_servers'
+
class Service(UrlService):
def __init__(self, configuration=None, name=None):
@@ -96,20 +100,15 @@ class Service(UrlService):
def check(self):
self._manager = self._build_manager()
+
data = self._get_data()
+
if not data:
return None
- if 'idle_servers' in data:
- self.module_name = 'lighttpd'
- for chart in self.definitions:
- if chart == 'workers':
- lines = self.definitions[chart]['lines']
- lines[0] = ['idle_servers', 'idle']
- lines[1] = ['busy_servers', 'busy']
- opts = self.definitions[chart]['options']
- opts[1] = opts[1].replace('apache', 'lighttpd')
- opts[4] = opts[4].replace('apache', 'lighttpd')
+ if LIGHTTPD_MARKER in data:
+ self.turn_into_lighttpd()
+
return True
def _get_data(self):
@@ -118,15 +117,44 @@ class Service(UrlService):
:return: dict
"""
raw_data = self._get_raw_data()
+
if not raw_data:
return None
+
data = dict()
- for row in raw_data.split('\n'):
- tmp = row.split(':')
- if tmp[0] in ASSIGNMENT:
- try:
- data[ASSIGNMENT[tmp[0]]] = int(float(tmp[1]))
- except (IndexError, ValueError):
- continue
+ for line in raw_data.split('\n'):
+ try:
+ parse_line(line, data)
+ except ValueError:
+ continue
+
return data or None
+
+ def turn_into_lighttpd(self):
+ self.module_name = 'lighttpd'
+ for chart in self.definitions:
+ if chart == 'workers':
+ lines = self.definitions[chart]['lines']
+ lines[0] = ['idle_servers', 'idle']
+ lines[1] = ['busy_servers', 'busy']
+ opts = self.definitions[chart]['options']
+ opts[1] = opts[1].replace('apache', 'lighttpd')
+ opts[4] = opts[4].replace('apache', 'lighttpd')
+
+
+def parse_line(line, data):
+ parts = line.split(':')
+
+ if len(parts) != 2:
+ return
+
+ key, value = parts[0], parts[1]
+
+ if key not in ASSIGNMENT:
+ return
+
+ if key in FLOAT_VALUES:
+ data[ASSIGNMENT[key]] = int(float(value) * 100000)
+ else:
+ data[ASSIGNMENT[key]] = int(value)
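
The `FLOAT_VALUES` change above is a fixed-point workaround: the plugin protocol transports integers, so the fractional `server-status` fields (`BytesPerReq`, `ReqPerSec`, `BytesPerSec`) are multiplied by 100000 on parse, and the matching chart lines divide by the same factor (e.g. `1024 * 100000` renders `size_req` in KiB). A standalone sketch of the round trip:

```python
SCALE = 100000  # fixed-point factor used by parse_line above

# parse: a fractional field such as "BytesPerReq: 256.4" is stored scaled
stored = int(float('256.4') * SCALE)   # -> 25640000

# render: the chart line's divisor undoes the scaling; the 'size_req'
# line uses divisor 1024 * SCALE, so the dashboard shows KiB
kib = stored / (1024 * SCALE)          # -> ~0.25
```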
diff --git a/collectors/python.d.plugin/apache/apache.conf b/collectors/python.d.plugin/apache/apache.conf
index 8b606f7e..84e12a57 100644
--- a/collectors/python.d.plugin/apache/apache.conf
+++ b/collectors/python.d.plugin/apache/apache.conf
@@ -27,11 +27,9 @@
# If unset, the default for python.d.plugin is used.
# priority: 60000
-# retries sets the number of retries to be made in case of failures.
-# If unset, the default for python.d.plugin is used.
-# Attempts to restore the service are made once every update_every
-# and only if the module has collected values in the past.
-# retries: 60
+# penalty indicates whether to apply a penalty to update_every in case of failures.
+# The penalty increases after every 5 failed updates in a row; the maximum penalty is 10 minutes.
+# penalty: yes
# autodetection_retry sets the job re-check interval in seconds.
# The job is not deleted if check fails.
@@ -58,7 +56,7 @@
# # JOBs sharing a name are mutually exclusive
# update_every: 1 # the JOB's data collection frequency
# priority: 60000 # the JOB's order on the dashboard
-# retries: 60 # the JOB's number of restoration attempts
+# penalty: yes # the JOB's penalty
# autodetection_retry: 0 # the JOB's re-check interval in seconds
#
# Additionally to the above, apache also supports the following:
diff --git a/collectors/python.d.plugin/beanstalk/README.md b/collectors/python.d.plugin/beanstalk/README.md
index c2d7d578..8daa3660 100644
--- a/collectors/python.d.plugin/beanstalk/README.md
+++ b/collectors/python.d.plugin/beanstalk/README.md
@@ -101,3 +101,5 @@ port : 11300
If no configuration is given, the module will attempt to connect to beanstalkd at `127.0.0.1:11300`.
---
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fbeanstalk%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/collectors/python.d.plugin/beanstalk/beanstalk.chart.py b/collectors/python.d.plugin/beanstalk/beanstalk.chart.py
index 1472b4e1..ed945a78 100644
--- a/collectors/python.d.plugin/beanstalk/beanstalk.chart.py
+++ b/collectors/python.d.plugin/beanstalk/beanstalk.chart.py
@@ -12,13 +12,18 @@ except ImportError:
from bases.FrameworkServices.SimpleService import SimpleService
from bases.loaders import safe_load
-# default module values (can be overridden per job in `config`)
-# update_every = 2
-priority = 60000
-retries = 60
-ORDER = ['cpu_usage', 'jobs_rate', 'connections_rate', 'commands_rate', 'current_tubes', 'current_jobs',
- 'current_connections', 'binlog', 'uptime']
+ORDER = [
+ 'cpu_usage',
+ 'jobs_rate',
+ 'connections_rate',
+ 'commands_rate',
+ 'current_tubes',
+ 'current_jobs',
+ 'current_connections',
+ 'binlog',
+ 'uptime',
+]
CHARTS = {
'cpu_usage': {
diff --git a/collectors/python.d.plugin/beanstalk/beanstalk.conf b/collectors/python.d.plugin/beanstalk/beanstalk.conf
index 3b11d919..7586ad26 100644
--- a/collectors/python.d.plugin/beanstalk/beanstalk.conf
+++ b/collectors/python.d.plugin/beanstalk/beanstalk.conf
@@ -27,11 +27,9 @@
# If unset, the default for python.d.plugin is used.
# priority: 60000
-# retries sets the number of retries to be made in case of failures.
-# If unset, the default for python.d.plugin is used.
-# Attempts to restore the service are made once every update_every
-# and only if the module has collected values in the past.
-# retries: 60
+# penalty indicates whether to apply a penalty to update_every in case of failures.
+# The penalty increases after every 5 failed updates in a row; the maximum penalty is 10 minutes.
+# penalty: yes
# autodetection_retry sets the job re-check interval in seconds.
# The job is not deleted if check fails.
@@ -68,7 +66,7 @@
# # JOBs sharing a name are mutually exclusive
# update_every: 1 # the JOB's data collection frequency
# priority: 60000 # the JOB's order on the dashboard
-# retries: 60 # the JOB's number of restoration attempts
+# penalty: yes # the JOB's penalty
# autodetection_retry: 0 # the JOB's re-check interval in seconds
# chart_cleanup: 10 # the JOB's chart cleanup interval in iterations
#
diff --git a/collectors/python.d.plugin/bind_rndc/README.md b/collectors/python.d.plugin/bind_rndc/README.md
index 688297ab..fefe7493 100644
--- a/collectors/python.d.plugin/bind_rndc/README.md
+++ b/collectors/python.d.plugin/bind_rndc/README.md
@@ -58,3 +58,5 @@ local:
If no configuration is given, the module will attempt to read the named.stats file at `/var/log/bind/named.stats`.
---
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fbind_rndc%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/collectors/python.d.plugin/bind_rndc/bind_rndc.chart.py b/collectors/python.d.plugin/bind_rndc/bind_rndc.chart.py
index 423232f6..7ac1bc3d 100644
--- a/collectors/python.d.plugin/bind_rndc/bind_rndc.chart.py
+++ b/collectors/python.d.plugin/bind_rndc/bind_rndc.chart.py
@@ -11,11 +11,15 @@ from subprocess import Popen
from bases.collection import find_binary
from bases.FrameworkServices.SimpleService import SimpleService
-priority = 60000
-retries = 60
+
update_every = 30
-ORDER = ['name_server_statistics', 'incoming_queries', 'outgoing_queries', 'named_stats_size']
+ORDER = [
+ 'name_server_statistics',
+ 'incoming_queries',
+ 'outgoing_queries',
+ 'named_stats_size',
+]
CHARTS = {
'name_server_statistics': {
@@ -44,7 +48,7 @@ CHARTS = {
'lines': [
]},
'named_stats_size': {
- 'options': [None, 'Named Stats File Size', 'MB', 'file size', 'bind_rndc.stats_size', 'line'],
+ 'options': [None, 'Named Stats File Size', 'MiB', 'file size', 'bind_rndc.stats_size', 'line'],
'lines': [
['stats_size', None, 'absolute', 1, 1 << 20]
]
@@ -92,10 +96,20 @@ class Service(SimpleService):
self.definitions = CHARTS
self.named_stats_path = self.configuration.get('named_stats_path', '/var/log/bind/named.stats')
self.rndc = find_binary('rndc')
- self.data = dict(nms_requests=0, nms_responses=0, nms_failure=0, nms_auth=0,
- nms_non_auth=0, nms_nxrrset=0, nms_success=0, nms_nxdomain=0,
- nms_recursion=0, nms_duplicate=0, nms_rejected_queries=0,
- nms_dropped_queries=0)
+ self.data = dict(
+ nms_requests=0,
+ nms_responses=0,
+ nms_failure=0,
+ nms_auth=0,
+ nms_non_auth=0,
+ nms_nxrrset=0,
+ nms_success=0,
+ nms_nxdomain=0,
+ nms_recursion=0,
+ nms_duplicate=0,
+ nms_rejected_queries=0,
+ nms_dropped_queries=0,
+ )
def check(self):
if not self.rndc:
diff --git a/collectors/python.d.plugin/bind_rndc/bind_rndc.conf b/collectors/python.d.plugin/bind_rndc/bind_rndc.conf
index 71958ff9..3b7e9a21 100644
--- a/collectors/python.d.plugin/bind_rndc/bind_rndc.conf
+++ b/collectors/python.d.plugin/bind_rndc/bind_rndc.conf
@@ -27,11 +27,9 @@
# If unset, the default for python.d.plugin is used.
# priority: 60000
-# retries sets the number of retries to be made in case of failures.
-# If unset, the default for python.d.plugin is used.
-# Attempts to restore the service are made once every update_every
-# and only if the module has collected values in the past.
-# retries: 60
+# penalty indicates whether to apply a penalty to update_every in case of failures.
+# The penalty increases after every 5 failed updates in a row; the maximum penalty is 10 minutes.
+# penalty: yes
# autodetection_retry sets the job re-check interval in seconds.
# The job is not deleted if check fails.
@@ -58,7 +56,7 @@
# # JOBs sharing a name are mutually exclusive
# update_every: 1 # the JOB's data collection frequency
# priority: 60000 # the JOB's order on the dashboard
-# retries: 60 # the JOB's number of restoration attempts
+# penalty: yes # the JOB's penalty
# autodetection_retry: 0 # the JOB's re-check interval in seconds
#
# Additionally to the above, bind_rndc also supports the following:
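This `retries` → `penalty` swap is repeated across every module config in this patch. A rough sketch of the backoff the new comment describes — the interval grows after every 5 consecutive failed updates, capped at 10 minutes; the exact formula lives in the python.d framework, so treat this as an approximation rather than the shipped code:

```python
MAX_PENALTY = 600  # seconds (10 minutes), per the config comment

def effective_interval(update_every, consecutive_failures, penalty=True):
    """Approximate a job's next collection interval under penalty."""
    if not penalty or consecutive_failures < 5:
        return update_every
    steps = consecutive_failures // 5  # one penalty step per 5 failures in a row
    return min(update_every + steps * update_every * 5, update_every + MAX_PENALTY)

print(effective_interval(1, 4))   # 1  -> no penalty yet
print(effective_interval(1, 25))  # backed off, but never past the cap
```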
diff --git a/collectors/python.d.plugin/boinc/README.md b/collectors/python.d.plugin/boinc/README.md
index 595bcd3c..0f0aa1c6 100644
--- a/collectors/python.d.plugin/boinc/README.md
+++ b/collectors/python.d.plugin/boinc/README.md
@@ -26,3 +26,5 @@ remote:
```
---
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fboinc%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/collectors/python.d.plugin/boinc/boinc.chart.py b/collectors/python.d.plugin/boinc/boinc.chart.py
index d14754c4..e10b28ce 100644
--- a/collectors/python.d.plugin/boinc/boinc.chart.py
+++ b/collectors/python.d.plugin/boinc/boinc.chart.py
@@ -10,7 +10,12 @@ from bases.FrameworkServices.SimpleService import SimpleService
from third_party import boinc_client
-ORDER = ['tasks', 'states', 'sched_states', 'process_states']
+ORDER = [
+ 'tasks',
+ 'states',
+ 'sched_states',
+ 'process_states',
+]
CHARTS = {
'tasks': {
@@ -141,14 +146,16 @@ class Service(SimpleService):
def _get_data(self):
if not self.is_alive():
return None
+
data = dict(_DATA_TEMPLATE)
- results = []
+
try:
results = self.client.get_tasks()
except socket.error:
self.error('Connection is dead')
self.alive = False
return None
+
for task in results:
data['total'] += 1
data[_TASK_MAP[task.state]] += 1
@@ -159,4 +166,5 @@ class Service(SimpleService):
data[_PROC_MAP[task.active_task_state]] += 1
except AttributeError:
pass
- return data
+
+ return data or None
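The new `return data or None` is a deliberate contract with the framework: an empty dict is falsy, so a cycle that collected nothing is reported as a failed update instead of a silent empty one. In isolation:

```python
def normalize(data):
    # {} is falsy -> None signals "collection failed" to python.d.plugin;
    # a populated dict passes through unchanged
    return data or None

print(normalize({}))            # None
print(normalize({'total': 3}))  # {'total': 3}
```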
diff --git a/collectors/python.d.plugin/boinc/boinc.conf b/collectors/python.d.plugin/boinc/boinc.conf
index e59d2509..16edf55c 100644
--- a/collectors/python.d.plugin/boinc/boinc.conf
+++ b/collectors/python.d.plugin/boinc/boinc.conf
@@ -27,11 +27,9 @@
# If unset, the default for python.d.plugin is used.
# priority: 60000
-# retries sets the number of retries to be made in case of failures.
-# If unset, the default for python.d.plugin is used.
-# Attempts to restore the service are made once every update_every
-# and only if the module has collected values in the past.
-# retries: 60
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
# autodetection_retry sets the job re-check interval in seconds.
# The job is not deleted if check fails.
@@ -58,7 +56,7 @@
# # JOBs sharing a name are mutually exclusive
# update_every: 1 # the JOB's data collection frequency
# priority: 60000 # the JOB's order on the dashboard
-# retries: 60 # the JOB's number of restoration attempts
+# penalty: yes # the JOB's penalty
# autodetection_retry: 0 # the JOB's re-check interval in seconds
#
# Additionally to the above, boinc also supports the following:
diff --git a/collectors/python.d.plugin/ceph/README.md b/collectors/python.d.plugin/ceph/README.md
index 29dfe5d1..1f067c61 100644
--- a/collectors/python.d.plugin/ceph/README.md
+++ b/collectors/python.d.plugin/ceph/README.md
@@ -30,3 +30,5 @@ local:
```
---
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fceph%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/collectors/python.d.plugin/ceph/ceph.chart.py b/collectors/python.d.plugin/ceph/ceph.chart.py
index 31c764d0..45b52620 100644
--- a/collectors/python.d.plugin/ceph/ceph.chart.py
+++ b/collectors/python.d.plugin/ceph/ceph.chart.py
@@ -9,14 +9,13 @@ try:
except ImportError:
CEPH = False
-import os
import json
+import os
+
from bases.FrameworkServices.SimpleService import SimpleService
# default module values (can be overridden per job in `config`)
update_every = 10
-priority = 60000
-retries = 60
ORDER = [
'general_usage',
@@ -37,7 +36,7 @@ ORDER = [
CHARTS = {
'general_usage': {
- 'options': [None, 'Ceph General Space', 'KB', 'general', 'ceph.general_usage', 'stacked'],
+ 'options': [None, 'Ceph General Space', 'KiB', 'general', 'ceph.general_usage', 'stacked'],
'lines': [
['general_available', 'avail', 'absolute'],
['general_usage', 'used', 'absolute']
@@ -50,7 +49,7 @@ CHARTS = {
]
},
'general_bytes': {
- 'options': [None, 'Ceph General Read/Write Data/s', 'KB', 'general', 'ceph.general_bytes',
+ 'options': [None, 'Ceph General Read/Write Data/s', 'KiB/s', 'general', 'ceph.general_bytes',
'area'],
'lines': [
['general_read_bytes', 'read', 'absolute', 1, 1024],
@@ -74,7 +73,7 @@ CHARTS = {
]
},
'pool_usage': {
- 'options': [None, 'Ceph Pools', 'KB', 'pool', 'ceph.pool_usage', 'line'],
+ 'options': [None, 'Ceph Pools', 'KiB', 'pool', 'ceph.pool_usage', 'line'],
'lines': []
},
'pool_objects': {
@@ -82,11 +81,11 @@ CHARTS = {
'lines': []
},
'pool_read_bytes': {
- 'options': [None, 'Ceph Read Pool Data/s', 'KB', 'pool', 'ceph.pool_read_bytes', 'area'],
+ 'options': [None, 'Ceph Read Pool Data/s', 'KiB/s', 'pool', 'ceph.pool_read_bytes', 'area'],
'lines': []
},
'pool_write_bytes': {
- 'options': [None, 'Ceph Write Pool Data/s', 'KB', 'pool', 'ceph.pool_write_bytes', 'area'],
+ 'options': [None, 'Ceph Write Pool Data/s', 'KiB/s', 'pool', 'ceph.pool_write_bytes', 'area'],
'lines': []
},
'pool_read_operations': {
@@ -98,7 +97,7 @@ CHARTS = {
'lines': []
},
'osd_usage': {
- 'options': [None, 'Ceph OSDs', 'KB', 'osd', 'ceph.osd_usage', 'line'],
+ 'options': [None, 'Ceph OSDs', 'KiB', 'osd', 'ceph.osd_usage', 'line'],
'lines': []
},
'osd_apply_latency': {
@@ -320,7 +319,7 @@ class Service(SimpleService):
return json.loads(self.cluster.mon_command(json.dumps({
'prefix': 'osd df',
'format': 'json'
- }), '')[1])
+ }), '')[1].replace('-nan', '"-nan"'))
def _get_osd_perf(self):
"""
diff --git a/collectors/python.d.plugin/ceph/ceph.conf b/collectors/python.d.plugin/ceph/ceph.conf
index 78ac1e25..4caabbf6 100644
--- a/collectors/python.d.plugin/ceph/ceph.conf
+++ b/collectors/python.d.plugin/ceph/ceph.conf
@@ -27,11 +27,9 @@
# If unset, the default for python.d.plugin is used.
# priority: 60000
-# retries sets the number of retries to be made in case of failures.
-# If unset, the default for python.d.plugin is used.
-# Attempts to restore the service are made once every update_every
-# and only if the module has collected values in the past.
-# retries: 60
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
# autodetection_retry sets the job re-check interval in seconds.
# The job is not deleted if check fails.
@@ -58,7 +56,7 @@
# # JOBs sharing a name are mutually exclusive
# update_every: 10 # the JOB's data collection frequency
# priority: 60000 # the JOB's order on the dashboard
-# retries: 60 # the JOB's number of restoration attempts
+# penalty: yes # the JOB's penalty
# autodetection_retry: 0 # the JOB's re-check interval in seconds
#
# Additionally to the above, ceph plugin also supports the following:
diff --git a/collectors/python.d.plugin/chrony/README.md b/collectors/python.d.plugin/chrony/README.md
index 30636fe7..67ed1a05 100644
--- a/collectors/python.d.plugin/chrony/README.md
+++ b/collectors/python.d.plugin/chrony/README.md
@@ -29,3 +29,5 @@ local:
```
---
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fchrony%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/collectors/python.d.plugin/chrony/chrony.chart.py b/collectors/python.d.plugin/chrony/chrony.chart.py
index fd01d4e8..91f72500 100644
--- a/collectors/python.d.plugin/chrony/chrony.chart.py
+++ b/collectors/python.d.plugin/chrony/chrony.chart.py
@@ -7,11 +7,19 @@ from bases.FrameworkServices.ExecutableService import ExecutableService
# default module values (can be overridden per job in `config`)
update_every = 5
-priority = 60000
-retries = 10
+
+CHRONY_COMMAND = 'chronyc -n tracking'
# charts order (can be overridden if you want less charts, or different order)
-ORDER = ['system', 'offsets', 'stratum', 'root', 'frequency', 'residualfreq', 'skew']
+ORDER = [
+ 'system',
+ 'offsets',
+ 'stratum',
+ 'root',
+ 'frequency',
+ 'residualfreq',
+ 'skew',
+]
CHARTS = {
'system': {
@@ -77,9 +85,9 @@ class Service(ExecutableService):
def __init__(self, configuration=None, name=None):
ExecutableService.__init__(
self, configuration=configuration, name=name)
- self.command = 'chronyc -n tracking'
self.order = ORDER
self.definitions = CHARTS
+ self.command = CHRONY_COMMAND
def _get_data(self):
"""
diff --git a/collectors/python.d.plugin/chrony/chrony.conf b/collectors/python.d.plugin/chrony/chrony.conf
index 9ac906b5..fd95519b 100644
--- a/collectors/python.d.plugin/chrony/chrony.conf
+++ b/collectors/python.d.plugin/chrony/chrony.conf
@@ -27,11 +27,9 @@ update_every: 5
# If unset, the default for python.d.plugin is used.
# priority: 60000
-# retries sets the number of retries to be made in case of failures.
-# If unset, the default for python.d.plugin is used.
-# Attempts to restore the service are made once every update_every
-# and only if the module has collected values in the past.
-# retries: 60
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
# autodetection_retry sets the job re-check interval in seconds.
# The job is not deleted if check fails.
@@ -58,7 +56,7 @@ update_every: 5
# # JOBs sharing a name are mutually exclusive
# update_every: 1 # the JOB's data collection frequency
# priority: 60000 # the JOB's order on the dashboard
-# retries: 60 # the JOB's number of restoration attempts
+# penalty: yes # the JOB's penalty
# autodetection_retry: 0 # the JOB's re-check interval in seconds
#
# Additionally to the above, chrony also supports the following:
diff --git a/collectors/python.d.plugin/couchdb/README.md b/collectors/python.d.plugin/couchdb/README.md
index eff8c081..2cc353ed 100644
--- a/collectors/python.d.plugin/couchdb/README.md
+++ b/collectors/python.d.plugin/couchdb/README.md
@@ -33,3 +33,5 @@ local:
```
---
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fcouchdb%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/collectors/python.d.plugin/couchdb/couchdb.chart.py b/collectors/python.d.plugin/couchdb/couchdb.chart.py
index 5d6b9916..a58694d7 100644
--- a/collectors/python.d.plugin/couchdb/couchdb.chart.py
+++ b/collectors/python.d.plugin/couchdb/couchdb.chart.py
@@ -8,6 +8,7 @@ from collections import namedtuple, defaultdict
from json import loads
from threading import Thread
from socket import gethostbyname, gaierror
+
try:
from queue import Queue
except ImportError:
@@ -15,10 +16,9 @@ except ImportError:
from bases.FrameworkServices.UrlService import UrlService
-# default module values (can be overridden per job in `config`)
+
update_every = 1
-priority = 60000
-retries = 60
+
METHODS = namedtuple('METHODS', ['get_data', 'url', 'stats'])
@@ -109,7 +109,7 @@ ORDER = [
CHARTS = {
'activity': {
- 'options': [None, 'Overall Activity', 'req/s',
+ 'options': [None, 'Overall Activity', 'requests/s',
'dbactivity', 'couchdb.activity', 'stacked'],
'lines': [
['couchdb_database_reads', 'DB reads', 'incremental'],
@@ -118,7 +118,7 @@ CHARTS = {
]
},
'request_methods': {
- 'options': [None, 'HTTP request methods', 'req/s',
+ 'options': [None, 'HTTP request methods', 'requests/s',
'httptraffic', 'couchdb.request_methods',
'stacked'],
'lines': [
@@ -133,7 +133,7 @@ CHARTS = {
]
},
'response_codes': {
- 'options': [None, 'HTTP response status codes', 'resp/s',
+ 'options': [None, 'HTTP response status codes', 'responses/s',
'httptraffic', 'couchdb.response_codes',
'stacked'],
'lines': [
@@ -151,15 +151,13 @@ CHARTS = {
]
},
'open_files': {
- 'options': [None, 'Open files', 'files',
- 'ops', 'couchdb.open_files', 'line'],
+ 'options': [None, 'Open files', 'files', 'ops', 'couchdb.open_files', 'line'],
'lines': [
['couchdb_open_os_files', '# files', 'absolute']
]
},
'active_tasks': {
- 'options': [None, 'Active task breakdown', 'tasks',
- 'ops', 'couchdb.active_tasks', 'stacked'],
+ 'options': [None, 'Active task breakdown', 'tasks', 'ops', 'couchdb.active_tasks', 'stacked'],
'lines': [
['activetasks_indexer', 'Indexer', 'absolute'],
['activetasks_database_compaction', 'DB Compaction', 'absolute'],
@@ -168,8 +166,7 @@ CHARTS = {
]
},
'replicator_jobs': {
- 'options': [None, 'Replicator job breakdown', 'jobs',
- 'ops', 'couchdb.replicator_jobs', 'stacked'],
+ 'options': [None, 'Replicator job breakdown', 'jobs', 'ops', 'couchdb.replicator_jobs', 'stacked'],
'lines': [
['couch_replicator_jobs_running', 'Running', 'absolute'],
['couch_replicator_jobs_pending', 'Pending', 'absolute'],
@@ -179,8 +176,7 @@ CHARTS = {
]
},
'erlang_memory': {
- 'options': [None, 'Erlang VM memory usage', 'bytes',
- 'erlang', 'couchdb.erlang_vm_memory', 'stacked'],
+ 'options': [None, 'Erlang VM memory usage', 'B', 'erlang', 'couchdb.erlang_vm_memory', 'stacked'],
'lines': [
['memory_atom', 'atom', 'absolute'],
['memory_binary', 'binaries', 'absolute'],
@@ -191,23 +187,20 @@ CHARTS = {
]
},
'erlang_reductions': {
- 'options': [None, 'Erlang reductions', 'count',
- 'erlang', 'couchdb.reductions', 'line'],
+ 'options': [None, 'Erlang reductions', 'count', 'erlang', 'couchdb.reductions', 'line'],
'lines': [
['reductions', 'reductions', 'incremental']
]
},
'erlang_proc_counts': {
- 'options': [None, 'Process counts', 'count',
- 'erlang', 'couchdb.proccounts', 'line'],
+ 'options': [None, 'Process counts', 'count', 'erlang', 'couchdb.proccounts', 'line'],
'lines': [
['os_proc_count', 'OS procs', 'absolute'],
['process_count', 'erl procs', 'absolute']
]
},
'erlang_peak_msg_queue': {
- 'options': [None, 'Peak message queue size', 'count',
- 'erlang', 'couchdb.peakmsgqueue',
+ 'options': [None, 'Peak message queue size', 'count', 'erlang', 'couchdb.peakmsgqueue',
'line'],
'lines': [
['peak_msg_queue', 'peak size', 'absolute']
@@ -215,18 +208,15 @@ CHARTS = {
},
# Lines for the following are added as part of check()
'db_sizes_file': {
- 'options': [None, 'Database sizes (file)', 'KB',
- 'perdbstats', 'couchdb.db_sizes_file', 'line'],
+ 'options': [None, 'Database sizes (file)', 'KiB', 'perdbstats', 'couchdb.db_sizes_file', 'line'],
'lines': []
},
'db_sizes_external': {
- 'options': [None, 'Database sizes (external)', 'KB',
- 'perdbstats', 'couchdb.db_sizes_external', 'line'],
+ 'options': [None, 'Database sizes (external)', 'KiB', 'perdbstats', 'couchdb.db_sizes_external', 'line'],
'lines': []
},
'db_sizes_active': {
- 'options': [None, 'Database sizes (active)', 'KB',
- 'perdbstats', 'couchdb.db_sizes_active', 'line'],
+ 'options': [None, 'Database sizes (active)', 'KiB', 'perdbstats', 'couchdb.db_sizes_active', 'line'],
'lines': []
},
'db_doc_counts': {
@@ -235,8 +225,7 @@ CHARTS = {
'lines': []
},
'db_doc_del_counts': {
- 'options': [None, 'Database # of deleted docs', 'docs',
- 'perdbstats', 'couchdb_db_doc_del_count', 'line'],
+ 'options': [None, 'Database # of deleted docs', 'docs', 'perdbstats', 'couchdb_db_doc_del_count', 'line'],
'lines': []
}
}
@@ -256,7 +245,7 @@ class Service(UrlService):
try:
self.dbs = self.configuration.get('databases').split(' ')
except (KeyError, AttributeError):
- self.dbs = []
+ self.dbs = list()
def check(self):
if not (self.host and self.port):
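The `databases` option is a space-separated string, and the `try/except` doubles as the option-missing path: `.get()` returns `None`, whose absent `.split` raises `AttributeError`. Isolated:

```python
def parse_databases(configuration):
    # e.g. databases: 'db-a db-b'  (names here are just examples)
    try:
        return configuration.get('databases').split(' ')
    except (KeyError, AttributeError):
        # missing key -> .get() gives None -> AttributeError on .split()
        return list()

print(parse_databases({'databases': 'db-a db-b'}))  # ['db-a', 'db-b']
print(parse_databases({}))                          # []
```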
diff --git a/collectors/python.d.plugin/couchdb/couchdb.conf b/collectors/python.d.plugin/couchdb/couchdb.conf
index 5f6e75cf..9c68be77 100644
--- a/collectors/python.d.plugin/couchdb/couchdb.conf
+++ b/collectors/python.d.plugin/couchdb/couchdb.conf
@@ -28,11 +28,9 @@ update_every: 10
# If unset, the default for python.d.plugin is used.
# priority: 60000
-# retries sets the number of retries to be made in case of failures.
-# If unset, the default for python.d.plugin is used.
-# Attempts to restore the service are made once every update_every
-# and only if the module has collected values in the past.
-# retries: 60
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
# autodetection_retry sets the job re-check interval in seconds.
# The job is not deleted if check fails.
@@ -59,7 +57,7 @@ update_every: 10
# # JOBs sharing a name are mutually exclusive
# update_every: 1 # the JOB's data collection frequency
# priority: 60000 # the JOB's order on the dashboard
-# retries: 60 # the JOB's number of restoration attempts
+# penalty: yes # the JOB's penalty
# autodetection_retry: 0 # the JOB's re-check interval in seconds
#
# Additionally to the above, the couchdb plugin also supports the following:
diff --git a/collectors/python.d.plugin/cpufreq/README.md b/collectors/python.d.plugin/cpufreq/README.md
index 33891d59..f1fc1e8f 100644
--- a/collectors/python.d.plugin/cpufreq/README.md
+++ b/collectors/python.d.plugin/cpufreq/README.md
@@ -1,5 +1,10 @@
# cpufreq
+> THIS MODULE IS OBSOLETE.
+> USE THE [PROC PLUGIN](../../proc.plugin) - IT IS MORE EFFICIENT
+
+---
+
This module shows the current CPU frequency as set by the cpufreq kernel
module.
@@ -28,3 +33,5 @@ If no configuration is given, module will search for cpufreq files in `/sys/devi
Directory is also prefixed with `NETDATA_HOST_PREFIX` if specified.
---
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fcpufreq%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/collectors/python.d.plugin/cpufreq/cpufreq.conf b/collectors/python.d.plugin/cpufreq/cpufreq.conf
index 0890245d..96c0884c 100644
--- a/collectors/python.d.plugin/cpufreq/cpufreq.conf
+++ b/collectors/python.d.plugin/cpufreq/cpufreq.conf
@@ -27,11 +27,9 @@
# If unset, the default for python.d.plugin is used.
# priority: 60000
-# retries sets the number of retries to be made in case of failures.
-# If unset, the default for python.d.plugin is used.
-# Attempts to restore the service are made once every update_every
-# and only if the module has collected values in the past.
-# retries: 60
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
# autodetection_retry sets the job re-check interval in seconds.
# The job is not deleted if check fails.
diff --git a/collectors/python.d.plugin/cpuidle/README.md b/collectors/python.d.plugin/cpuidle/README.md
index 49516963..bb6722a1 100644
--- a/collectors/python.d.plugin/cpuidle/README.md
+++ b/collectors/python.d.plugin/cpuidle/README.md
@@ -9,3 +9,5 @@ It produces one stacked chart per CPU, showing the percentage of time spent in
each state.
---
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fcpuidle%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/collectors/python.d.plugin/cpuidle/cpuidle.conf b/collectors/python.d.plugin/cpuidle/cpuidle.conf
index bc276fcd..25f5fed6 100644
--- a/collectors/python.d.plugin/cpuidle/cpuidle.conf
+++ b/collectors/python.d.plugin/cpuidle/cpuidle.conf
@@ -27,11 +27,9 @@
# If unset, the default for python.d.plugin is used.
# priority: 60000
-# retries sets the number of retries to be made in case of failures.
-# If unset, the default for python.d.plugin is used.
-# Attempts to restore the service are made once every update_every
-# and only if the module has collected values in the past.
-# retries: 60
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
# autodetection_retry sets the job re-check interval in seconds.
# The job is not deleted if check fails.
diff --git a/collectors/python.d.plugin/dns_query_time/README.md b/collectors/python.d.plugin/dns_query_time/README.md
index 3703e8aa..73d70d3a 100644
--- a/collectors/python.d.plugin/dns_query_time/README.md
+++ b/collectors/python.d.plugin/dns_query_time/README.md
@@ -8,3 +8,5 @@ This module provides DNS query time statistics.
It produces one aggregate chart or one chart per DNS server, showing the query time.
---
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fdns_query_time%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/collectors/python.d.plugin/dns_query_time/dns_query_time.chart.py b/collectors/python.d.plugin/dns_query_time/dns_query_time.chart.py
index d3c3db78..4a5e0e10 100644
--- a/collectors/python.d.plugin/dns_query_time/dns_query_time.chart.py
+++ b/collectors/python.d.plugin/dns_query_time/dns_query_time.chart.py
@@ -28,10 +28,7 @@ except ImportError:
from bases.FrameworkServices.SimpleService import SimpleService
-# default module values (can be overridden per job in `config`)
update_every = 5
-priority = 60000
-retries = 60
class Service(SimpleService):
@@ -46,14 +43,14 @@ class Service(SimpleService):
def check(self):
if not DNS_PYTHON:
- self.error('\'python-dnspython\' package is needed to use dns_query_time.chart.py')
+ self.error("'python-dnspython' package is needed to use dns_query_time.chart.py")
return False
self.timeout = self.timeout if isinstance(self.timeout, int) else 4
if not all([self.domains, self.server_list,
isinstance(self.server_list, str), isinstance(self.domains, str)]):
- self.error('server_list and domain_list can\'t be empty')
+ self.error("server_list and domain_list can't be empty")
return False
else:
self.domains, self.server_list = self.domains.split(), self.server_list.split()
@@ -129,17 +126,27 @@ def create_charts(aggregate, server_list):
}
}
for ns in server_list:
- definitions['dns_group']['lines'].append(['_'.join(['ns', ns.replace('.', '_')]), ns, 'absolute'])
+ dim = [
+ '_'.join(['ns', ns.replace('.', '_')]),
+ ns,
+ 'absolute',
+ ]
+ definitions['dns_group']['lines'].append(dim)
return order, definitions
else:
order = [''.join(['dns_', ns.replace('.', '_')]) for ns in server_list]
definitions = dict()
+
for ns in server_list:
definitions[''.join(['dns_', ns.replace('.', '_')])] = {
'options': [None, 'DNS Response Time', 'ms', ns, 'dns_query_time.response_time', 'area'],
'lines': [
- ['_'.join(['ns', ns.replace('.', '_')]), ns, 'absolute']
+ [
+ '_'.join(['ns', ns.replace('.', '_')]),
+ ns,
+ 'absolute',
+ ]
]
}
return order, definitions
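Both branches of `create_charts` build ids the same way: dots in the nameserver address are swapped for underscores, because dots are separators in netdata chart and dimension ids. The naming scheme in isolation:

```python
def dimension_for(ns):
    # '8.8.8.8' -> ['ns_8_8_8_8', '8.8.8.8', 'absolute']
    return ['_'.join(['ns', ns.replace('.', '_')]), ns, 'absolute']

def chart_id_for(ns):
    # '8.8.8.8' -> 'dns_8_8_8_8'  (per-server chart ids, non-aggregate mode)
    return ''.join(['dns_', ns.replace('.', '_')])

print(dimension_for('8.8.8.8'))
print(chart_id_for('8.8.8.8'))
```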
diff --git a/collectors/python.d.plugin/dns_query_time/dns_query_time.conf b/collectors/python.d.plugin/dns_query_time/dns_query_time.conf
index d32c6db8..9c0838ee 100644
--- a/collectors/python.d.plugin/dns_query_time/dns_query_time.conf
+++ b/collectors/python.d.plugin/dns_query_time/dns_query_time.conf
@@ -27,11 +27,9 @@
# If unset, the default for python.d.plugin is used.
# priority: 60000
-# retries sets the number of retries to be made in case of failures.
-# If unset, the default for python.d.plugin is used.
-# Attempts to restore the service are made once every update_every
-# and only if the module has collected values in the past.
-# retries: 60
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
# autodetection_retry sets the job re-check interval in seconds.
# The job is not deleted if check fails.
@@ -58,7 +56,7 @@
# # JOBs sharing a name are mutually exclusive
# update_every: 1 # the JOB's data collection frequency
# priority: 60000 # the JOB's order on the dashboard
-# retries: 60 # the JOB's number of restoration attempts
+# penalty: yes # the JOB's penalty
# autodetection_retry: 0 # the JOB's re-check interval in seconds
#
# Additionally to the above, dns_query_time also supports the following:
diff --git a/collectors/python.d.plugin/dnsdist/README.md b/collectors/python.d.plugin/dnsdist/README.md
index b646ae27..c7647a11 100644
--- a/collectors/python.d.plugin/dnsdist/README.md
+++ b/collectors/python.d.plugin/dnsdist/README.md
@@ -52,3 +52,5 @@ localhost:
```
---
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fdnsdist%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/collectors/python.d.plugin/dnsdist/dnsdist.chart.py b/collectors/python.d.plugin/dnsdist/dnsdist.chart.py
index 1aff3f80..d6085865 100644
--- a/collectors/python.d.plugin/dnsdist/dnsdist.chart.py
+++ b/collectors/python.d.plugin/dnsdist/dnsdist.chart.py
@@ -90,9 +90,9 @@ CHARTS = {
]
},
'servermem': {
- 'options': [None, 'DNSDIST server memory utilization', 'MB', 'server', 'dnsdist.servermem', 'area'],
+ 'options': [None, 'DNSDIST server memory utilization', 'MiB', 'server', 'dnsdist.servermem', 'area'],
'lines': [
- ['real-memory-usage', 'memory usage', 'absolute', 1, 1048576]
+ ['real-memory-usage', 'memory usage', 'absolute', 1, 1 << 20]
]
},
'query_latency': {
diff --git a/collectors/python.d.plugin/dnsdist/dnsdist.conf b/collectors/python.d.plugin/dnsdist/dnsdist.conf
index aec58b8e..324d65aa 100644
--- a/collectors/python.d.plugin/dnsdist/dnsdist.conf
+++ b/collectors/python.d.plugin/dnsdist/dnsdist.conf
@@ -27,11 +27,9 @@
# If unset, the default for python.d.plugin is used.
# priority: 60000
-# retries sets the number of retries to be made in case of failures.
-# If unset, the default for python.d.plugin is used.
-# Attempts to restore the service are made once every update_every
-# and only if the module has collected values in the past.
-#retries: 600000
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
# autodetection_retry sets the job re-check interval in seconds.
# The job is not deleted if check fails.
@@ -58,7 +56,7 @@
# # JOBs sharing a name are mutually exclusive
# update_every: 1 # the JOB's data collection frequency
# priority: 60000 # the JOB's order on the dashboard
-# retries: 60 # the JOB's number of restoration attempts
+# penalty: yes # the JOB's penalty
# autodetection_retry: 0 # the JOB's re-check interval in seconds
#
#
diff --git a/collectors/python.d.plugin/dockerd/README.md b/collectors/python.d.plugin/dockerd/README.md
index d3f60380..b09a5d59 100644
--- a/collectors/python.d.plugin/dockerd/README.md
+++ b/collectors/python.d.plugin/dockerd/README.md
@@ -3,7 +3,7 @@
Module monitors docker health metrics.
**Requirement:**
-* `docker` package
+* `docker` package, required version 3.2.0+
Following charts are drawn:
@@ -24,3 +24,5 @@ Following charts are drawn:
```
---
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fdockerd%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/collectors/python.d.plugin/dockerd/dockerd.chart.py b/collectors/python.d.plugin/dockerd/dockerd.chart.py
index a0d3d7e6..8bd45df9 100644
--- a/collectors/python.d.plugin/dockerd/dockerd.chart.py
+++ b/collectors/python.d.plugin/dockerd/dockerd.chart.py
@@ -10,10 +10,8 @@ except ImportError:
from bases.FrameworkServices.SimpleService import SimpleService
-# default module values (can be overridden per job in `config`)
-# update_every = 1
-priority = 60000
-retries = 60
+from distutils.version import StrictVersion
+
# charts order (can be overridden if you want less charts, or different order)
ORDER = [
@@ -24,21 +22,21 @@ ORDER = [
CHARTS = {
'running_containers': {
- 'options': [None, 'Number of running containers', 'running containers', 'running containers',
+ 'options': [None, 'Number of running containers', 'containers', 'running containers',
'docker.running_containers', 'line'],
'lines': [
['running_containers', 'running']
]
},
'healthy_containers': {
- 'options': [None, 'Number of healthy containers', 'healthy containers', 'healthy containers',
+ 'options': [None, 'Number of healthy containers', 'containers', 'healthy containers',
'docker.healthy_containers', 'line'],
'lines': [
['healthy_containers', 'healthy']
]
},
'unhealthy_containers': {
- 'options': [None, 'Number of unhealthy containers', 'unhealthy containers', 'unhealthy containers',
+ 'options': [None, 'Number of unhealthy containers', 'containers', 'unhealthy containers',
'docker.unhealthy_containers', 'line'],
'lines': [
['unhealthy_containers', 'unhealthy']
@@ -47,15 +45,26 @@ CHARTS = {
}
+MIN_REQUIRED_VERSION = '3.2.0'
+
+
class Service(SimpleService):
def __init__(self, configuration=None, name=None):
SimpleService.__init__(self, configuration=configuration, name=name)
self.order = ORDER
self.definitions = CHARTS
+ self.client = None
def check(self):
if not HAS_DOCKER:
- self.error('\'docker\' package is needed to use docker.chart.py')
+ self.error("'docker' package is needed to use dockerd module")
+ return False
+
+ if StrictVersion(docker.__version__) < StrictVersion(MIN_REQUIRED_VERSION):
+ self.error("installed 'docker' package version {0}, minimum required version {1}, please upgrade".format(
+ docker.__version__,
+ MIN_REQUIRED_VERSION,
+ ))
return False
self.client = docker.DockerClient(base_url=self.configuration.get('url', 'unix://var/run/docker.sock'))
@@ -70,6 +79,7 @@ class Service(SimpleService):
def get_data(self):
data = dict()
+
data['running_containers'] = len(self.client.containers.list(sparse=True))
data['healthy_containers'] = len(self.client.containers.list(filters={'health': 'healthy'}, sparse=True))
data['unhealthy_containers'] = len(self.client.containers.list(filters={'health': 'unhealthy'}, sparse=True))
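The new gate leans on `distutils.version.StrictVersion` comparing dotted versions numerically, where a plain string comparison would fail; the 3.2.0 floor matches the `sparse=True` listing used in `get_data`, which fetches lightweight container objects since only counts are charted. The comparison on its own:

```python
from distutils.version import StrictVersion

MIN_REQUIRED_VERSION = '3.2.0'

def version_ok(installed):
    return StrictVersion(installed) >= StrictVersion(MIN_REQUIRED_VERSION)

print(version_ok('3.10.0'))  # True, although '3.10.0' < '3.2.0' as strings
print(version_ok('3.1.4'))   # False
```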
diff --git a/collectors/python.d.plugin/dockerd/dockerd.conf b/collectors/python.d.plugin/dockerd/dockerd.conf
index 5ef17a1f..96c8ee0d 100644
--- a/collectors/python.d.plugin/dockerd/dockerd.conf
+++ b/collectors/python.d.plugin/dockerd/dockerd.conf
@@ -27,11 +27,9 @@
# If unset, the default for python.d.plugin is used.
# priority: 60000
-# retries sets the number of retries to be made in case of failures.
-# If unset, the default for python.d.plugin is used.
-# Attempts to restore the service are made once every update_every
-# and only if the module has collected values in the past.
-# retries: 60
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
# autodetection_retry sets the job re-check interval in seconds.
# The job is not deleted if check fails.
@@ -58,7 +56,7 @@
# # JOBs sharing a name are mutually exclusive
# update_every: 1 # the JOB's data collection frequency
# priority: 60000 # the JOB's order on the dashboard
-# retries: 10 # the JOB's number of restoration attempts
+# penalty: yes # the JOB's penalty
# autodetection_retry: 0 # the JOB's re-check interval in seconds
#
# Additionally to the above, dockerd plugin also supports the following:
diff --git a/collectors/python.d.plugin/dovecot/README.md b/collectors/python.d.plugin/dovecot/README.md
index 50950ecc..de8788b3 100644
--- a/collectors/python.d.plugin/dovecot/README.md
+++ b/collectors/python.d.plugin/dovecot/README.md
@@ -1,9 +1,13 @@
# dovecot
This module provides statistics information from Dovecot server.
+
Statistics are taken from dovecot socket by executing `EXPORT global` command.
More information about dovecot stats can be found on [project wiki page.](http://wiki2.dovecot.org/Statistics)
+The module isn't compatible with the new statistics API (v2.3), but you can still use it with Dovecot v2.3
+by following the [upgrading steps](https://wiki2.dovecot.org/Upgrading/2.3).
+
**Requirement:**
Dovecot UNIX socket with R/W permissions for user netdata or Dovecot with configured TCP/IP socket.
@@ -71,3 +75,5 @@ localsocket:
If no configuration is given, module will attempt to connect to dovecot using unix socket localized in `/var/run/dovecot/stats`
---
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fdovecot%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/collectors/python.d.plugin/dovecot/dovecot.chart.py b/collectors/python.d.plugin/dovecot/dovecot.chart.py
index 7fee3bfa..be1fa53d 100644
--- a/collectors/python.d.plugin/dovecot/dovecot.chart.py
+++ b/collectors/python.d.plugin/dovecot/dovecot.chart.py
@@ -5,12 +5,10 @@
from bases.FrameworkServices.SocketService import SocketService
-# default module values (can be overridden per job in `config`)
-# update_every = 2
-priority = 60000
-retries = 60
-# charts order (can be overridden if you want less charts, or different order)
+UNIX_SOCKET = '/var/run/dovecot/stats'
+
+
ORDER = [
'sessions',
'logins',
@@ -53,14 +51,14 @@ CHARTS = {
]
},
'context_switches': {
- 'options': [None, 'Dovecot Context Switches', '', 'context switches', 'dovecot.context_switches', 'line'],
+ 'options': [None, 'Dovecot Context Switches', 'switches', 'context switches', 'dovecot.context_switches', 'line'],
'lines': [
['vol_cs', 'voluntary', 'absolute'],
['invol_cs', 'involuntary', 'absolute']
]
},
'io': {
- 'options': [None, 'Dovecot Disk I/O', 'kilobytes/s', 'disk', 'dovecot.io', 'area'],
+ 'options': [None, 'Dovecot Disk I/O', 'KiB/s', 'disk', 'dovecot.io', 'area'],
'lines': [
['disk_input', 'read', 'incremental', 1, 1024],
['disk_output', 'write', 'incremental', -1, 1024]
@@ -69,8 +67,8 @@ CHARTS = {
'net': {
'options': [None, 'Dovecot Network Bandwidth', 'kilobits/s', 'network', 'dovecot.net', 'area'],
'lines': [
- ['read_bytes', 'read', 'incremental', 8, 1024],
- ['write_bytes', 'write', 'incremental', -8, 1024]
+ ['read_bytes', 'read', 'incremental', 8, 1000],
+ ['write_bytes', 'write', 'incremental', -8, 1000]
]
},
'syscalls': {
@@ -113,13 +111,12 @@ CHARTS = {
class Service(SocketService):
def __init__(self, configuration=None, name=None):
SocketService.__init__(self, configuration=configuration, name=name)
- self.request = 'EXPORT\tglobal\r\n'
- self.host = None # localhost
- self.port = None # 24242
- # self._keep_alive = True
- self.unix_socket = '/var/run/dovecot/stats'
self.order = ORDER
self.definitions = CHARTS
+ self.host = None # localhost
+ self.port = None # 24242
+ self.unix_socket = UNIX_SOCKET
+ self.request = 'EXPORT\tglobal\r\n'
def _get_data(self):
"""
diff --git a/collectors/python.d.plugin/dovecot/dovecot.conf b/collectors/python.d.plugin/dovecot/dovecot.conf
index 56c39499..451dbc9a 100644
--- a/collectors/python.d.plugin/dovecot/dovecot.conf
+++ b/collectors/python.d.plugin/dovecot/dovecot.conf
@@ -27,11 +27,9 @@
# If unset, the default for python.d.plugin is used.
# priority: 60000
-# retries sets the number of retries to be made in case of failures.
-# If unset, the default for python.d.plugin is used.
-# Attempts to restore the service are made once every update_every
-# and only if the module has collected values in the past.
-# retries: 60
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
# autodetection_retry sets the job re-check interval in seconds.
# The job is not deleted if check fails.
@@ -58,7 +56,7 @@
# # JOBs sharing a name are mutually exclusive
# update_every: 1 # the JOB's data collection frequency
# priority: 60000 # the JOB's order on the dashboard
-# retries: 60 # the JOB's number of restoration attempts
+# penalty: yes # the JOB's penalty
# autodetection_retry: 0 # the JOB's re-check interval in seconds
#
# Additionally to the above, dovecot also supports the following:
@@ -94,3 +92,7 @@ localsocket:
name : 'local'
socket : '/var/run/dovecot/stats'
+localsocket_old:
+ name : 'local'
+ socket : '/var/run/dovecot/old-stats'
+
diff --git a/collectors/python.d.plugin/elasticsearch/README.md b/collectors/python.d.plugin/elasticsearch/README.md
index 7ce6c0b7..6d25b02d 100644
--- a/collectors/python.d.plugin/elasticsearch/README.md
+++ b/collectors/python.d.plugin/elasticsearch/README.md
@@ -58,3 +58,5 @@ local:
If no configuration is given, module will fail to run.
---
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Felasticsearch%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/collectors/python.d.plugin/elasticsearch/elasticsearch.chart.py b/collectors/python.d.plugin/elasticsearch/elasticsearch.chart.py
index 3f431f6e..f1ea03fe 100644
--- a/collectors/python.d.plugin/elasticsearch/elasticsearch.chart.py
+++ b/collectors/python.d.plugin/elasticsearch/elasticsearch.chart.py
@@ -159,17 +159,20 @@ ORDER = [
'fielddata_evictions_tripped',
'cluster_health_status',
'cluster_health_nodes',
+ 'cluster_health_pending_tasks',
+ 'cluster_health_flight_fetch',
'cluster_health_shards',
'cluster_stats_nodes',
'cluster_stats_query_cache',
'cluster_stats_docs',
'cluster_stats_store',
- 'cluster_stats_indices_shards',
+ 'cluster_stats_indices',
+ 'cluster_stats_shards_total',
]
CHARTS = {
'search_performance_total': {
- 'options': [None, 'Queries And Fetches', 'number of', 'search performance',
+ 'options': [None, 'Queries And Fetches', 'events/s', 'search performance',
'elastic.search_performance_total', 'stacked'],
'lines': [
['indices_search_query_total', 'queries', 'incremental'],
@@ -177,7 +180,7 @@ CHARTS = {
]
},
'search_performance_current': {
- 'options': [None, 'Queries and Fetches In Progress', 'number of', 'search performance',
+ 'options': [None, 'Queries and Fetches In Progress', 'events', 'search performance',
'elastic.search_performance_current', 'stacked'],
'lines': [
['indices_search_query_current', 'queries', 'absolute'],
@@ -193,14 +196,14 @@ CHARTS = {
]
},
'search_latency': {
- 'options': [None, 'Query And Fetch Latency', 'ms', 'search performance', 'elastic.search_latency', 'stacked'],
+ 'options': [None, 'Query And Fetch Latency', 'milliseconds', 'search performance', 'elastic.search_latency', 'stacked'],
'lines': [
['query_latency', 'query', 'absolute', 1, 1000],
['fetch_latency', 'fetch', 'absolute', 1, 1000]
]
},
'index_performance_total': {
- 'options': [None, 'Indexed Documents, Index Refreshes, Index Flushes To Disk', 'number of',
+ 'options': [None, 'Indexed Documents, Index Refreshes, Index Flushes To Disk', 'events/s',
'indexing performance', 'elastic.index_performance_total', 'stacked'],
'lines': [
['indices_indexing_index_total', 'indexed', 'incremental'],
@@ -225,7 +228,7 @@ CHARTS = {
]
},
'index_latency': {
- 'options': [None, 'Indexing And Flushing Latency', 'ms', 'indexing performance',
+ 'options': [None, 'Indexing And Flushing Latency', 'milliseconds', 'indexing performance',
'elastic.index_latency', 'stacked'],
'lines': [
['indexing_latency', 'indexing', 'absolute', 1, 1000],
@@ -233,7 +236,7 @@ CHARTS = {
]
},
'index_translog_operations': {
- 'options': [None, 'Translog Operations', 'count', 'translog',
+ 'options': [None, 'Translog Operations', 'operations', 'translog',
'elastic.index_translog_operations', 'area'],
'lines': [
['indices_translog_operations', 'total', 'absolute'],
@@ -241,7 +244,7 @@ CHARTS = {
]
},
'index_translog_size': {
- 'options': [None, 'Translog Size', 'MB', 'translog',
+ 'options': [None, 'Translog Size', 'MiB', 'translog',
'elastic.index_translog_size', 'area'],
'lines': [
['indices_translog_size_in_bytes', 'total', 'absolute', 1, 1048576],
@@ -249,21 +252,21 @@ CHARTS = {
]
},
'index_segments_count': {
- 'options': [None, 'Total Number Of Indices Segments', 'count', 'indices segments',
+ 'options': [None, 'Total Number Of Indices Segments', 'segments', 'indices segments',
'elastic.index_segments_count', 'line'],
'lines': [
['indices_segments_count', 'segments', 'absolute']
]
},
'index_segments_memory_writer': {
- 'options': [None, 'Index Writer Memory Usage', 'MB', 'indices segments',
+ 'options': [None, 'Index Writer Memory Usage', 'MiB', 'indices segments',
'elastic.index_segments_memory_writer', 'area'],
'lines': [
['indices_segments_index_writer_memory_in_bytes', 'total', 'absolute', 1, 1048576]
]
},
'index_segments_memory': {
- 'options': [None, 'Indices Segments Memory Usage', 'MB', 'indices segments',
+ 'options': [None, 'Indices Segments Memory Usage', 'MiB', 'indices segments',
'elastic.index_segments_memory', 'stacked'],
'lines': [
['indices_segments_terms_memory_in_bytes', 'terms', 'absolute', 1, 1048576],
@@ -277,14 +280,14 @@ CHARTS = {
]
},
'jvm_mem_heap': {
- 'options': [None, 'JVM Heap Percentage Currently in Use', 'percent', 'memory usage and gc',
+ 'options': [None, 'JVM Heap Percentage Currently in Use', 'percentage', 'memory usage and gc',
'elastic.jvm_heap', 'area'],
'lines': [
['jvm_mem_heap_used_percent', 'inuse', 'absolute']
]
},
'jvm_mem_heap_bytes': {
- 'options': [None, 'JVM Heap Commit And Usage', 'MB', 'memory usage and gc',
+ 'options': [None, 'JVM Heap Commit And Usage', 'MiB', 'memory usage and gc',
'elastic.jvm_heap_bytes', 'area'],
'lines': [
['jvm_mem_heap_committed_in_bytes', 'commited', 'absolute', 1, 1048576],
@@ -292,7 +295,7 @@ CHARTS = {
]
},
'jvm_buffer_pool_count': {
- 'options': [None, 'JVM Buffers', 'count', 'memory usage and gc',
+ 'options': [None, 'JVM Buffers', 'pools', 'memory usage and gc',
'elastic.jvm_buffer_pool_count', 'line'],
'lines': [
['jvm_buffer_pools_direct_count', 'direct', 'absolute'],
@@ -300,7 +303,7 @@ CHARTS = {
]
},
'jvm_direct_buffers_memory': {
- 'options': [None, 'JVM Direct Buffers Memory', 'MB', 'memory usage and gc',
+ 'options': [None, 'JVM Direct Buffers Memory', 'MiB', 'memory usage and gc',
'elastic.jvm_direct_buffers_memory', 'area'],
'lines': [
['jvm_buffer_pools_direct_used_in_bytes', 'used', 'absolute', 1, 1048576],
@@ -308,7 +311,7 @@ CHARTS = {
]
},
'jvm_mapped_buffers_memory': {
- 'options': [None, 'JVM Mapped Buffers Memory', 'MB', 'memory usage and gc',
+ 'options': [None, 'JVM Mapped Buffers Memory', 'MiB', 'memory usage and gc',
'elastic.jvm_mapped_buffers_memory', 'area'],
'lines': [
['jvm_buffer_pools_mapped_used_in_bytes', 'used', 'absolute', 1, 1048576],
@@ -316,14 +319,14 @@ CHARTS = {
]
},
'jvm_gc_count': {
- 'options': [None, 'Garbage Collections', 'counts', 'memory usage and gc', 'elastic.gc_count', 'stacked'],
+ 'options': [None, 'Garbage Collections', 'events/s', 'memory usage and gc', 'elastic.gc_count', 'stacked'],
'lines': [
['jvm_gc_collectors_young_collection_count', 'young', 'incremental'],
['jvm_gc_collectors_old_collection_count', 'old', 'incremental']
]
},
'jvm_gc_time': {
- 'options': [None, 'Time Spent On Garbage Collections', 'ms', 'memory usage and gc',
+ 'options': [None, 'Time Spent On Garbage Collections', 'milliseconds', 'memory usage and gc',
'elastic.gc_time', 'stacked'],
'lines': [
['jvm_gc_collectors_young_collection_time_in_millis', 'young', 'incremental'],
@@ -353,13 +356,13 @@ CHARTS = {
]
},
'fielddata_cache': {
- 'options': [None, 'Fielddata Cache', 'MB', 'fielddata cache', 'elastic.fielddata_cache', 'line'],
+ 'options': [None, 'Fielddata Cache', 'MiB', 'fielddata cache', 'elastic.fielddata_cache', 'line'],
'lines': [
['indices_fielddata_memory_size_in_bytes', 'cache', 'absolute', 1, 1048576]
]
},
'fielddata_evictions_tripped': {
- 'options': [None, 'Fielddata Evictions And Circuit Breaker Tripped Count', 'number of events',
+ 'options': [None, 'Fielddata Evictions And Circuit Breaker Tripped Count', 'events/s',
'fielddata cache', 'elastic.fielddata_evictions_tripped', 'line'],
'lines': [
['indices_fielddata_evictions', 'evictions', 'incremental'],
@@ -367,12 +370,24 @@ CHARTS = {
]
},
'cluster_health_nodes': {
- 'options': [None, 'Nodes And Tasks Statistics', 'units', 'cluster health API',
+ 'options': [None, 'Nodes Statistics', 'nodes', 'cluster health API',
'elastic.cluster_health_nodes', 'stacked'],
'lines': [
['number_of_nodes', 'nodes', 'absolute'],
['number_of_data_nodes', 'data_nodes', 'absolute'],
+ ]
+ },
+ 'cluster_health_pending_tasks': {
+ 'options': [None, 'Tasks Statistics', 'tasks', 'cluster health API',
+ 'elastic.cluster_health_pending_tasks', 'line'],
+ 'lines': [
['number_of_pending_tasks', 'pending_tasks', 'absolute'],
+ ]
+ },
+ 'cluster_health_flight_fetch': {
+ 'options': [None, 'In Flight Fetches Statistics', 'fetches', 'cluster health API',
+ 'elastic.cluster_health_flight_fetch', 'line'],
+ 'lines': [
['number_of_in_flight_fetch', 'in_flight_fetch', 'absolute']
]
},
@@ -420,24 +435,30 @@ CHARTS = {
]
},
'cluster_stats_docs': {
- 'options': [None, 'Docs Statistics', 'count', 'cluster stats API',
+ 'options': [None, 'Docs Statistics', 'docs', 'cluster stats API',
'elastic.cluster_docs', 'line'],
'lines': [
['indices_docs_count', 'docs', 'absolute']
]
},
'cluster_stats_store': {
- 'options': [None, 'Store Statistics', 'MB', 'cluster stats API',
+ 'options': [None, 'Store Statistics', 'MiB', 'cluster stats API',
'elastic.cluster_store', 'line'],
'lines': [
['indices_store_size_in_bytes', 'size', 'absolute', 1, 1048576]
]
},
- 'cluster_stats_indices_shards': {
- 'options': [None, 'Indices And Shards Statistics', 'count', 'cluster stats API',
- 'elastic.cluster_indices_shards', 'stacked'],
+ 'cluster_stats_indices': {
+ 'options': [None, 'Indices Statistics', 'indices', 'cluster stats API',
+ 'elastic.cluster_indices', 'line'],
'lines': [
['indices_count', 'indices', 'absolute'],
+ ]
+ },
+ 'cluster_stats_shards_total': {
+ 'options': [None, 'Total Shards Statistics', 'shards', 'cluster stats API',
+ 'elastic.cluster_shards_total', 'line'],
+ 'lines': [
['indices_shards_total', 'shards', 'absolute']
]
},
@@ -450,7 +471,7 @@ CHARTS = {
]
},
'host_metrics_file_descriptors': {
- 'options': [None, 'Available File Descriptors In Percent', 'percent', 'host metrics',
+ 'options': [None, 'Available File Descriptors In Percent', 'percentage', 'host metrics',
'elastic.host_descriptors', 'area'],
'lines': [
['file_descriptors_used', 'used', 'absolute', 1, 10]
@@ -473,9 +494,11 @@ class Service(UrlService):
self.definitions = CHARTS
self.host = self.configuration.get('host')
self.port = self.configuration.get('port', 9200)
- self.url = '{scheme}://{host}:{port}'.format(scheme=self.configuration.get('scheme', 'http'),
- host=self.host,
- port=self.port)
+ self.url = '{scheme}://{host}:{port}'.format(
+ scheme=self.configuration.get('scheme', 'http'),
+ host=self.host,
+ port=self.port,
+ )
self.latency = dict()
self.methods = list()
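Most of the elasticsearch churn splits mixed charts apart (nodes vs. pending tasks vs. in-flight fetches, indices vs. shards) because a netdata chart carries exactly one unit string. The reformatted URL construction is behavior-preserving; its standalone form:

```python
def build_url(configuration):
    # defaults mirror Service.__init__ above
    return '{scheme}://{host}:{port}'.format(
        scheme=configuration.get('scheme', 'http'),
        host=configuration.get('host'),
        port=configuration.get('port', 9200),
    )

print(build_url({'host': 'localhost'}))  # http://localhost:9200
```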
diff --git a/collectors/python.d.plugin/elasticsearch/elasticsearch.conf b/collectors/python.d.plugin/elasticsearch/elasticsearch.conf
index 213843bf..e5c97e7e 100644
--- a/collectors/python.d.plugin/elasticsearch/elasticsearch.conf
+++ b/collectors/python.d.plugin/elasticsearch/elasticsearch.conf
@@ -27,11 +27,9 @@
# If unset, the default for python.d.plugin is used.
# priority: 60000
-# retries sets the number of retries to be made in case of failures.
-# If unset, the default for python.d.plugin is used.
-# Attempts to restore the service are made once every update_every
-# and only if the module has collected values in the past.
-# retries: 60
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
# autodetection_retry sets the job re-check interval in seconds.
# The job is not deleted if check fails.
@@ -58,7 +56,7 @@
# # JOBs sharing a name are mutually exclusive
# update_every: 1 # the JOB's data collection frequency
# priority: 60000 # the JOB's order on the dashboard
-# retries: 60 # the JOB's number of restoration attempts
+# penalty: yes # the JOB's penalty
# autodetection_retry: 0 # the JOB's re-check interval in seconds
#
# Additionally to the above, elasticsearch plugin also supports the following:
diff --git a/collectors/python.d.plugin/example/README.md b/collectors/python.d.plugin/example/README.md
index f9f314ac..d65f8cf9 100644
--- a/collectors/python.d.plugin/example/README.md
+++ b/collectors/python.d.plugin/example/README.md
@@ -1 +1,5 @@
-An example python data collection module.
\ No newline at end of file
+# example
+
+An example python data collection module.
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fexample%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/collectors/python.d.plugin/example/example.chart.py b/collectors/python.d.plugin/example/example.chart.py
index 85defa4d..cc8c1875 100644
--- a/collectors/python.d.plugin/example/example.chart.py
+++ b/collectors/python.d.plugin/example/example.chart.py
@@ -7,12 +7,13 @@ from random import SystemRandom
from bases.FrameworkServices.SimpleService import SimpleService
-# default module values
-# update_every = 4
+
priority = 90000
-retries = 60
-ORDER = ['random']
+ORDER = [
+ 'random',
+]
+
CHARTS = {
'random': {
'options': [None, 'A random number', 'random number', 'random', 'random', 'line'],
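Since the example module exists purely as a template, its whole data path fits in a few lines; a self-contained sketch of what its `get_data` boils down to (dimension name illustrative):

```python
from random import SystemRandom

random_source = SystemRandom()

def get_data():
    # one dimension feeding the 'random' chart defined above
    return {'random1': random_source.randint(0, 100)}

print(get_data())
```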
diff --git a/collectors/python.d.plugin/example/example.conf b/collectors/python.d.plugin/example/example.conf
index e7fed9b5..3d843517 100644
--- a/collectors/python.d.plugin/example/example.conf
+++ b/collectors/python.d.plugin/example/example.conf
@@ -27,11 +27,9 @@
# If unset, the default for python.d.plugin is used.
# priority: 60000
-# retries sets the number of retries to be made in case of failures.
-# If unset, the default for python.d.plugin is used.
-# Attempts to restore the service are made once every update_every
-# and only if the module has collected values in the past.
-# retries: 60
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
# autodetection_retry sets the job re-check interval in seconds.
# The job is not deleted if check fails.
@@ -58,7 +56,7 @@
# # JOBs sharing a name are mutually exclusive
# update_every: 1 # the JOB's data collection frequency
# priority: 60000 # the JOB's order on the dashboard
-# retries: 60 # the JOB's number of restoration attempts
+# penalty: yes # the JOB's penalty
# autodetection_retry: 0 # the JOB's re-check interval in seconds
#
# Additionally to the above, example also supports the following:
diff --git a/collectors/python.d.plugin/exim/README.md b/collectors/python.d.plugin/exim/README.md
index b9a62cad..1cebb27f 100644
--- a/collectors/python.d.plugin/exim/README.md
+++ b/collectors/python.d.plugin/exim/README.md
@@ -11,3 +11,5 @@ It produces only one chart:
Configuration is not needed.
---
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fexim%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/collectors/python.d.plugin/exim/exim.chart.py b/collectors/python.d.plugin/exim/exim.chart.py
index 5431dd46..68b7b5cf 100644
--- a/collectors/python.d.plugin/exim/exim.chart.py
+++ b/collectors/python.d.plugin/exim/exim.chart.py
@@ -5,13 +5,12 @@
from bases.FrameworkServices.ExecutableService import ExecutableService
-# default module values (can be overridden per job in `config`)
-# update_every = 2
-priority = 60000
-retries = 60
-# charts order (can be overridden if you want less charts, or different order)
-ORDER = ['qemails']
+EXIM_COMMAND = 'exim -bpc'
+
+ORDER = [
+ 'qemails',
+]
CHARTS = {
'qemails': {
@@ -26,9 +25,9 @@ CHARTS = {
class Service(ExecutableService):
def __init__(self, configuration=None, name=None):
ExecutableService.__init__(self, configuration=configuration, name=name)
- self.command = 'exim -bpc'
self.order = ORDER
self.definitions = CHARTS
+ self.command = EXIM_COMMAND
def _get_data(self):
"""
diff --git a/collectors/python.d.plugin/exim/exim.conf b/collectors/python.d.plugin/exim/exim.conf
index 2add7b2c..3b7e6592 100644
--- a/collectors/python.d.plugin/exim/exim.conf
+++ b/collectors/python.d.plugin/exim/exim.conf
@@ -28,11 +28,9 @@ update_every: 10
# If unset, the default for python.d.plugin is used.
# priority: 60000
-# retries sets the number of retries to be made in case of failures.
-# If unset, the default for python.d.plugin is used.
-# Attempts to restore the service are made once every update_every
-# and only if the module has collected values in the past.
-# retries: 60
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
# autodetection_retry sets the job re-check interval in seconds.
# The job is not deleted if check fails.
@@ -59,7 +57,7 @@ update_every: 10
# # JOBs sharing a name are mutually exclusive
# update_every: 1 # the JOB's data collection frequency
# priority: 60000 # the JOB's order on the dashboard
-# retries: 60 # the JOB's number of restoration attempts
+# penalty: yes # the JOB's penalty
# autodetection_retry: 0 # the JOB's re-check interval in seconds
#
# Additionally to the above, exim also supports the following:
diff --git a/collectors/python.d.plugin/fail2ban/README.md b/collectors/python.d.plugin/fail2ban/README.md
index 2ab02196..26511986 100644
--- a/collectors/python.d.plugin/fail2ban/README.md
+++ b/collectors/python.d.plugin/fail2ban/README.md
@@ -21,3 +21,5 @@ If no configuration is given, module will attempt to read log file at `/var/log/
If conf file is not found default jail is `ssh`.
---
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Ffail2ban%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/collectors/python.d.plugin/fail2ban/fail2ban.chart.py b/collectors/python.d.plugin/fail2ban/fail2ban.chart.py
index 95468900..dfd2feab 100644
--- a/collectors/python.d.plugin/fail2ban/fail2ban.chart.py
+++ b/collectors/python.d.plugin/fail2ban/fail2ban.chart.py
@@ -35,8 +35,19 @@ def charts(jails):
},
}
for jail in jails:
- ch[ORDER[0]]['lines'].append([jail, jail, 'incremental'])
- ch[ORDER[1]]['lines'].append(['{0}_in_jail'.format(jail), jail, 'absolute'])
+ dim = [
+ jail,
+ jail,
+ 'incremental',
+ ]
+ ch[ORDER[0]]['lines'].append(dim)
+
+ dim = [
+ '{0}_in_jail'.format(jail),
+ jail,
+ 'absolute',
+ ]
+ ch[ORDER[1]]['lines'].append(dim)
return ch
@@ -46,7 +57,8 @@ RE_JAILS = re.compile(r'\[([a-zA-Z0-9_-]+)\][^\[\]]+?enabled\s+= (true|false)')
# Example:
# 2018-09-12 11:45:53,715 fail2ban.actions[25029]: WARNING [ssh] Unban 195.201.88.33
# 2018-09-12 11:45:58,727 fail2ban.actions[25029]: WARNING [ssh] Ban 217.59.246.27
-RE_DATA = re.compile(r'\[(?P<jail>[A-Za-z-_0-9]+)\] (?P<action>Unban|Ban) (?P<ip>[a-f0-9.:]+)')
+# 2018-09-12 11:45:58,727 fail2ban.actions[25029]: WARNING [ssh] Restore Ban 217.59.246.27
+RE_DATA = re.compile(r'\[(?P<jail>[A-Za-z-_0-9]+)\] (?P<action>Unban|Ban|Restore Ban) (?P<ip>[a-f0-9.:]+)')
DEFAULT_JAILS = [
'ssh',
@@ -58,12 +70,10 @@ class Service(LogService):
LogService.__init__(self, configuration=configuration, name=name)
self.order = ORDER
self.definitions = dict()
-
self.log_path = self.configuration.get('log_path', '/var/log/fail2ban.log')
self.conf_path = self.configuration.get('conf_path', '/etc/fail2ban/jail.local')
self.conf_dir = self.configuration.get('conf_dir', '/etc/fail2ban/jail.d/')
self.exclude = self.configuration.get('exclude', str())
-
self.monitoring_jails = list()
self.banned_ips = defaultdict(set)
self.data = dict()
@@ -116,7 +126,7 @@ class Service(LogService):
jail, action, ip = match['jail'], match['action'], match['ip']
- if action == 'Ban':
+ if action == 'Ban' or action == 'Restore Ban':
self.data[jail] += 1
if ip not in self.banned_ips[jail]:
self.banned_ips[jail].add(ip)
@@ -126,7 +136,7 @@ class Service(LogService):
self.banned_ips[jail].remove(ip)
self.data['{0}_in_jail'.format(jail)] -= 1
- return self.data
+ return self.data
def get_files_from_dir(self, dir_path, suffix):
"""
diff --git a/collectors/python.d.plugin/fail2ban/fail2ban.conf b/collectors/python.d.plugin/fail2ban/fail2ban.conf
index 60ca8723..a36436b5 100644
--- a/collectors/python.d.plugin/fail2ban/fail2ban.conf
+++ b/collectors/python.d.plugin/fail2ban/fail2ban.conf
@@ -27,11 +27,9 @@
# If unset, the default for python.d.plugin is used.
# priority: 60000
-# retries sets the number of retries to be made in case of failures.
-# If unset, the default for python.d.plugin is used.
-# Attempts to restore the service are made once every update_every
-# and only if the module has collected values in the past.
-# retries: 60
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
# autodetection_retry sets the job re-check interval in seconds.
# The job is not deleted if check fails.
@@ -58,7 +56,7 @@
# # JOBs sharing a name are mutually exclusive
# update_every: 1 # the JOB's data collection frequency
# priority: 60000 # the JOB's order on the dashboard
-# retries: 60 # the JOB's number of restoration attempts
+# penalty: yes # the JOB's penalty
# autodetection_retry: 0 # the JOB's re-check interval in seconds
#
# Additionally to the above, fail2ban also supports the following:
diff --git a/collectors/python.d.plugin/freeradius/README.md b/collectors/python.d.plugin/freeradius/README.md
index e5fe88ec..00eb50df 100644
--- a/collectors/python.d.plugin/freeradius/README.md
+++ b/collectors/python.d.plugin/freeradius/README.md
@@ -68,3 +68,5 @@ To do this, create a link from the sites-enabled directory to the status file in
and restart/reload your FREERADIUS server.
---
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Ffreeradius%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/collectors/python.d.plugin/freeradius/freeradius.chart.py b/collectors/python.d.plugin/freeradius/freeradius.chart.py
index 3126831b..8563660c 100644
--- a/collectors/python.d.plugin/freeradius/freeradius.chart.py
+++ b/collectors/python.d.plugin/freeradius/freeradius.chart.py
@@ -3,25 +3,37 @@
# Author: l2isbad
# SPDX-License-Identifier: GPL-3.0-or-later
-from re import findall
+import re
from subprocess import Popen, PIPE
from bases.collection import find_binary
from bases.FrameworkServices.SimpleService import SimpleService
-# default module values (can be overridden per job in `config`)
-priority = 60000
-retries = 60
update_every = 15
+PARSER = re.compile(r'((?<=-)[AP][a-zA-Z-]+) = (\d+)')
+
RADIUS_MSG = 'Message-Authenticator = 0x00, FreeRADIUS-Statistics-Type = 15, Response-Packet-Type = Access-Accept'
-# charts order (can be overridden if you want less charts, or different order)
-ORDER = ['authentication', 'accounting', 'proxy-auth', 'proxy-acct']
+RADCLIENT_RETRIES = 1
+RADCLIENT_TIMEOUT = 1
+
+DEFAULT_HOST = 'localhost'
+DEFAULT_PORT = 18121
+DEFAULT_DO_ACCT = False
+DEFAULT_DO_PROXY_AUTH = False
+DEFAULT_DO_PROXY_ACCT = False
+
+ORDER = [
+ 'authentication',
+ 'accounting',
+ 'proxy-auth',
+ 'proxy-acct',
+]
CHARTS = {
'authentication': {
- 'options': [None, 'Authentication', 'packets/s', 'Authentication', 'freerad.auth', 'line'],
+ 'options': [None, 'Authentication', 'packets/s', 'authentication', 'freerad.auth', 'line'],
'lines': [
['access-accepts', None, 'incremental'],
['access-rejects', None, 'incremental'],
@@ -33,7 +45,7 @@ CHARTS = {
]
},
'accounting': {
- 'options': [None, 'Accounting', 'packets/s', 'Accounting', 'freerad.acct', 'line'],
+ 'options': [None, 'Accounting', 'packets/s', 'accounting', 'freerad.acct', 'line'],
'lines': [
['accounting-requests', 'requests', 'incremental'],
['accounting-responses', 'responses', 'incremental'],
@@ -45,7 +57,7 @@ CHARTS = {
]
},
'proxy-auth': {
- 'options': [None, 'Proxy Authentication', 'packets/s', 'Authentication', 'freerad.proxy.auth', 'line'],
+ 'options': [None, 'Proxy Authentication', 'packets/s', 'authentication', 'freerad.proxy.auth', 'line'],
'lines': [
['proxy-access-accepts', 'access-accepts', 'incremental'],
['proxy-access-rejects', 'access-rejects', 'incremental'],
@@ -57,7 +69,7 @@ CHARTS = {
]
},
'proxy-acct': {
- 'options': [None, 'Proxy Accounting', 'packets/s', 'Accounting', 'freerad.proxy.acct', 'line'],
+ 'options': [None, 'Proxy Accounting', 'packets/s', 'accounting', 'freerad.proxy.acct', 'line'],
'lines': [
['proxy-accounting-requests', 'requests', 'incremental'],
['proxy-accounting-responses', 'responses', 'incremental'],
@@ -71,46 +83,80 @@ CHARTS = {
}
+def radclient_status(radclient, retries, timeout, host, port, secret):
+ # radclient -r 1 -t 1 -x 127.0.0.1:18121 status secret
+
+ return '{radclient} -r {num_retries} -t {timeout} -x {host}:{port} status {secret}'.format(
+ radclient=radclient,
+ num_retries=retries,
+ timeout=timeout,
+ host=host,
+ port=port,
+ secret=secret,
+ ).split()
+
+
class Service(SimpleService):
def __init__(self, configuration=None, name=None):
SimpleService.__init__(self, configuration=configuration, name=name)
+ self.order = ORDER
self.definitions = CHARTS
- self.host = self.configuration.get('host', 'localhost')
- self.port = self.configuration.get('port', '18121')
+ self.host = self.configuration.get('host', DEFAULT_HOST)
+ self.port = self.configuration.get('port', DEFAULT_PORT)
self.secret = self.configuration.get('secret')
- self.acct = self.configuration.get('acct', False)
- self.proxy_auth = self.configuration.get('proxy_auth', False)
- self.proxy_acct = self.configuration.get('proxy_acct', False)
- chart_choice = [True, bool(self.acct), bool(self.proxy_auth), bool(self.proxy_acct)]
- self.order = [chart for chart, choice in zip(ORDER, chart_choice) if choice]
+ self.do_acct = self.configuration.get('acct', DEFAULT_DO_ACCT)
+ self.do_proxy_auth = self.configuration.get('proxy_auth', DEFAULT_DO_PROXY_AUTH)
+ self.do_proxy_acct = self.configuration.get('proxy_acct', DEFAULT_DO_PROXY_ACCT)
self.echo = find_binary('echo')
self.radclient = find_binary('radclient')
self.sub_echo = [self.echo, RADIUS_MSG]
- self.sub_radclient = [self.radclient, '-r', '1', '-t', '1', '-x',
- ':'.join([self.host, self.port]), 'status', self.secret]
+ self.sub_radclient = radclient_status(
+ self.radclient, RADCLIENT_RETRIES, RADCLIENT_TIMEOUT, self.host, self.port, self.secret,
+ )
def check(self):
- if not all([self.echo, self.radclient]):
- self.error('Can\'t locate "radclient" binary or binary is not executable by netdata')
+ if not self.radclient:
+ self.error("Can't locate 'radclient' binary or binary is not executable by netdata user")
return False
+
+ if not self.echo:
+ self.error("Can't locate 'echo' binary or binary is not executable by netdata user")
+ return None
+
if not self.secret:
- self.error('"secret" not set')
+ self.error("'secret' isn't set")
return None
- if self._get_raw_data():
- return True
- self.error('Request returned no data. Is server alive?')
- return False
+ if not self.get_raw_data():
+ self.error('Request returned no data. Is server alive?')
+ return False
- def _get_data(self):
+ if not self.do_acct:
+ self.order.remove('accounting')
+
+ if not self.do_proxy_auth:
+ self.order.remove('proxy-auth')
+
+ if not self.do_proxy_acct:
+ self.order.remove('proxy-acct')
+
+ return True
+
+ def get_data(self):
"""
Format data received from shell command
:return: dict
"""
- result = self._get_raw_data()
- return dict([(elem[0].lower(), int(elem[1])) for elem in findall(r'((?<=-)[AP][a-zA-Z-]+) = (\d+)', result)])
+ result = self.get_raw_data()
- def _get_raw_data(self):
+ if not result:
+ return None
+
+ return dict(
+ (key.lower(), value) for key, value in PARSER.findall(result)
+ )
+
+ def get_raw_data(self):
"""
The following code is equivalent to
'echo "Message-Authenticator = 0x00, FreeRADIUS-Statistics-Type = 15, Response-Packet-Type = Access-Accept"
@@ -124,6 +170,8 @@ class Service(SimpleService):
raw_result = process_rad.communicate()[0]
except OSError:
return None
+
if process_rad.returncode == 0:
return raw_result.decode()
+
return None
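The refactored module builds the probe command with `radclient_status()` and parses the reply with the precompiled `PARSER` regex. A quick demonstration of that parsing step on a fabricated reply (the attribute names follow the FreeRADIUS statistics format; the values are invented):

```python
import re

PARSER = re.compile(r'((?<=-)[AP][a-zA-Z-]+) = (\d+)')

# Fabricated excerpt of a 'radclient ... status' reply, for illustration.
sample = '''
FreeRADIUS-Total-Access-Accepts = 42
FreeRADIUS-Total-Access-Rejects = 3
FreeRADIUS-Total-Proxy-Access-Accepts = 0
'''

# The module keeps the matched values as strings; int() is added here only
# to make the sample output easier to read.
data = dict((key.lower(), int(value)) for key, value in PARSER.findall(sample))
print(data)
# {'access-accepts': 42, 'access-rejects': 3, 'proxy-access-accepts': 0}
```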
diff --git a/collectors/python.d.plugin/freeradius/freeradius.conf b/collectors/python.d.plugin/freeradius/freeradius.conf
index 3336d4c4..74b27377 100644
--- a/collectors/python.d.plugin/freeradius/freeradius.conf
+++ b/collectors/python.d.plugin/freeradius/freeradius.conf
@@ -27,11 +27,9 @@
# If unset, the default for python.d.plugin is used.
# priority: 60000
-# retries sets the number of retries to be made in case of failures.
-# If unset, the default for python.d.plugin is used.
-# Attempts to restore the service are made once every update_every
-# and only if the module has collected values in the past.
-# retries: 60
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
# autodetection_retry sets the job re-check interval in seconds.
# The job is not deleted if check fails.
@@ -58,7 +56,7 @@
# # JOBs sharing a name are mutually exclusive
# update_every: 1 # the JOB's data collection frequency
# priority: 60000 # the JOB's order on the dashboard
-# retries: 60 # the JOB's number of restoration attempts
+# penalty: yes # the JOB's penalty
# autodetection_retry: 0 # the JOB's re-check interval in seconds
#
# Additionally to the above, freeradius also supports the following:
diff --git a/collectors/python.d.plugin/go_expvar/README.md b/collectors/python.d.plugin/go_expvar/README.md
index e3356e1f..3942a7be 100644
--- a/collectors/python.d.plugin/go_expvar/README.md
+++ b/collectors/python.d.plugin/go_expvar/README.md
@@ -169,7 +169,6 @@ and its base `UrlService` class. These are:
update_every: 1 # the job's data collection frequency
priority: 60000 # the job's order on the dashboard
- retries: 60 # the job's number of restoration attempts
user: admin # use when the expvar endpoint is protected by HTTP Basic Auth
password: sekret # use when the expvar endpoint is protected by HTTP Basic Auth
@@ -274,3 +273,5 @@ The images below show how do the final charts in netdata look.
![Custom charts](https://cloud.githubusercontent.com/assets/15180106/26762051/62ae915e-493b-11e7-8518-bd25a3886650.png)
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fgo_expvar%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/collectors/python.d.plugin/go_expvar/go_expvar.chart.py b/collectors/python.d.plugin/go_expvar/go_expvar.chart.py
index 76e8b72e..e82a8776 100644
--- a/collectors/python.d.plugin/go_expvar/go_expvar.chart.py
+++ b/collectors/python.d.plugin/go_expvar/go_expvar.chart.py
@@ -6,17 +6,24 @@
from __future__ import division
import json
+from collections import namedtuple
+
from bases.FrameworkServices.UrlService import UrlService
-# default module values (can be overridden per job in `config`)
-# update_every = 2
-priority = 60000
-retries = 60
+MEMSTATS_ORDER = [
+ 'memstats_heap',
+ 'memstats_stack',
+ 'memstats_mspan',
+ 'memstats_mcache',
+ 'memstats_sys',
+ 'memstats_live_objects',
+ 'memstats_gc_pauses',
+]
MEMSTATS_CHARTS = {
'memstats_heap': {
- 'options': ['heap', 'memory: size of heap memory structures', 'kB', 'memstats',
+ 'options': ['heap', 'memory: size of heap memory structures', 'KiB', 'memstats',
'expvar.memstats.heap', 'line'],
'lines': [
['memstats_heap_alloc', 'alloc', 'absolute', 1, 1024],
@@ -24,21 +31,21 @@ MEMSTATS_CHARTS = {
]
},
'memstats_stack': {
- 'options': ['stack', 'memory: size of stack memory structures', 'kB', 'memstats',
+ 'options': ['stack', 'memory: size of stack memory structures', 'KiB', 'memstats',
'expvar.memstats.stack', 'line'],
'lines': [
['memstats_stack_inuse', 'inuse', 'absolute', 1, 1024]
]
},
'memstats_mspan': {
- 'options': ['mspan', 'memory: size of mspan memory structures', 'kB', 'memstats',
+ 'options': ['mspan', 'memory: size of mspan memory structures', 'KiB', 'memstats',
'expvar.memstats.mspan', 'line'],
'lines': [
['memstats_mspan_inuse', 'inuse', 'absolute', 1, 1024]
]
},
'memstats_mcache': {
- 'options': ['mcache', 'memory: size of mcache memory structures', 'kB', 'memstats',
+ 'options': ['mcache', 'memory: size of mcache memory structures', 'KiB', 'memstats',
'expvar.memstats.mcache', 'line'],
'lines': [
['memstats_mcache_inuse', 'inuse', 'absolute', 1, 1024]
@@ -52,7 +59,7 @@ MEMSTATS_CHARTS = {
]
},
'memstats_sys': {
- 'options': ['sys', 'memory: size of reserved virtual address space', 'kB', 'memstats',
+ 'options': ['sys', 'memory: size of reserved virtual address space', 'KiB', 'memstats',
'expvar.memstats.sys', 'line'],
'lines': [
['memstats_sys', 'sys', 'absolute', 1, 1024]
@@ -67,8 +74,14 @@ MEMSTATS_CHARTS = {
}
}
-MEMSTATS_ORDER = ['memstats_heap', 'memstats_stack', 'memstats_mspan', 'memstats_mcache',
- 'memstats_sys', 'memstats_live_objects', 'memstats_gc_pauses']
+EXPVAR = namedtuple(
+ "EXPVAR",
+ [
+ "key",
+ "type",
+ "id",
+ ]
+)
def flatten(d, top='', sep='.'):
@@ -85,7 +98,6 @@ def flatten(d, top='', sep='.'):
class Service(UrlService):
def __init__(self, configuration=None, name=None):
UrlService.__init__(self, configuration=configuration, name=name)
-
# if memstats collection is enabled, add the charts and their order
if self.configuration.get('collect_memstats'):
self.definitions = dict(MEMSTATS_CHARTS)
@@ -118,7 +130,7 @@ class Service(UrlService):
def _parse_extra_charts_config(self, extra_charts_config):
# a place to store the expvar keys and their types
- self.expvars = dict()
+ self.expvars = list()
for chart in extra_charts_config:
@@ -156,11 +168,8 @@ class Service(UrlService):
self.info('Unsupported expvar_type "{0}". Must be "int" or "float"'.format(ev_type))
continue
- if ev_key in self.expvars:
- self.info('Duplicate expvar key {0}: skipping line.'.format(ev_key))
- continue
-
- self.expvars[ev_key] = (ev_type, line_id)
+ # self.expvars[ev_key] = (ev_type, line_id)
+ self.expvars.append(EXPVAR(ev_key, ev_type, line_id))
chart_dict['lines'].append(
[
@@ -197,21 +206,21 @@ class Service(UrlService):
# the rest of the data, thus avoiding needless iterating over the multiply nested memstats dict.
del (data['memstats'])
flattened = flatten(data)
- for k, v in flattened.items():
- ev = self.expvars.get(k)
- if not ev:
- # expvar is not defined in config, skip it
+
+ for ev in self.expvars:
+ v = flattened.get(ev.key)
+
+ if v is None:
continue
+
try:
- key_type, line_id = ev
- if key_type == 'int':
- expvars[line_id] = int(v)
- elif key_type == 'float':
- # if the value type is float, multiply it by 1000 and set line divisor to 1000
- expvars[line_id] = float(v) * 100
+ if ev.type == 'int':
+ expvars[ev.id] = int(v)
+ elif ev.type == 'float':
+ expvars[ev.id] = float(v) * 100
except ValueError:
- self.info('Failed to parse value for key {0} as {1}, ignoring key.'.format(k, key_type))
- del self.expvars[k]
+ self.info('Failed to parse value for key {0} as {1}, ignoring key.'.format(ev.key, ev.type))
+ return None
return expvars
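The go_expvar refactor swaps the `{key: (type, id)}` dict for a list of `EXPVAR` namedtuples and resolves each key against the flattened JSON. Here is a sketch of how the pieces fit together; the `flatten()` body below is an assumed re-implementation (the diff only shows its signature), and the payload is invented:

```python
from collections import namedtuple

EXPVAR = namedtuple('EXPVAR', ['key', 'type', 'id'])

def flatten(d, top='', sep='.'):
    # Assumed behavior: nested dicts become dotted keys,
    # e.g. {'a': {'b': 1}} -> {'a.b': 1}.
    items = {}
    for key, value in d.items():
        path = top + sep + key if top else key
        if isinstance(value, dict):
            items.update(flatten(value, path, sep))
        else:
            items[path] = value
    return items

expvars = [EXPVAR('counters.requests', 'int', 'requests')]
payload = {'counters': {'requests': '128'}}  # invented expvar JSON

flattened = flatten(payload)
data = {}
for ev in expvars:
    v = flattened.get(ev.key)
    if v is None:
        continue
    data[ev.id] = int(v) if ev.type == 'int' else float(v) * 100
print(data)  # {'requests': 128}
```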
diff --git a/collectors/python.d.plugin/go_expvar/go_expvar.conf b/collectors/python.d.plugin/go_expvar/go_expvar.conf
index af89158a..4b821cde 100644
--- a/collectors/python.d.plugin/go_expvar/go_expvar.conf
+++ b/collectors/python.d.plugin/go_expvar/go_expvar.conf
@@ -27,11 +27,9 @@
# If unset, the default for python.d.plugin is used.
# priority: 60000
-# retries sets the number of retries to be made in case of failures.
-# If unset, the default for python.d.plugin is used.
-# Attempts to restore the service are made once every update_every
-# and only if the module has collected values in the past.
-# retries: 60
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
# autodetection_retry sets the job re-check interval in seconds.
# The job is not deleted if check fails.
@@ -53,7 +51,7 @@
# # JOBs sharing a name are mutually exclusive
# update_every: 1 # the JOB's data collection frequency
# priority: 60000 # the JOB's order on the dashboard
-# retries: 60 # the JOB's number of restoration attempts
+# penalty: yes # the JOB's penalty
# autodetection_retry: 0 # the JOB's re-check interval in seconds
#
# Additionally to the above, this plugin also supports the following:
diff --git a/collectors/python.d.plugin/haproxy/README.md b/collectors/python.d.plugin/haproxy/README.md
index 4bff2567..4bd80a23 100644
--- a/collectors/python.d.plugin/haproxy/README.md
+++ b/collectors/python.d.plugin/haproxy/README.md
@@ -47,3 +47,5 @@ via_socket:
If no configuration is given, module will fail to run.
---
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fhaproxy%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/collectors/python.d.plugin/haproxy/haproxy.chart.py b/collectors/python.d.plugin/haproxy/haproxy.chart.py
index a46689f5..d97d28d2 100644
--- a/collectors/python.d.plugin/haproxy/haproxy.chart.py
+++ b/collectors/python.d.plugin/haproxy/haproxy.chart.py
@@ -14,12 +14,6 @@ except ImportError:
from bases.FrameworkServices.SocketService import SocketService
from bases.FrameworkServices.UrlService import UrlService
-
-# default module values (can be overridden per job in `config`)
-# update_every = 2
-priority = 60000
-retries = 60
-
# charts order (can be overridden if you want less charts, or different order)
ORDER = [
'fbin',
@@ -56,11 +50,11 @@ ORDER = [
CHARTS = {
'fbin': {
- 'options': [None, 'Kilobytes In', 'KB/s', 'frontend', 'haproxy_f.bin', 'line'],
+ 'options': [None, 'Kilobytes In', 'KiB/s', 'frontend', 'haproxy_f.bin', 'line'],
'lines': []
},
'fbout': {
- 'options': [None, 'Kilobytes Out', 'KB/s', 'frontend', 'haproxy_f.bout', 'line'],
+ 'options': [None, 'Kilobytes Out', 'KiB/s', 'frontend', 'haproxy_f.bout', 'line'],
'lines': []
},
'fscur': {
@@ -101,11 +95,11 @@ CHARTS = {
'lines': []
},
'bbin': {
- 'options': [None, 'Kilobytes In', 'KB/s', 'backend', 'haproxy_b.bin', 'line'],
+ 'options': [None, 'Kilobytes In', 'KiB/s', 'backend', 'haproxy_b.bin', 'line'],
'lines': []
},
'bbout': {
- 'options': [None, 'Kilobytes Out', 'KB/s', 'backend', 'haproxy_b.bout', 'line'],
+ 'options': [None, 'Kilobytes Out', 'KiB/s', 'backend', 'haproxy_b.bout', 'line'],
'lines': []
},
'bscur': {
@@ -146,41 +140,39 @@ CHARTS = {
'lines': []
},
'bqtime': {
- 'options': [None, 'The average queue time over the 1024 last requests', 'ms', 'backend',
+ 'options': [None, 'The average queue time over the 1024 last requests', 'milliseconds', 'backend',
'haproxy_b.qtime', 'line'],
'lines': []
},
'bctime': {
- 'options': [None, 'The average connect time over the 1024 last requests', 'ms', 'backend',
+ 'options': [None, 'The average connect time over the 1024 last requests', 'milliseconds', 'backend',
'haproxy_b.ctime', 'line'],
'lines': []
},
'brtime': {
- 'options': [None, 'The average response time over the 1024 last requests', 'ms', 'backend',
+ 'options': [None, 'The average response time over the 1024 last requests', 'milliseconds', 'backend',
'haproxy_b.rtime', 'line'],
'lines': []
},
'bttime': {
- 'options': [None, 'The average total session time over the 1024 last requests', 'ms', 'backend',
+ 'options': [None, 'The average total session time over the 1024 last requests', 'milliseconds', 'backend',
'haproxy_b.ttime', 'line'],
'lines': []
},
'health_sdown': {
- 'options': [None, 'Backend Servers In DOWN State', 'failed servers', 'health',
- 'haproxy_hs.down', 'line'],
+ 'options': [None, 'Backend Servers In DOWN State', 'failed servers', 'health', 'haproxy_hs.down', 'line'],
'lines': []
},
'health_sup': {
- 'options': [None, 'Backend Servers In UP State', 'health servers', 'health',
- 'haproxy_hs.up', 'line'],
+ 'options': [None, 'Backend Servers In UP State', 'health servers', 'health', 'haproxy_hs.up', 'line'],
'lines': []
},
'health_bdown': {
- 'options': [None, 'Is Backend Alive? 1 = DOWN', 'failed backend', 'health', 'haproxy_hb.down', 'line'],
+ 'options': [None, 'Is Backend Failed?', 'boolean', 'health', 'haproxy_hb.down', 'line'],
'lines': []
},
'health_idle': {
- 'options': [None, 'The Ratio Of Polling Time Vs Total Time', 'percent', 'health', 'haproxy.idle', 'line'],
+ 'options': [None, 'The Ratio Of Polling Time Vs Total Time', 'percentage', 'health', 'haproxy.idle', 'line'],
'lines': [
['idle', None, 'absolute']
]
@@ -214,6 +206,7 @@ REGEX = dict(url=re_compile(r'idle = (?P<idle>[0-9]+)'),
socket=re_compile(r'Idle_pct: (?P<idle>[0-9]+)'))
+# TODO: the code is unreadable
class Service(UrlService, SocketService):
def __init__(self, configuration=None, name=None):
if 'socket' in configuration:
diff --git a/collectors/python.d.plugin/haproxy/haproxy.conf b/collectors/python.d.plugin/haproxy/haproxy.conf
index a40dd76a..10a0df3c 100644
--- a/collectors/python.d.plugin/haproxy/haproxy.conf
+++ b/collectors/python.d.plugin/haproxy/haproxy.conf
@@ -27,11 +27,9 @@
# If unset, the default for python.d.plugin is used.
# priority: 60000
-# retries sets the number of retries to be made in case of failures.
-# If unset, the default for python.d.plugin is used.
-# Attempts to restore the service are made once every update_every
-# and only if the module has collected values in the past.
-# retries: 60
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
# autodetection_retry sets the job re-check interval in seconds.
# The job is not deleted if check fails.
@@ -58,7 +56,7 @@
# # JOBs sharing a name are mutually exclusive
# update_every: 1 # the JOB's data collection frequency
# priority: 60000 # the JOB's order on the dashboard
-# retries: 60 # the JOB's number of restoration attempts
+# penalty: yes # the JOB's penalty
# autodetection_retry: 0 # the JOB's re-check interval in seconds
#
# Additionally to the above, haproxy also supports the following:
diff --git a/collectors/python.d.plugin/hddtemp/README.md b/collectors/python.d.plugin/hddtemp/README.md
index 1236186a..d9f254d5 100644
--- a/collectors/python.d.plugin/hddtemp/README.md
+++ b/collectors/python.d.plugin/hddtemp/README.md
@@ -20,3 +20,5 @@ port: 7634
If no configuration is given, module will attempt to connect to hddtemp daemon on `127.0.0.1:7634` address
---
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fhddtemp%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/collectors/python.d.plugin/hddtemp/hddtemp.chart.py b/collectors/python.d.plugin/hddtemp/hddtemp.chart.py
index dea70117..810aaacc 100644
--- a/collectors/python.d.plugin/hddtemp/hddtemp.chart.py
+++ b/collectors/python.d.plugin/hddtemp/hddtemp.chart.py
@@ -12,7 +12,9 @@ from copy import deepcopy
from bases.FrameworkServices.SocketService import SocketService
-ORDER = ['temperatures']
+ORDER = [
+ 'temperatures',
+]
CHARTS = {
'temperatures': {
@@ -39,11 +41,11 @@ class Service(SocketService):
SocketService.__init__(self, configuration=configuration, name=name)
self.order = ORDER
self.definitions = deepcopy(CHARTS)
+ self.do_only = self.configuration.get('devices')
self._keep_alive = False
self.request = ""
self.host = "127.0.0.1"
self.port = 7634
- self.do_only = self.configuration.get('devices')
def get_disks(self):
r = self._get_raw_data()
@@ -89,8 +91,7 @@ class Service(SocketService):
return False
for d in disks:
- n = d.id if d.id.startswith('sd') else d.name
- dim = [d.id, n]
+ dim = [d.id]
self.definitions['temperatures']['lines'].append(dim)
return True
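For context on what `get_disks()` consumes: the hddtemp daemon replies with one pipe-delimited record per disk. A hedged parsing sketch, with the field layout (`|device|model|temperature|unit|`) assumed from the hddtemp protocol rather than copied from this module:

```python
# Invented sample reply from a hddtemp daemon on 127.0.0.1:7634.
raw = '|/dev/sda|ST2000DM001|34|C||/dev/sdb|WDC WD20EZRX|31|C|'

def parse_hddtemp(raw):
    fields = raw.split('|')
    # Each record is 4 fields; records are separated by an empty field.
    records = [fields[i:i + 4] for i in range(1, len(fields) - 1, 5)]
    return {dev.split('/')[-1]: int(temp) for dev, model, temp, unit in records}

print(parse_hddtemp(raw))  # {'sda': 34, 'sdb': 31}
```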
diff --git a/collectors/python.d.plugin/hddtemp/hddtemp.conf b/collectors/python.d.plugin/hddtemp/hddtemp.conf
index 9165798a..b2d7aef6 100644
--- a/collectors/python.d.plugin/hddtemp/hddtemp.conf
+++ b/collectors/python.d.plugin/hddtemp/hddtemp.conf
@@ -27,11 +27,9 @@
# If unset, the default for python.d.plugin is used.
# priority: 60000
-# retries sets the number of retries to be made in case of failures.
-# If unset, the default for python.d.plugin is used.
-# Attempts to restore the service are made once every update_every
-# and only if the module has collected values in the past.
-# retries: 60
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
# autodetection_retry sets the job re-check interval in seconds.
# The job is not deleted if check fails.
@@ -58,7 +56,7 @@
# # JOBs sharing a name are mutually exclusive
# update_every: 1 # the JOB's data collection frequency
# priority: 60000 # the JOB's order on the dashboard
-# retries: 60 # the JOB's number of restoration attempts
+# penalty: yes # the JOB's penalty
# autodetection_retry: 0 # the JOB's re-check interval in seconds
#
# Additionally to the above, hddtemp also supports the following:
diff --git a/collectors/python.d.plugin/httpcheck/README.md b/collectors/python.d.plugin/httpcheck/README.md
index 75910766..4cd024d1 100644
--- a/collectors/python.d.plugin/httpcheck/README.md
+++ b/collectors/python.d.plugin/httpcheck/README.md
@@ -39,3 +39,5 @@ server:
response time is low and should be used as reference only.
---
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fhttpcheck%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/collectors/python.d.plugin/httpcheck/httpcheck.chart.py b/collectors/python.d.plugin/httpcheck/httpcheck.chart.py
index f046f33c..fd51370d 100644
--- a/collectors/python.d.plugin/httpcheck/httpcheck.chart.py
+++ b/collectors/python.d.plugin/httpcheck/httpcheck.chart.py
@@ -16,7 +16,6 @@ from bases.FrameworkServices.UrlService import UrlService
# default module values (can be overridden per job in `config`)
update_every = 3
priority = 60000
-retries = 60
# Response
HTTP_RESPONSE_TIME = 'time'
@@ -29,11 +28,15 @@ HTTP_BAD_STATUS = 'bad_status'
HTTP_TIMEOUT = 'timeout'
HTTP_NO_CONNECTION = 'no_connection'
-ORDER = ['response_time', 'response_length', 'status']
+ORDER = [
+ 'response_time',
+ 'response_length',
+ 'status',
+]
CHARTS = {
'response_time': {
- 'options': [None, 'HTTP response time', 'ms', 'response', 'httpcheck.responsetime', 'line'],
+ 'options': [None, 'HTTP response time', 'milliseconds', 'response', 'httpcheck.responsetime', 'line'],
'lines': [
[HTTP_RESPONSE_TIME, 'time', 'absolute', 100, 1000]
]
@@ -60,12 +63,12 @@ CHARTS = {
class Service(UrlService):
def __init__(self, configuration=None, name=None):
UrlService.__init__(self, configuration=configuration, name=name)
+ self.order = ORDER
+ self.definitions = CHARTS
pattern = self.configuration.get('regex')
self.regex = re.compile(pattern) if pattern else None
self.status_codes_accepted = self.configuration.get('status_accepted', [200])
self.follow_redirect = self.configuration.get('redirect', True)
- self.order = ORDER
- self.definitions = CHARTS
def _get_data(self):
"""
diff --git a/collectors/python.d.plugin/httpcheck/httpcheck.conf b/collectors/python.d.plugin/httpcheck/httpcheck.conf
index bd21b5af..1e1dd020 100644
--- a/collectors/python.d.plugin/httpcheck/httpcheck.conf
+++ b/collectors/python.d.plugin/httpcheck/httpcheck.conf
@@ -27,6 +27,10 @@
# If unset, the default for python.d.plugin is used.
# priority: 60000
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
+
# chart_cleanup sets the default chart cleanup interval in iterations.
# A chart is marked as obsolete if it has not been updated
# 'chart_cleanup' iterations in a row.
@@ -61,7 +65,7 @@ chart_cleanup: 0
# # JOBs sharing a name are mutually exclusive
# update_every: 3 # [optional] the JOB's data collection frequency
# priority: 60000 # [optional] the JOB's order on the dashboard
-# retries: 60 # [optional] the JOB's number of restoration attempts
+# penalty: yes # the JOB's penalty
# timeout: 1 # [optional] the timeout when connecting, supports decimals (e.g. 0.5s)
# url: 'http[s]://host-ip-or-dns[:port][path]'
# # [required] the remote host url to connect to. If [:port] is missing, it defaults to 80
diff --git a/collectors/python.d.plugin/icecast/README.md b/collectors/python.d.plugin/icecast/README.md
index a28a6c39..068da6a0 100644
--- a/collectors/python.d.plugin/icecast/README.md
+++ b/collectors/python.d.plugin/icecast/README.md
@@ -24,3 +24,5 @@ remote:
Without configuration, module attempts to connect to `http://localhost:8443/status-json.xsl`
---
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Ficecast%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/collectors/python.d.plugin/icecast/icecast.chart.py b/collectors/python.d.plugin/icecast/icecast.chart.py
index d8813f9b..40eaf89b 100644
--- a/collectors/python.d.plugin/icecast/icecast.chart.py
+++ b/collectors/python.d.plugin/icecast/icecast.chart.py
@@ -8,11 +8,9 @@ import json
from bases.FrameworkServices.UrlService import UrlService
-priority = 60000
-retries = 60
-
-# charts order (can be overridden if you want less charts, or different order)
-ORDER = ['listeners']
+ORDER = [
+ 'listeners',
+]
CHARTS = {
'listeners': {
diff --git a/collectors/python.d.plugin/icecast/icecast.conf b/collectors/python.d.plugin/icecast/icecast.conf
index a900d06d..a33074ae 100644
--- a/collectors/python.d.plugin/icecast/icecast.conf
+++ b/collectors/python.d.plugin/icecast/icecast.conf
@@ -27,11 +27,9 @@
# If unset, the default for python.d.plugin is used.
# priority: 60000
-# retries sets the number of retries to be made in case of failures.
-# If unset, the default for python.d.plugin is used.
-# Attempts to restore the service are made once every update_every
-# and only if the module has collected values in the past.
-# retries: 60
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
# autodetection_retry sets the job re-check interval in seconds.
# The job is not deleted if check fails.
@@ -58,7 +56,7 @@
# # JOBs sharing a name are mutually exclusive
# update_every: 1 # the JOB's data collection frequency
# priority: 60000 # the JOB's order on the dashboard
-# retries: 60 # the JOB's number of restoration attempts
+# penalty: yes # the JOB's penalty
# autodetection_retry: 0 # the JOB's re-check interval in seconds
#
# Additionally to the above, icecast also supports the following:
diff --git a/collectors/python.d.plugin/ipfs/README.md b/collectors/python.d.plugin/ipfs/README.md
index a30649a5..a8392037 100644
--- a/collectors/python.d.plugin/ipfs/README.md
+++ b/collectors/python.d.plugin/ipfs/README.md
@@ -23,3 +23,5 @@ localhost:
---
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fipfs%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/collectors/python.d.plugin/ipfs/ipfs.chart.py b/collectors/python.d.plugin/ipfs/ipfs.chart.py
index 3f6794e4..8c89b4be 100644
--- a/collectors/python.d.plugin/ipfs/ipfs.chart.py
+++ b/collectors/python.d.plugin/ipfs/ipfs.chart.py
@@ -7,25 +7,17 @@ import json
from bases.FrameworkServices.UrlService import UrlService
-# default module values (can be overridden per job in `config`)
-# update_every = 2
-priority = 60000
-retries = 60
-# default job configuration (overridden by python.d.plugin)
-# config = {'local': {
-# 'update_every': update_every,
-# 'retries': retries,
-# 'priority': priority,
-# 'url': 'http://localhost:5001'
-# }}
-
-# charts order (can be overridden if you want less charts, or different order)
-ORDER = ['bandwidth', 'peers', 'repo_size', 'repo_objects']
+ORDER = [
+ 'bandwidth',
+ 'peers',
+ 'repo_size',
+ 'repo_objects',
+]
CHARTS = {
'bandwidth': {
- 'options': [None, 'IPFS Bandwidth', 'kbits/s', 'Bandwidth', 'ipfs.bandwidth', 'line'],
+ 'options': [None, 'IPFS Bandwidth', 'kilobits/s', 'Bandwidth', 'ipfs.bandwidth', 'line'],
'lines': [
['in', None, 'absolute', 8, 1000],
['out', None, 'absolute', -8, 1000]
@@ -38,10 +30,10 @@ CHARTS = {
]
},
'repo_size': {
- 'options': [None, 'IPFS Repo Size', 'GB', 'Size', 'ipfs.repo_size', 'area'],
+ 'options': [None, 'IPFS Repo Size', 'GiB', 'Size', 'ipfs.repo_size', 'area'],
'lines': [
- ['avail', None, 'absolute', 1, 1e9],
- ['size', None, 'absolute', 1, 1e9],
+ ['avail', None, 'absolute', 1, 1 << 30],
+ ['size', None, 'absolute', 1, 1 << 30],
]
},
'repo_objects': {
@@ -69,11 +61,11 @@ SI_zeroes = {
class Service(UrlService):
def __init__(self, configuration=None, name=None):
UrlService.__init__(self, configuration=configuration, name=name)
- self.baseurl = self.configuration.get('url', 'http://localhost:5001')
self.order = ORDER
self.definitions = CHARTS
- self.__storage_max = None
+ self.baseurl = self.configuration.get('url', 'http://localhost:5001')
self.do_pinapi = self.configuration.get('pinapi')
+ self.__storage_max = None
def _get_json(self, sub_url):
"""
@@ -135,6 +127,6 @@ class Service(UrlService):
for new_key, orig_key, xmute in cfg[suburl]:
try:
r[new_key] = xmute(in_json[orig_key])
- except Exception:
- continue
+ except Exception as error:
+ self.debug(error)
return r or None
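Switching the repo-size divisor from `1e9` to `1 << 30` moves the chart from decimal gigabytes to binary gibibytes, matching the new `GiB` label. The numeric difference, for an invented repo size:

```python
size = 250_000_000_000  # bytes, invented sample

assert (1 << 30) == 1024 ** 3
print(size / 1e9)        # 250.0     decimal GB (old divisor)
print(size / (1 << 30))  # ~232.83   binary GiB (new divisor)
```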
diff --git a/collectors/python.d.plugin/ipfs/ipfs.conf b/collectors/python.d.plugin/ipfs/ipfs.conf
index e3df0f6b..c7e18648 100644
--- a/collectors/python.d.plugin/ipfs/ipfs.conf
+++ b/collectors/python.d.plugin/ipfs/ipfs.conf
@@ -27,11 +27,9 @@
# If unset, the default for python.d.plugin is used.
# priority: 60000
-# retries sets the number of retries to be made in case of failures.
-# If unset, the default for python.d.plugin is used.
-# Attempts to restore the service are made once every update_every
-# and only if the module has collected values in the past.
-# retries: 60
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
# autodetection_retry sets the job re-check interval in seconds.
# The job is not deleted if check fails.
@@ -58,7 +56,7 @@
# # JOBs sharing a name are mutually exclusive
# update_every: 1 # the JOB's data collection frequency
# priority: 60000 # the JOB's order on the dashboard
-# retries: 60 # the JOB's number of restoration attempts
+# penalty: yes # the JOB's penalty
# autodetection_retry: 0 # the JOB's re-check interval in seconds
#
# Additionally to the above, ipfs also supports the following:
diff --git a/collectors/python.d.plugin/isc_dhcpd/README.md b/collectors/python.d.plugin/isc_dhcpd/README.md
index 334d86e3..67547e2f 100644
--- a/collectors/python.d.plugin/isc_dhcpd/README.md
+++ b/collectors/python.d.plugin/isc_dhcpd/README.md
@@ -32,3 +32,5 @@ In case of python2 you need to install `py2-ipaddress` to make plugin work.
The module will not work if no configuration is given.
---
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fisc_dhcpd%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/collectors/python.d.plugin/isc_dhcpd/isc_dhcpd.chart.py b/collectors/python.d.plugin/isc_dhcpd/isc_dhcpd.chart.py
index a9f27494..bbe7a936 100644
--- a/collectors/python.d.plugin/isc_dhcpd/isc_dhcpd.chart.py
+++ b/collectors/python.d.plugin/isc_dhcpd/isc_dhcpd.chart.py
@@ -19,14 +19,16 @@ from copy import deepcopy
from bases.FrameworkServices.SimpleService import SimpleService
-priority = 60000
-retries = 60
-ORDER = ['pools_utilization', 'pools_active_leases', 'leases_total']
+ORDER = [
+ 'pools_utilization',
+ 'pools_active_leases',
+ 'leases_total',
+]
CHARTS = {
'pools_utilization': {
- 'options': [None, 'Pools Utilization', '%', 'utilization', 'isc_dhcpd.utilization', 'line'],
+ 'options': [None, 'Pools Utilization', 'percentage', 'utilization', 'isc_dhcpd.utilization', 'line'],
'lines': []
},
'pools_active_leases': {
@@ -120,7 +122,6 @@ class Service(SimpleService):
SimpleService.__init__(self, configuration=configuration, name=name)
self.order = ORDER
self.definitions = deepcopy(CHARTS)
-
lease_path = self.configuration.get('leases_path', '/var/lib/dhcp/dhcpd.leases')
self.dhcpd_leases = DhcpdLeasesFile(path=lease_path)
self.pools = list()
@@ -131,7 +132,7 @@ class Service(SimpleService):
def check(self):
if not HAVE_IP_ADDRESS:
- self.error("'python-ipaddress' module is needed")
+ self.error("'python-ipaddress' package is needed")
return False
if not self.dhcpd_leases.is_valid():
@@ -190,6 +191,17 @@ class Service(SimpleService):
def create_charts(self):
for pool in self.pools:
- self.definitions['pools_utilization']['lines'].append([pool.id + '_utilization', pool.name,
- 'absolute', 1, 100])
- self.definitions['pools_active_leases']['lines'].append([pool.id + '_active_leases', pool.name])
+ dim = [
+ pool.id + '_utilization',
+ pool.name,
+ 'absolute',
+ 1,
+ 100,
+ ]
+ self.definitions['pools_utilization']['lines'].append(dim)
+
+ dim = [
+ pool.id + '_active_leases',
+ pool.name,
+ ]
+ self.definitions['pools_active_leases']['lines'].append(dim)
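The utilization dimension uses divisor 100, which suggests the module stores pool utilization as an integer in hundredths of a percent. A hedged sketch of that scaling; the computation is assumed, not lifted from the module:

```python
def pool_utilization_value(active_leases, pool_size):
    # Store percent * 100 so the dimension divisor of 100
    # restores two decimal places on the chart.
    return int(active_leases / pool_size * 100 * 100)

value = pool_utilization_value(active_leases=181, pool_size=254)
print(value, '->', value / 100, '%')  # 7125 -> 71.25 %
```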
diff --git a/collectors/python.d.plugin/isc_dhcpd/isc_dhcpd.conf b/collectors/python.d.plugin/isc_dhcpd/isc_dhcpd.conf
index 4a4c4a5e..8dcb5082 100644
--- a/collectors/python.d.plugin/isc_dhcpd/isc_dhcpd.conf
+++ b/collectors/python.d.plugin/isc_dhcpd/isc_dhcpd.conf
@@ -27,11 +27,9 @@
# If unset, the default for python.d.plugin is used.
# priority: 60000
-# retries sets the number of retries to be made in case of failures.
-# If unset, the default for python.d.plugin is used.
-# Attempts to restore the service are made once every update_every
-# and only if the module has collected values in the past.
-# retries: 60
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
# autodetection_retry sets the job re-check interval in seconds.
# The job is not deleted if check fails.
@@ -58,7 +56,7 @@
# # JOBs sharing a name are mutually exclusive
# update_every: 1 # the JOB's data collection frequency
# priority: 60000 # the JOB's order on the dashboard
-# retries: 60 # the JOB's number of restoration attempts
+# penalty: yes # the JOB's penalty
# autodetection_retry: 0 # the JOB's re-check interval in seconds
#
# Additionally to the above, isc_dhcpd supports the following:
diff --git a/collectors/python.d.plugin/linux_power_supply/README.md b/collectors/python.d.plugin/linux_power_supply/README.md
index 5cfbe41c..f5b05d19 100644
--- a/collectors/python.d.plugin/linux_power_supply/README.md
+++ b/collectors/python.d.plugin/linux_power_supply/README.md
@@ -1,4 +1,9 @@
-# linux\_power\_supply
+# Linux power supply
+
+> THIS MODULE IS OBSOLETE.
+> USE THE [PROC PLUGIN](../../proc.plugin) - IT IS MORE EFFICIENT
+
+---
This module monitors various metrics reported by power supply drivers
on Linux. This allows tracking and alerting on things like remaining
@@ -65,3 +70,5 @@ the corresponding `min` or `empty`, which will then always read as zero.
This way, alerts which match on these will still work.
---
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Flinux_power_supply%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/collectors/python.d.plugin/linux_power_supply/linux_power_supply.conf b/collectors/python.d.plugin/linux_power_supply/linux_power_supply.conf
index 3cb610f7..96eeef44 100644
--- a/collectors/python.d.plugin/linux_power_supply/linux_power_supply.conf
+++ b/collectors/python.d.plugin/linux_power_supply/linux_power_supply.conf
@@ -27,11 +27,9 @@
# If unset, the default for python.d.plugin is used.
# priority: 60000
-# retries sets the number of retries to be made in case of failures.
-# If unset, the default for python.d.plugin is used.
-# Attempts to restore the service are made once every update_everye
-# and only if the module has collected values in the past.
-# retries: 60
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
# autodetection_retry sets the job re-check interval in seconds.
# The job is not deleted if check fails.
@@ -58,7 +56,7 @@
# # JOBs sharing a name are mutually exclusive
# update_every: 1 # the JOB's data collection frequency
# priority: 60000 # the JOB's order on the dashboard
-# retries: 60 # the JOB's number of restoration attempts
+# penalty: yes # the JOB's penalty
# autodetection_retry: 0 # the JOB's re-check interval in seconds
#
# In addition to the above parameters, linux_power_supply also supports
diff --git a/collectors/python.d.plugin/litespeed/README.md b/collectors/python.d.plugin/litespeed/README.md
index d1482f33..88b67253 100644
--- a/collectors/python.d.plugin/litespeed/README.md
+++ b/collectors/python.d.plugin/litespeed/README.md
@@ -45,3 +45,5 @@ local:
If no configuration is given, module will use "/tmp/lshttpd/".
---
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Flitespeed%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/collectors/python.d.plugin/litespeed/litespeed.chart.py b/collectors/python.d.plugin/litespeed/litespeed.chart.py
index efdc6869..9da94213 100644
--- a/collectors/python.d.plugin/litespeed/litespeed.chart.py
+++ b/collectors/python.d.plugin/litespeed/litespeed.chart.py
@@ -16,11 +16,15 @@ update_every = 10
# charts order (can be overridden if you want less charts, or different order)
ORDER = [
- 'net_throughput_http', 'net_throughput_https', # net throughput
- 'connections_http', 'connections_https', # connections
- 'requests', 'requests_processing', # requests
- 'pub_cache_hits', 'private_cache_hits', # cache
- 'static_hits' # static
+ 'net_throughput_http', # net throughput
+ 'net_throughput_https', # net throughput
+ 'connections_http', # connections
+ 'connections_https', # connections
+ 'requests', # requests
+ 'requests_processing', # requests
+ 'pub_cache_hits', # cache
+ 'private_cache_hits', # cache
+ 'static_hits', # static
]
CHARTS = {
diff --git a/collectors/python.d.plugin/litespeed/litespeed.conf b/collectors/python.d.plugin/litespeed/litespeed.conf
index 17d0f690..a326e184 100644
--- a/collectors/python.d.plugin/litespeed/litespeed.conf
+++ b/collectors/python.d.plugin/litespeed/litespeed.conf
@@ -27,11 +27,9 @@
# If unset, the default for python.d.plugin is used.
# priority: 60000
-# retries sets the number of retries to be made in case of failures.
-# If unset, the default for python.d.plugin is used.
-# Attempts to restore the service are made once every update_every
-# and only if the module has collected values in the past.
-# retries: 60
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
# autodetection_retry sets the job re-check interval in seconds.
# The job is not deleted if check fails.
@@ -58,7 +56,7 @@
# # JOBs sharing a name are mutually exclusive
# update_every: 1 # the JOB's data collection frequency
# priority: 60000 # the JOB's order on the dashboard
-# retries: 60 # the JOB's number of restoration attempts
+# penalty: yes # the JOB's penalty
# autodetection_retry: 0 # the JOB's re-check interval in seconds
#
# Additionally to the above, litespeed also supports the following:
diff --git a/collectors/python.d.plugin/logind/README.md b/collectors/python.d.plugin/logind/README.md
index 8f8670d4..c35630c8 100644
--- a/collectors/python.d.plugin/logind/README.md
+++ b/collectors/python.d.plugin/logind/README.md
@@ -52,3 +52,5 @@ is currently disabled by default, and needs to be explicitly enabled in
`/etc/netdata/python.d.conf` before it will run.
---
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Flogind%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/collectors/python.d.plugin/logind/logind.chart.py b/collectors/python.d.plugin/logind/logind.chart.py
index bfc486c7..70866864 100644
--- a/collectors/python.d.plugin/logind/logind.chart.py
+++ b/collectors/python.d.plugin/logind/logind.chart.py
@@ -8,7 +8,13 @@ from bases.FrameworkServices.ExecutableService import ExecutableService
priority = 59999
disabled_by_default = True
-ORDER = ['sessions', 'users', 'seats']
+LOGINCTL_COMMAND = 'loginctl list-sessions --no-legend'
+
+ORDER = [
+ 'sessions',
+ 'users',
+ 'seats',
+]
CHARTS = {
'sessions': {
@@ -39,9 +45,9 @@ CHARTS = {
class Service(ExecutableService):
def __init__(self, configuration=None, name=None):
ExecutableService.__init__(self, configuration=configuration, name=name)
- self.command = 'loginctl list-sessions --no-legend'
self.order = ORDER
self.definitions = CHARTS
+ self.command = LOGINCTL_COMMAND
def _get_data(self):
ret = {
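The new module-level `LOGINCTL_COMMAND` is handed to `ExecutableService`, and each output line of `loginctl list-sessions --no-legend` describes one session. A hedged sketch of deriving the three chart values, with the column layout (`SESSION UID USER SEAT`) assumed from loginctl's documentation:

```python
# Invented sample output of 'loginctl list-sessions --no-legend'.
raw = '''\
c1 1000 alice seat0
c2 1000 alice seat0
c3 1001 bob seat1
'''

sessions = [line.split() for line in raw.splitlines() if line.strip()]
data = {
    'sessions': len(sessions),
    'users': len(set(s[1] for s in sessions)),                # unique UIDs
    'seats': len(set(s[3] for s in sessions if len(s) > 3)),  # non-empty seats
}
print(data)  # {'sessions': 3, 'users': 2, 'seats': 2}
```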
diff --git a/collectors/python.d.plugin/logind/logind.conf b/collectors/python.d.plugin/logind/logind.conf
index 0623493d..01a859d2 100644
--- a/collectors/python.d.plugin/logind/logind.conf
+++ b/collectors/python.d.plugin/logind/logind.conf
@@ -27,11 +27,9 @@
# If unset, the default for python.d.plugin is used.
# priority: 60000
-# retries sets the number of retries to be made in case of failures.
-# If unset, the default for python.d.plugin is used.
-# Attempts to restore the service are made once every update_every
-# and only if the module has collected values in the past.
-# retries: 60
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
# autodetection_retry sets the job re-check interval in seconds.
# The job is not deleted if check fails.
@@ -58,5 +56,5 @@
# # JOBs sharing a name are mutually exclusive
# update_every: 1 # the JOB's data collection frequency
# priority: 60000 # the JOB's order on the dashboard
-# retries: 60 # the JOB's number of restoration attempts
+# penalty: yes # the JOB's penalty
# autodetection_retry: 0 # the JOB's re-check interval in seconds
diff --git a/collectors/python.d.plugin/mdstat/README.md b/collectors/python.d.plugin/mdstat/README.md
index 1ff8f7da..f88346ee 100644
--- a/collectors/python.d.plugin/mdstat/README.md
+++ b/collectors/python.d.plugin/mdstat/README.md
@@ -1,5 +1,10 @@
# mdstat
+> THIS MODULE IS OBSOLETE.
+> USE THE [PROC PLUGIN](../../proc.plugin) - IT IS MORE EFFICIENT
+
+---
+
Module monitors /proc/mdstat
It produces:
@@ -24,3 +29,5 @@ It produces:
No configuration is needed.
---
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fmdstat%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/collectors/python.d.plugin/mdstat/mdstat.conf b/collectors/python.d.plugin/mdstat/mdstat.conf
index 66a2f153..c72b6383 100644
--- a/collectors/python.d.plugin/mdstat/mdstat.conf
+++ b/collectors/python.d.plugin/mdstat/mdstat.conf
@@ -19,11 +19,9 @@
# If unset, the default for python.d.plugin is used.
# priority: 60000
-# retries sets the number of retries to be made in case of failures.
-# If unset, the default for python.d.plugin is used.
-# Attempts to restore the service are made once every update_every
-# and only if the module has collected values in the past.
-# retries: 60
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
# autodetection_retry sets the job re-check interval in seconds.
# The job is not deleted if check fails.
diff --git a/collectors/python.d.plugin/megacli/README.md b/collectors/python.d.plugin/megacli/README.md
index d288a635..e96015dd 100644
--- a/collectors/python.d.plugin/megacli/README.md
+++ b/collectors/python.d.plugin/megacli/README.md
@@ -46,3 +46,5 @@ do_battery: yes
```
---
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fmegacli%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/collectors/python.d.plugin/megacli/megacli.chart.py b/collectors/python.d.plugin/megacli/megacli.chart.py
index 41a1079f..e1a05e41 100644
--- a/collectors/python.d.plugin/megacli/megacli.chart.py
+++ b/collectors/python.d.plugin/megacli/megacli.chart.py
@@ -66,7 +66,7 @@ def battery_charts(bats):
charts.update(
{
'bbu_{0}_relative_charge'.format(b.id): {
- 'options': [None, 'Relative State of Charge', '%', 'battery',
+ 'options': [None, 'Relative State of Charge', 'percentage', 'battery',
'megacli.bbu_relative_charge', 'line'],
'lines': [
['bbu_{0}_relative_charge'.format(b.id), 'adapter {0}'.format(b.id)],
@@ -180,8 +180,8 @@ class Service(ExecutableService):
ExecutableService.__init__(self, configuration=configuration, name=name)
self.order = list()
self.definitions = dict()
- self.megacli = Megacli()
self.do_battery = self.configuration.get('do_battery')
+ self.megacli = Megacli()
def check_sudo(self):
err = self._get_raw_data(command=self.megacli.sudo_check, stderr=True)
diff --git a/collectors/python.d.plugin/megacli/megacli.conf b/collectors/python.d.plugin/megacli/megacli.conf
index 73afb2f7..1af4292d 100644
--- a/collectors/python.d.plugin/megacli/megacli.conf
+++ b/collectors/python.d.plugin/megacli/megacli.conf
@@ -19,11 +19,9 @@
# If unset, the default for python.d.plugin is used.
# priority: 60000
-# retries sets the number of retries to be made in case of failures.
-# If unset, the default for python.d.plugin is used.
-# Attempts to restore the service are made once every update_every
-# and only if the module has collected values in the past.
-# retries: 60
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
# autodetection_retry sets the job re-check interval in seconds.
# The job is not deleted if check fails.
@@ -50,7 +48,7 @@
# # JOBs sharing a name are mutually exclusive
# update_every: 1 # the JOB's data collection frequency
# priority: 60000 # the JOB's order on the dashboard
-# retries: 60 # the JOB's number of restoration attempts
+# penalty: yes # the JOB's penalty
# autodetection_retry: 0 # the JOB's re-check interval in seconds
#
# Additionally to the above, megacli also supports the following:
diff --git a/collectors/python.d.plugin/memcached/README.md b/collectors/python.d.plugin/memcached/README.md
index 3521c109..98627c4a 100644
--- a/collectors/python.d.plugin/memcached/README.md
+++ b/collectors/python.d.plugin/memcached/README.md
@@ -67,3 +67,5 @@ localtcpip:
If no configuration is given, module will attempt to connect to memcached instance on `127.0.0.1:11211` address.
---
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fmemcached%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/collectors/python.d.plugin/memcached/memcached.chart.py b/collectors/python.d.plugin/memcached/memcached.chart.py
index 3c310ec6..9803dbb0 100644
--- a/collectors/python.d.plugin/memcached/memcached.chart.py
+++ b/collectors/python.d.plugin/memcached/memcached.chart.py
@@ -5,37 +5,37 @@
from bases.FrameworkServices.SocketService import SocketService
-# default module values (can be overridden per job in `config`)
-# update_every = 2
-priority = 60000
-retries = 60
-
-# default job configuration (overridden by python.d.plugin)
-# config = {'local': {
-# 'update_every': update_every,
-# 'retries': retries,
-# 'priority': priority,
-# 'host': 'localhost',
-# 'port': 11211,
-# 'unix_socket': None
-# }}
-
-ORDER = ['cache', 'net', 'connections', 'items', 'evicted_reclaimed',
- 'get', 'get_rate', 'set_rate', 'cas', 'delete', 'increment', 'decrement', 'touch', 'touch_rate']
+
+ORDER = [
+ 'cache',
+ 'net',
+ 'connections',
+ 'items',
+ 'evicted_reclaimed',
+ 'get',
+ 'get_rate',
+ 'set_rate',
+ 'cas',
+ 'delete',
+ 'increment',
+ 'decrement',
+ 'touch',
+ 'touch_rate',
+]
CHARTS = {
'cache': {
- 'options': [None, 'Cache Size', 'megabytes', 'cache', 'memcached.cache', 'stacked'],
+ 'options': [None, 'Cache Size', 'MiB', 'cache', 'memcached.cache', 'stacked'],
'lines': [
- ['avail', 'available', 'absolute', 1, 1048576],
- ['used', 'used', 'absolute', 1, 1048576]
+ ['avail', 'available', 'absolute', 1, 1 << 20],
+ ['used', 'used', 'absolute', 1, 1 << 20]
]
},
'net': {
'options': [None, 'Network', 'kilobits/s', 'network', 'memcached.net', 'area'],
'lines': [
- ['bytes_read', 'in', 'incremental', 8, 1024],
- ['bytes_written', 'out', 'incremental', -8, 1024]
+ ['bytes_read', 'in', 'incremental', 8, 1000],
+ ['bytes_written', 'out', 'incremental', -8, 1000],
]
},
'connections': {
@@ -127,13 +127,13 @@ CHARTS = {
class Service(SocketService):
def __init__(self, configuration=None, name=None):
SocketService.__init__(self, configuration=configuration, name=name)
+ self.order = ORDER
+ self.definitions = CHARTS
self.request = 'stats\r\n'
self.host = 'localhost'
self.port = 11211
self._keep_alive = True
self.unix_socket = None
- self.order = ORDER
- self.definitions = CHARTS
def _get_data(self):
"""
diff --git a/collectors/python.d.plugin/memcached/memcached.conf b/collectors/python.d.plugin/memcached/memcached.conf
index 85c3daf6..3286b462 100644
--- a/collectors/python.d.plugin/memcached/memcached.conf
+++ b/collectors/python.d.plugin/memcached/memcached.conf
@@ -27,11 +27,9 @@
# If unset, the default for python.d.plugin is used.
# priority: 60000
-# retries sets the number of retries to be made in case of failures.
-# If unset, the default for python.d.plugin is used.
-# Attempts to restore the service are made once every update_every
-# and only if the module has collected values in the past.
-# retries: 60
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
# autodetection_retry sets the job re-check interval in seconds.
# The job is not deleted if check fails.
@@ -58,7 +56,7 @@
# # JOBs sharing a name are mutually exclusive
# update_every: 1 # the JOB's data collection frequency
# priority: 60000 # the JOB's order on the dashboard
-# retries: 60 # the JOB's number of restoration attempts
+# penalty: yes # the JOB's penalty
# autodetection_retry: 0 # the JOB's re-check interval in seconds
#
# Additionally to the above, memcached also supports the following:
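The `penalty` option described above replaces the removed `retries`; here is a minimal sketch of the backoff it describes, assuming stepwise growth capped at 10 minutes (the real python.d scheduler may compute it differently):

```python
MAX_PENALTY = 600  # seconds, i.e. 10 minutes

def effective_update_every(update_every, consecutive_failures, penalty=True):
    """Sketch: grow the collection interval every 5 failed updates in a row."""
    if not penalty or consecutive_failures < 5:
        return update_every
    steps = consecutive_failures // 5
    return update_every + min(steps * update_every, MAX_PENALTY)
```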
diff --git a/collectors/python.d.plugin/mongodb/README.md b/collectors/python.d.plugin/mongodb/README.md
index 8e5f652c..ac8930dd 100644
--- a/collectors/python.d.plugin/mongodb/README.md
+++ b/collectors/python.d.plugin/mongodb/README.md
@@ -121,6 +121,33 @@ Number of charts depends on mongodb version, storage engine and other features (
26. **Replication set member heartbeat latency**
* member (time when last heartbeat was received from replica set member)
+### prerequisite
+Create a read-only user for Netdata in the admin database.
+
+1. Authenticate as the admin user.
+
+```
+use admin
+db.auth("admin", "<MONGODB_ADMIN_PASSWORD>")
+```
+
+2. Create a user.
+
+```
+# MongoDB 2.x.
+db.addUser("netdata", "<UNIQUE_PASSWORD>", true)
+
+# MongoDB 3.x or higher.
+db.createUser({
+ "user":"netdata",
+ "pwd": "<UNIQUE_PASSWORD>",
+ "roles" : [
+ {role: 'read', db: 'admin' },
+ {role: 'clusterMonitor', db: 'admin'},
+ {role: 'read', db: 'local' }
+ ]
+})
+```
### configuration
@@ -139,3 +166,5 @@ local:
If no configuration is given, module will attempt to connect to mongodb daemon on `127.0.0.1:27017` address
---
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fmongodb%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
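Once the read-only user from the new prerequisite section exists, a job referencing it might look like the following (hypothetical names, mirroring the README's own yaml examples):

```yaml
local:
  name : 'local'
  host : '127.0.0.1'
  port : 27017
  user : 'netdata'
  pass : '<UNIQUE_PASSWORD>'
```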
diff --git a/collectors/python.d.plugin/mongodb/mongodb.chart.py b/collectors/python.d.plugin/mongodb/mongodb.chart.py
index 10344342..92740ff8 100644
--- a/collectors/python.d.plugin/mongodb/mongodb.chart.py
+++ b/collectors/python.d.plugin/mongodb/mongodb.chart.py
@@ -16,10 +16,6 @@ except ImportError:
from bases.FrameworkServices.SimpleService import SimpleService
-# default module values (can be overridden per job in `config`)
-# update_every = 2
-priority = 60000
-retries = 60
REPL_SET_STATES = [
('1', 'primary'),
@@ -209,21 +205,21 @@ CHARTS = {
]
},
'journaling_volume': {
- 'options': [None, 'Volume of data written to the journal', 'MB', 'database performance',
+ 'options': [None, 'Volume of data written to the journal', 'MiB', 'database performance',
'mongodb.journaling_volume', 'line'],
'lines': [
['journaledMB', 'volume', 'absolute', 1, 100]
]
},
'background_flush_average': {
- 'options': [None, 'Average time taken by flushes to execute', 'ms', 'database performance',
+ 'options': [None, 'Average time taken by flushes to execute', 'milliseconds', 'database performance',
'mongodb.background_flush_average', 'line'],
'lines': [
['average_ms', 'time', 'absolute', 1, 100]
]
},
'background_flush_last': {
- 'options': [None, 'Time taken by the last flush operation to execute', 'ms', 'database performance',
+ 'options': [None, 'Time taken by the last flush operation to execute', 'milliseconds', 'database performance',
'mongodb.background_flush_last', 'line'],
'lines': [
['last_ms', 'time', 'absolute', 1, 100]
@@ -269,7 +265,7 @@ CHARTS = {
]
},
'memory': {
- 'options': [None, 'Memory metrics', 'MB', 'resource utilization', 'mongodb.memory', 'stacked'],
+ 'options': [None, 'Memory metrics', 'MiB', 'resource utilization', 'mongodb.memory', 'stacked'],
'lines': [
['virtual', None, 'absolute', 1, 1],
['resident', None, 'absolute', 1, 1],
@@ -313,7 +309,7 @@ CHARTS = {
},
'wiredtiger_cache': {
'options': [None, 'The percentage of the wiredTiger cache that is in use and cache with dirty bytes',
- 'percent', 'resource utilization', 'mongodb.wiredtiger_cache', 'stacked'],
+ 'percentage', 'resource utilization', 'mongodb.wiredtiger_cache', 'stacked'],
'lines': [
['wiredTiger_percent_clean', 'inuse', 'absolute', 1, 1000],
['wiredTiger_percent_dirty', 'dirty', 'absolute', 1, 1000]
@@ -333,14 +329,14 @@ CHARTS = {
'lines': []
},
'tcmalloc_generic': {
- 'options': [None, 'Tcmalloc generic metrics', 'MB', 'tcmalloc', 'mongodb.tcmalloc_generic', 'stacked'],
+ 'options': [None, 'Tcmalloc generic metrics', 'MiB', 'tcmalloc', 'mongodb.tcmalloc_generic', 'stacked'],
'lines': [
- ['current_allocated_bytes', 'allocated', 'absolute', 1, 1048576],
- ['heap_size', 'heap_size', 'absolute', 1, 1048576]
+ ['current_allocated_bytes', 'allocated', 'absolute', 1, 1 << 20],
+ ['heap_size', 'heap_size', 'absolute', 1, 1 << 20]
]
},
'tcmalloc_metrics': {
- 'options': [None, 'Tcmalloc metrics', 'KB', 'tcmalloc', 'mongodb.tcmalloc_metrics', 'stacked'],
+ 'options': [None, 'Tcmalloc metrics', 'KiB', 'tcmalloc', 'mongodb.tcmalloc_metrics', 'stacked'],
'lines': [
['central_cache_free_bytes', 'central_cache_free', 'absolute', 1, 1024],
['current_total_thread_cache_bytes', 'current_total_thread_cache', 'absolute', 1, 1024],
diff --git a/collectors/python.d.plugin/mongodb/mongodb.conf b/collectors/python.d.plugin/mongodb/mongodb.conf
index 62faef68..f69acac7 100644
--- a/collectors/python.d.plugin/mongodb/mongodb.conf
+++ b/collectors/python.d.plugin/mongodb/mongodb.conf
@@ -27,11 +27,9 @@
# If unset, the default for python.d.plugin is used.
# priority: 60000
-# retries sets the number of retries to be made in case of failures.
-# If unset, the default for python.d.plugin is used.
-# Attempts to restore the service are made once every update_every
-# and only if the module has collected values in the past.
-# retries: 60
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
# autodetection_retry sets the job re-check interval in seconds.
# The job is not deleted if check fails.
@@ -58,7 +56,7 @@
# # JOBs sharing a name are mutually exclusive
# update_every: 1 # the JOB's data collection frequency
# priority: 60000 # the JOB's order on the dashboard
-# retries: 60 # the JOB's number of restoration attempts
+# penalty: yes # the JOB's penalty
# autodetection_retry: 0 # the JOB's re-check interval in seconds
#
# Additionally to the above, mongodb also supports the following:
diff --git a/collectors/python.d.plugin/monit/README.md b/collectors/python.d.plugin/monit/README.md
index 6d10240c..0f69aff2 100644
--- a/collectors/python.d.plugin/monit/README.md
+++ b/collectors/python.d.plugin/monit/README.md
@@ -31,3 +31,5 @@ local:
If no configuration is given, module will attempt to connect to monit as `http://localhost:2812`.
---
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fmonit%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/collectors/python.d.plugin/monit/monit.chart.py b/collectors/python.d.plugin/monit/monit.chart.py
index 51943c0e..3ac0032c 100644
--- a/collectors/python.d.plugin/monit/monit.chart.py
+++ b/collectors/python.d.plugin/monit/monit.chart.py
@@ -6,13 +6,20 @@
import xml.etree.ElementTree as ET
from bases.FrameworkServices.UrlService import UrlService
-# default module values (can be overridden per job in `config`)
-# update_every = 2
-priority = 60000
-retries = 60
# see enum State_Type from monit.h (https://bitbucket.org/tildeslash/monit/src/master/src/monit.h)
-MONIT_SERVICE_NAMES = ['Filesystem', 'Directory', 'File', 'Process', 'Host', 'System', 'Fifo', 'Program', 'Net']
+MONIT_SERVICE_NAMES = [
+ 'Filesystem',
+ 'Directory',
+ 'File',
+ 'Process',
+ 'Host',
+ 'System',
+ 'Fifo',
+ 'Program',
+ 'Net',
+]
+
DEFAULT_SERVICES_IDS = [0, 1, 2, 3, 4, 6, 7, 8]
# charts order (can be overridden if you want less charts, or different order)
@@ -90,10 +97,10 @@ CHARTS = {
class Service(UrlService):
def __init__(self, configuration=None, name=None):
UrlService.__init__(self, configuration=configuration, name=name)
- base_url = self.configuration.get('url', 'http://localhost:2812')
- self.url = '{0}/_status?format=xml&level=full'.format(base_url)
self.order = ORDER
self.definitions = CHARTS
+ base_url = self.configuration.get('url', 'http://localhost:2812')
+ self.url = '{0}/_status?format=xml&level=full'.format(base_url)
def parse(self, data):
try:
@@ -105,15 +112,19 @@ class Service(UrlService):
def check(self):
self._manager = self._build_manager()
+
raw_data = self._get_raw_data()
if not raw_data:
return None
+
return bool(self.parse(raw_data))
def _get_data(self):
raw_data = self._get_raw_data()
+
if not raw_data:
return None
+
xml = self.parse(raw_data)
if not xml:
return None
@@ -121,6 +132,7 @@ class Service(UrlService):
data = {}
for service_id in DEFAULT_SERVICES_IDS:
service_category = MONIT_SERVICE_NAMES[service_id].lower()
+
if service_category == 'system':
self.debug("Skipping service from 'System' category, because it's useless in graphs")
continue
diff --git a/collectors/python.d.plugin/monit/monit.conf b/collectors/python.d.plugin/monit/monit.conf
index f9c26dbc..9a3fb693 100644
--- a/collectors/python.d.plugin/monit/monit.conf
+++ b/collectors/python.d.plugin/monit/monit.conf
@@ -27,11 +27,9 @@
# If unset, the default for python.d.plugin is used.
# priority: 60000
-# retries sets the number of retries to be made in case of failures.
-# If unset, the default for python.d.plugin is used.
-# Attempts to restore the service are made once every update_every
-# and only if the module has collected values in the past.
-# retries: 60
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
# autodetection_retry sets the job re-check interval in seconds.
# The job is not deleted if check fails.
@@ -58,7 +56,7 @@
# # JOBs sharing a name are mutually exclusive
# update_every: 1 # the JOB's data collection frequency
# priority: 60000 # the JOB's order on the dashboard
-# retries: 60 # the JOB's number of restoration attempts
+# penalty: yes # the JOB's penalty
# autodetection_retry: 0 # the JOB's re-check interval in seconds
#
# Additionally to the above, this plugin also supports the following:
diff --git a/collectors/python.d.plugin/mysql/README.md b/collectors/python.d.plugin/mysql/README.md
index e38098e7..498493a3 100644
--- a/collectors/python.d.plugin/mysql/README.md
+++ b/collectors/python.d.plugin/mysql/README.md
@@ -65,7 +65,6 @@ Here is an example for 3 servers:
```yaml
update_every : 10
priority : 90100
-retries : 5
local:
'my.cnf' : '/etc/mysql/my.cnf'
@@ -82,9 +81,10 @@ remote:
pass : 'bla'
host : 'example.org'
port : 9000
- retries : 20
```
If no configuration is given, module will attempt to connect to mysql server via unix socket at `/var/run/mysqld/mysqld.sock` without password and with username `root`
---
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fmysql%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/collectors/python.d.plugin/mysql/mysql.chart.py b/collectors/python.d.plugin/mysql/mysql.chart.py
index c4d1e8b3..20d32f81 100644
--- a/collectors/python.d.plugin/mysql/mysql.chart.py
+++ b/collectors/python.d.plugin/mysql/mysql.chart.py
@@ -6,10 +6,6 @@
from bases.FrameworkServices.MySQLService import MySQLService
-# default module values (can be overridden per job in `config`)
-# update_every = 3
-priority = 60000
-retries = 60
# query executed on MySQL server
QUERY_GLOBAL = 'SHOW GLOBAL STATUS;'
@@ -172,16 +168,19 @@ ORDER = [
'binlog_cache',
'binlog_stmt_cache',
'threads',
+ 'threads_creation_rate',
'thread_cache_misses',
'innodb_io',
'innodb_io_ops',
'innodb_io_pending_ops',
'innodb_log',
'innodb_os_log',
+ 'innodb_os_log_fsync_writes',
'innodb_os_log_io',
'innodb_cur_row_lock',
'innodb_rows',
'innodb_buffer_pool_pages',
+ 'innodb_buffer_pool_flush_pages_requests',
'innodb_buffer_pool_bytes',
'innodb_buffer_pool_read_ahead',
'innodb_buffer_pool_reqs',
@@ -206,14 +205,14 @@ ORDER = [
CHARTS = {
'net': {
- 'options': [None, 'mysql Bandwidth', 'kilobits/s', 'bandwidth', 'mysql.net', 'area'],
+ 'options': [None, 'Bandwidth', 'kilobits/s', 'bandwidth', 'mysql.net', 'area'],
'lines': [
- ['Bytes_received', 'in', 'incremental', 8, 1024],
- ['Bytes_sent', 'out', 'incremental', -8, 1024]
+ ['Bytes_received', 'in', 'incremental', 8, 1000],
+ ['Bytes_sent', 'out', 'incremental', -8, 1000]
]
},
'queries': {
- 'options': [None, 'mysql Queries', 'queries/s', 'queries', 'mysql.queries', 'line'],
+ 'options': [None, 'Queries', 'queries/s', 'queries', 'mysql.queries', 'line'],
'lines': [
['Queries', 'queries', 'incremental'],
['Questions', 'questions', 'incremental'],
@@ -221,7 +220,7 @@ CHARTS = {
]
},
'queries_type': {
- 'options': [None, 'mysql Query type', 'queries/s', 'query_types', 'mysql.queries_type', 'stacked'],
+ 'options': [None, 'Query Type', 'queries/s', 'query_types', 'mysql.queries_type', 'stacked'],
'lines': [
['Com_select', 'select', 'incremental'],
['Com_delete', 'delete', 'incremental'],
@@ -232,7 +231,7 @@ CHARTS = {
]
},
'handlers': {
- 'options': [None, 'mysql Handlers', 'handlers/s', 'handlers', 'mysql.handlers', 'line'],
+ 'options': [None, 'Handlers', 'handlers/s', 'handlers', 'mysql.handlers', 'line'],
'lines': [
['Handler_commit', 'commit', 'incremental'],
['Handler_delete', 'delete', 'incremental'],
@@ -251,14 +250,14 @@ CHARTS = {
]
},
'table_locks': {
- 'options': [None, 'mysql Tables Locks', 'locks/s', 'locks', 'mysql.table_locks', 'line'],
+ 'options': [None, 'Tables Locks', 'locks/s', 'locks', 'mysql.table_locks', 'line'],
'lines': [
['Table_locks_immediate', 'immediate', 'incremental'],
['Table_locks_waited', 'waited', 'incremental', -1, 1]
]
},
'join_issues': {
- 'options': [None, 'mysql Select Join Issues', 'joins/s', 'issues', 'mysql.join_issues', 'line'],
+ 'options': [None, 'Select Join Issues', 'joins/s', 'issues', 'mysql.join_issues', 'line'],
'lines': [
['Select_full_join', 'full_join', 'incremental'],
['Select_full_range_join', 'full_range_join', 'incremental'],
@@ -268,7 +267,7 @@ CHARTS = {
]
},
'sort_issues': {
- 'options': [None, 'mysql Sort Issues', 'issues/s', 'issues', 'mysql.sort_issues', 'line'],
+ 'options': [None, 'Sort Issues', 'issues/s', 'issues', 'mysql.sort_issues', 'line'],
'lines': [
['Sort_merge_passes', 'merge_passes', 'incremental'],
['Sort_range', 'range', 'incremental'],
@@ -276,7 +275,7 @@ CHARTS = {
]
},
'tmp': {
- 'options': [None, 'mysql Tmp Operations', 'counter', 'temporaries', 'mysql.tmp', 'line'],
+ 'options': [None, 'Tmp Operations', 'counter', 'temporaries', 'mysql.tmp', 'line'],
'lines': [
['Created_tmp_disk_tables', 'disk_tables', 'incremental'],
['Created_tmp_files', 'files', 'incremental'],
@@ -284,14 +283,14 @@ CHARTS = {
]
},
'connections': {
- 'options': [None, 'mysql Connections', 'connections/s', 'connections', 'mysql.connections', 'line'],
+ 'options': [None, 'Connections', 'connections/s', 'connections', 'mysql.connections', 'line'],
'lines': [
['Connections', 'all', 'incremental'],
['Aborted_connects', 'aborted', 'incremental']
]
},
'connections_active': {
- 'options': [None, 'mysql Connections Active', 'connections', 'connections', 'mysql.connections_active', 'line'],
+ 'options': [None, 'Connections Active', 'connections', 'connections', 'mysql.connections_active', 'line'],
'lines': [
['Threads_connected', 'active', 'absolute'],
['max_connections', 'limit', 'absolute'],
@@ -299,21 +298,26 @@ CHARTS = {
]
},
'binlog_cache': {
- 'options': [None, 'mysql Binlog Cache', 'transactions/s', 'binlog', 'mysql.binlog_cache', 'line'],
+ 'options': [None, 'Binlog Cache', 'transactions/s', 'binlog', 'mysql.binlog_cache', 'line'],
'lines': [
['Binlog_cache_disk_use', 'disk', 'incremental'],
['Binlog_cache_use', 'all', 'incremental']
]
},
'threads': {
- 'options': [None, 'mysql Threads', 'threads', 'threads', 'mysql.threads', 'line'],
+ 'options': [None, 'Threads', 'threads', 'threads', 'mysql.threads', 'line'],
'lines': [
['Threads_connected', 'connected', 'absolute'],
- ['Threads_created', 'created', 'incremental'],
['Threads_cached', 'cached', 'absolute', -1, 1],
['Threads_running', 'running', 'absolute'],
]
},
+ 'threads_creation_rate': {
+ 'options': [None, 'Threads Creation Rate', 'threads/s', 'threads', 'mysql.threads_creation_rate', 'line'],
+ 'lines': [
+ ['Threads_created', 'created', 'incremental'],
+ ]
+ },
'thread_cache_misses': {
'options': [None, 'mysql Threads Cache Misses', 'misses', 'threads', 'mysql.thread_cache_misses', 'area'],
'lines': [
@@ -321,14 +325,14 @@ CHARTS = {
]
},
'innodb_io': {
- 'options': [None, 'mysql InnoDB I/O Bandwidth', 'kilobytes/s', 'innodb', 'mysql.innodb_io', 'area'],
+ 'options': [None, 'InnoDB I/O Bandwidth', 'KiB/s', 'innodb', 'mysql.innodb_io', 'area'],
'lines': [
['Innodb_data_read', 'read', 'incremental', 1, 1024],
['Innodb_data_written', 'write', 'incremental', -1, 1024]
]
},
'innodb_io_ops': {
- 'options': [None, 'mysql InnoDB I/O Operations', 'operations/s', 'innodb', 'mysql.innodb_io_ops', 'line'],
+ 'options': [None, 'InnoDB I/O Operations', 'operations/s', 'innodb', 'mysql.innodb_io_ops', 'line'],
'lines': [
['Innodb_data_reads', 'reads', 'incremental'],
['Innodb_data_writes', 'writes', 'incremental', -1, 1],
@@ -336,7 +340,7 @@ CHARTS = {
]
},
'innodb_io_pending_ops': {
- 'options': [None, 'mysql InnoDB Pending I/O Operations', 'operations', 'innodb',
+ 'options': [None, 'InnoDB Pending I/O Operations', 'operations', 'innodb',
'mysql.innodb_io_pending_ops', 'line'],
'lines': [
['Innodb_data_pending_reads', 'reads', 'absolute'],
@@ -345,7 +349,7 @@ CHARTS = {
]
},
'innodb_log': {
- 'options': [None, 'mysql InnoDB Log Operations', 'operations/s', 'innodb', 'mysql.innodb_log', 'line'],
+ 'options': [None, 'InnoDB Log Operations', 'operations/s', 'innodb', 'mysql.innodb_log', 'line'],
'lines': [
['Innodb_log_waits', 'waits', 'incremental'],
['Innodb_log_write_requests', 'write_requests', 'incremental', -1, 1],
@@ -353,28 +357,33 @@ CHARTS = {
]
},
'innodb_os_log': {
- 'options': [None, 'mysql InnoDB OS Log Operations', 'operations', 'innodb', 'mysql.innodb_os_log', 'line'],
+ 'options': [None, 'InnoDB OS Log Pending Operations', 'operations', 'innodb', 'mysql.innodb_os_log', 'line'],
+ 'lines': [
+ ['Innodb_os_log_pending_fsyncs', 'fsyncs', 'absolute'],
+ ['Innodb_os_log_pending_writes', 'writes', 'absolute', -1, 1],
+ ]
+ },
+ 'innodb_os_log_fsync_writes': {
+ 'options': [None, 'InnoDB OS Log Operations', 'operations/s', 'innodb', 'mysql.innodb_os_log', 'line'],
'lines': [
['Innodb_os_log_fsyncs', 'fsyncs', 'incremental'],
- ['Innodb_os_log_pending_fsyncs', 'pending_fsyncs', 'absolute'],
- ['Innodb_os_log_pending_writes', 'pending_writes', 'absolute', -1, 1],
]
},
'innodb_os_log_io': {
- 'options': [None, 'mysql InnoDB OS Log Bandwidth', 'kilobytes/s', 'innodb', 'mysql.innodb_os_log_io', 'area'],
+ 'options': [None, 'InnoDB OS Log Bandwidth', 'KiB/s', 'innodb', 'mysql.innodb_os_log_io', 'area'],
'lines': [
['Innodb_os_log_written', 'write', 'incremental', -1, 1024],
]
},
'innodb_cur_row_lock': {
- 'options': [None, 'mysql InnoDB Current Row Locks', 'operations', 'innodb',
+ 'options': [None, 'InnoDB Current Row Locks', 'operations', 'innodb',
'mysql.innodb_cur_row_lock', 'area'],
'lines': [
['Innodb_row_lock_current_waits', 'current_waits', 'absolute']
]
},
'innodb_rows': {
- 'options': [None, 'mysql InnoDB Row Operations', 'operations/s', 'innodb', 'mysql.innodb_rows', 'area'],
+ 'options': [None, 'InnoDB Row Operations', 'operations/s', 'innodb', 'mysql.innodb_rows', 'area'],
'lines': [
['Innodb_rows_inserted', 'inserted', 'incremental'],
['Innodb_rows_read', 'read', 'incremental', 1, 1],
@@ -383,19 +392,25 @@ CHARTS = {
]
},
'innodb_buffer_pool_pages': {
- 'options': [None, 'mysql InnoDB Buffer Pool Pages', 'pages', 'innodb',
+ 'options': [None, 'InnoDB Buffer Pool Pages', 'pages', 'innodb',
'mysql.innodb_buffer_pool_pages', 'line'],
'lines': [
['Innodb_buffer_pool_pages_data', 'data', 'absolute'],
['Innodb_buffer_pool_pages_dirty', 'dirty', 'absolute', -1, 1],
['Innodb_buffer_pool_pages_free', 'free', 'absolute'],
- ['Innodb_buffer_pool_pages_flushed', 'flushed', 'incremental', -1, 1],
['Innodb_buffer_pool_pages_misc', 'misc', 'absolute', -1, 1],
['Innodb_buffer_pool_pages_total', 'total', 'absolute']
]
},
+ 'innodb_buffer_pool_flush_pages_requests': {
+ 'options': [None, 'InnoDB Buffer Pool Flush Pages Requests', 'requests/s', 'innodb',
+ 'mysql.innodb_buffer_pool_pages', 'line'],
+ 'lines': [
+ ['Innodb_buffer_pool_pages_flushed', 'flush pages', 'incremental'],
+ ]
+ },
'innodb_buffer_pool_bytes': {
- 'options': [None, 'mysql InnoDB Buffer Pool Bytes', 'MB', 'innodb', 'mysql.innodb_buffer_pool_bytes', 'area'],
+ 'options': [None, 'InnoDB Buffer Pool Bytes', 'MiB', 'innodb', 'mysql.innodb_buffer_pool_bytes', 'area'],
'lines': [
['Innodb_buffer_pool_bytes_data', 'data', 'absolute', 1, 1024 * 1024],
['Innodb_buffer_pool_bytes_dirty', 'dirty', 'absolute', -1, 1024 * 1024]
@@ -411,7 +426,7 @@ CHARTS = {
]
},
'innodb_buffer_pool_reqs': {
- 'options': [None, 'mysql InnoDB Buffer Pool Requests', 'requests/s', 'innodb',
+ 'options': [None, 'InnoDB Buffer Pool Requests', 'requests/s', 'innodb',
'mysql.innodb_buffer_pool_reqs', 'area'],
'lines': [
['Innodb_buffer_pool_read_requests', 'reads', 'incremental'],
@@ -419,7 +434,7 @@ CHARTS = {
]
},
'innodb_buffer_pool_ops': {
- 'options': [None, 'mysql InnoDB Buffer Pool Operations', 'operations/s', 'innodb',
+ 'options': [None, 'InnoDB Buffer Pool Operations', 'operations/s', 'innodb',
'mysql.innodb_buffer_pool_ops', 'area'],
'lines': [
['Innodb_buffer_pool_reads', 'disk reads', 'incremental'],
@@ -427,7 +442,7 @@ CHARTS = {
]
},
'qcache_ops': {
- 'options': [None, 'mysql QCache Operations', 'queries/s', 'qcache', 'mysql.qcache_ops', 'line'],
+ 'options': [None, 'QCache Operations', 'queries/s', 'qcache', 'mysql.qcache_ops', 'line'],
'lines': [
['Qcache_hits', 'hits', 'incremental'],
['Qcache_lowmem_prunes', 'lowmem prunes', 'incremental', -1, 1],
@@ -436,26 +451,26 @@ CHARTS = {
]
},
'qcache': {
- 'options': [None, 'mysql QCache Queries in Cache', 'queries', 'qcache', 'mysql.qcache', 'line'],
+ 'options': [None, 'QCache Queries in Cache', 'queries', 'qcache', 'mysql.qcache', 'line'],
'lines': [
['Qcache_queries_in_cache', 'queries', 'absolute']
]
},
'qcache_freemem': {
- 'options': [None, 'mysql QCache Free Memory', 'MB', 'qcache', 'mysql.qcache_freemem', 'area'],
+ 'options': [None, 'QCache Free Memory', 'MiB', 'qcache', 'mysql.qcache_freemem', 'area'],
'lines': [
['Qcache_free_memory', 'free', 'absolute', 1, 1024 * 1024]
]
},
'qcache_memblocks': {
- 'options': [None, 'mysql QCache Memory Blocks', 'blocks', 'qcache', 'mysql.qcache_memblocks', 'line'],
+ 'options': [None, 'QCache Memory Blocks', 'blocks', 'qcache', 'mysql.qcache_memblocks', 'line'],
'lines': [
['Qcache_free_blocks', 'free', 'absolute'],
['Qcache_total_blocks', 'total', 'absolute']
]
},
'key_blocks': {
- 'options': [None, 'mysql MyISAM Key Cache Blocks', 'blocks', 'myisam', 'mysql.key_blocks', 'line'],
+ 'options': [None, 'MyISAM Key Cache Blocks', 'blocks', 'myisam', 'mysql.key_blocks', 'line'],
'lines': [
['Key_blocks_unused', 'unused', 'absolute'],
['Key_blocks_used', 'used', 'absolute', -1, 1],
@@ -463,14 +478,14 @@ CHARTS = {
]
},
'key_requests': {
- 'options': [None, 'mysql MyISAM Key Cache Requests', 'requests/s', 'myisam', 'mysql.key_requests', 'area'],
+ 'options': [None, 'MyISAM Key Cache Requests', 'requests/s', 'myisam', 'mysql.key_requests', 'area'],
'lines': [
['Key_read_requests', 'reads', 'incremental'],
['Key_write_requests', 'writes', 'incremental', -1, 1]
]
},
'key_disk_ops': {
- 'options': [None, 'mysql MyISAM Key Cache Disk Operations', 'operations/s',
+ 'options': [None, 'MyISAM Key Cache Disk Operations', 'operations/s',
'myisam', 'mysql.key_disk_ops', 'area'],
'lines': [
['Key_reads', 'reads', 'incremental'],
@@ -478,19 +493,19 @@ CHARTS = {
]
},
'files': {
- 'options': [None, 'mysql Open Files', 'files', 'files', 'mysql.files', 'line'],
+ 'options': [None, 'Open Files', 'files', 'files', 'mysql.files', 'line'],
'lines': [
['Open_files', 'files', 'absolute']
]
},
'files_rate': {
- 'options': [None, 'mysql Opened Files Rate', 'files/s', 'files', 'mysql.files_rate', 'line'],
+ 'options': [None, 'Opened Files Rate', 'files/s', 'files', 'mysql.files_rate', 'line'],
'lines': [
['Opened_files', 'files', 'incremental']
]
},
'binlog_stmt_cache': {
- 'options': [None, 'mysql Binlog Statement Cache', 'statements/s', 'binlog',
+ 'options': [None, 'Binlog Statement Cache', 'statements/s', 'binlog',
'mysql.binlog_stmt_cache', 'line'],
'lines': [
['Binlog_stmt_cache_disk_use', 'disk', 'incremental'],
@@ -498,7 +513,7 @@ CHARTS = {
]
},
'connection_errors': {
- 'options': [None, 'mysql Connection Errors', 'connections/s', 'connections',
+ 'options': [None, 'Connection Errors', 'connections/s', 'connections',
'mysql.connection_errors', 'line'],
'lines': [
['Connection_errors_accept', 'accept', 'incremental'],
@@ -523,35 +538,35 @@ CHARTS = {
]
},
'galera_writesets': {
- 'options': [None, 'Replicated writesets', 'writesets/s', 'galera', 'mysql.galera_writesets', 'line'],
+ 'options': [None, 'Replicated Writesets', 'writesets/s', 'galera', 'mysql.galera_writesets', 'line'],
'lines': [
['wsrep_received', 'rx', 'incremental'],
['wsrep_replicated', 'tx', 'incremental', -1, 1],
]
},
'galera_bytes': {
- 'options': [None, 'Replicated bytes', 'KB/s', 'galera', 'mysql.galera_bytes', 'area'],
+ 'options': [None, 'Replicated Bytes', 'KiB/s', 'galera', 'mysql.galera_bytes', 'area'],
'lines': [
['wsrep_received_bytes', 'rx', 'incremental', 1, 1024],
['wsrep_replicated_bytes', 'tx', 'incremental', -1, 1024],
]
},
'galera_queue': {
- 'options': [None, 'Galera queue', 'writesets', 'galera', 'mysql.galera_queue', 'line'],
+ 'options': [None, 'Galera Queue', 'writesets', 'galera', 'mysql.galera_queue', 'line'],
'lines': [
['wsrep_local_recv_queue', 'rx', 'absolute'],
['wsrep_local_send_queue', 'tx', 'absolute', -1, 1],
]
},
'galera_conflicts': {
- 'options': [None, 'Replication conflicts', 'transactions', 'galera', 'mysql.galera_conflicts', 'area'],
+ 'options': [None, 'Replication Conflicts', 'transactions', 'galera', 'mysql.galera_conflicts', 'area'],
'lines': [
['wsrep_local_bf_aborts', 'bf_aborts', 'incremental'],
['wsrep_local_cert_failures', 'cert_fails', 'incremental', -1, 1],
]
},
'galera_flow_control': {
- 'options': [None, 'Flow control', 'millisec', 'galera', 'mysql.galera_flow_control', 'area'],
+ 'options': [None, 'Flow Control', 'millisec', 'galera', 'mysql.galera_flow_control', 'area'],
'lines': [
['wsrep_flow_control_paused_ns', 'paused', 'incremental', 1, 1000000],
]
@@ -564,7 +579,11 @@ class Service(MySQLService):
MySQLService.__init__(self, configuration=configuration, name=name)
self.order = ORDER
self.definitions = CHARTS
- self.queries = dict(global_status=QUERY_GLOBAL, slave_status=QUERY_SLAVE, variables=QUERY_VARIABLES)
+ self.queries = dict(
+ global_status=QUERY_GLOBAL,
+ slave_status=QUERY_SLAVE,
+ variables=QUERY_VARIABLES,
+ )
def _get_data(self):
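The reformatted `self.queries` dict above maps result names to the SQL the service runs each cycle. `QUERY_SLAVE` and `QUERY_VARIABLES` are defined elsewhere in the module; the statement texts below are assumptions for illustration:

```python
QUERY_GLOBAL = 'SHOW GLOBAL STATUS;'
QUERY_SLAVE = 'SHOW SLAVE STATUS;'                                  # assumed
QUERY_VARIABLES = "SHOW GLOBAL VARIABLES LIKE 'max_connections';"  # assumed

queries = dict(
    global_status=QUERY_GLOBAL,
    slave_status=QUERY_SLAVE,
    variables=QUERY_VARIABLES,
)
```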
diff --git a/collectors/python.d.plugin/mysql/mysql.conf b/collectors/python.d.plugin/mysql/mysql.conf
index b5956a2c..ac9b505b 100644
--- a/collectors/python.d.plugin/mysql/mysql.conf
+++ b/collectors/python.d.plugin/mysql/mysql.conf
@@ -27,11 +27,10 @@
# If unset, the default for python.d.plugin is used.
# priority: 60000
-# retries sets the number of retries to be made in case of failures.
-# If unset, the default for python.d.plugin is used.
-# Attempts to restore the service are made once every update_every
-# and only if the module has collected values in the past.
-# retries: 60
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
+
# autodetection_retry sets the job re-check interval in seconds.
# The job is not deleted if check fails.
@@ -58,7 +57,7 @@
# # JOBs sharing a name are mutually exclusive
# update_every: 1 # the JOB's data collection frequency
# priority: 60000 # the JOB's order on the dashboard
-# retries: 60 # the JOB's number of restoration attempts
+# penalty: yes # the JOB's penalty
# autodetection_retry: 0 # the JOB's re-check interval in seconds
#
# Additionally to the above, mysql also supports the following:
diff --git a/collectors/python.d.plugin/nginx/README.md b/collectors/python.d.plugin/nginx/README.md
index 007f45c7..7854105b 100644
--- a/collectors/python.d.plugin/nginx/README.md
+++ b/collectors/python.d.plugin/nginx/README.md
@@ -37,9 +37,10 @@ priority : 90100
local:
url : 'http://localhost/stub_status'
- retries : 10
```
Without configuration, module attempts to connect to `http://localhost/stub_status`
---
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fnginx%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/collectors/python.d.plugin/nginx/nginx.chart.py b/collectors/python.d.plugin/nginx/nginx.chart.py
index 09c6bbd3..84a5985e 100644
--- a/collectors/python.d.plugin/nginx/nginx.chart.py
+++ b/collectors/python.d.plugin/nginx/nginx.chart.py
@@ -5,38 +5,30 @@
from bases.FrameworkServices.UrlService import UrlService
-# default module values (can be overridden per job in `config`)
-# update_every = 2
-priority = 60000
-retries = 60
-# default job configuration (overridden by python.d.plugin)
-# config = {'local': {
-# 'update_every': update_every,
-# 'retries': retries,
-# 'priority': priority,
-# 'url': 'http://localhost/stub_status'
-# }}
-
-# charts order (can be overridden if you want less charts, or different order)
-ORDER = ['connections', 'requests', 'connection_status', 'connect_rate']
+ORDER = [
+ 'connections',
+ 'requests',
+ 'connection_status',
+ 'connect_rate',
+]
CHARTS = {
'connections': {
- 'options': [None, 'nginx Active Connections', 'connections', 'active connections',
+ 'options': [None, 'Active Connections', 'connections', 'active connections',
'nginx.connections', 'line'],
'lines': [
['active']
]
},
'requests': {
- 'options': [None, 'nginx Requests', 'requests/s', 'requests', 'nginx.requests', 'line'],
+ 'options': [None, 'Requests', 'requests/s', 'requests', 'nginx.requests', 'line'],
'lines': [
['requests', None, 'incremental']
]
},
'connection_status': {
- 'options': [None, 'nginx Active Connections by Status', 'connections', 'status',
+ 'options': [None, 'Active Connections by Status', 'connections', 'status',
'nginx.connection_status', 'line'],
'lines': [
['reading'],
@@ -45,7 +37,7 @@ CHARTS = {
]
},
'connect_rate': {
- 'options': [None, 'nginx Connections Rate', 'connections/s', 'connections rate',
+ 'options': [None, 'Connections Rate', 'connections/s', 'connections rate',
'nginx.connect_rate', 'line'],
'lines': [
['accepts', 'accepted', 'incremental'],
@@ -58,9 +50,9 @@ CHARTS = {
class Service(UrlService):
def __init__(self, configuration=None, name=None):
UrlService.__init__(self, configuration=configuration, name=name)
- self.url = self.configuration.get('url', 'http://localhost/stub_status')
self.order = ORDER
self.definitions = CHARTS
+ self.url = self.configuration.get('url', 'http://localhost/stub_status')
def _get_data(self):
"""
diff --git a/collectors/python.d.plugin/nginx/nginx.conf b/collectors/python.d.plugin/nginx/nginx.conf
index 71c52106..4001b4bb 100644
--- a/collectors/python.d.plugin/nginx/nginx.conf
+++ b/collectors/python.d.plugin/nginx/nginx.conf
@@ -39,11 +39,9 @@
# If unset, the default for python.d.plugin is used.
# priority: 60000
-# retries sets the number of retries to be made in case of failures.
-# If unset, the default for python.d.plugin is used.
-# Attempts to restore the service are made once every update_every
-# and only if the module has collected values in the past.
-# retries: 60
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
# autodetection_retry sets the job re-check interval in seconds.
# The job is not deleted if check fails.
@@ -70,7 +68,7 @@
# # JOBs sharing a name are mutually exclusive
# update_every: 1 # the JOB's data collection frequency
# priority: 60000 # the JOB's order on the dashboard
-# retries: 60 # the JOB's number of restoration attempts
+# penalty: yes # the JOB's penalty
# autodetection_retry: 0 # the JOB's re-check interval in seconds
#
# Additionally to the above, this plugin also supports the following:
diff --git a/collectors/python.d.plugin/nginx_plus/README.md b/collectors/python.d.plugin/nginx_plus/README.md
index 43ec867a..c20ce30a 100644
--- a/collectors/python.d.plugin/nginx_plus/README.md
+++ b/collectors/python.d.plugin/nginx_plus/README.md
@@ -123,3 +123,5 @@ local:
Without configuration, module fails to start.
---
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fnginx_plus%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/collectors/python.d.plugin/nginx_plus/nginx_plus.chart.py b/collectors/python.d.plugin/nginx_plus/nginx_plus.chart.py
index 1392f5a5..3082fdbe 100644
--- a/collectors/python.d.plugin/nginx_plus/nginx_plus.chart.py
+++ b/collectors/python.d.plugin/nginx_plus/nginx_plus.chart.py
@@ -16,12 +16,7 @@ except ImportError:
from bases.FrameworkServices.UrlService import UrlService
-# default module values (can be overridden per job in `config`)
-update_every = 1
-priority = 60000
-retries = 60
-# charts order (can be overridden if you want less charts, or different order)
ORDER = [
'requests_total',
'requests_current',
@@ -76,7 +71,7 @@ CHARTS = {
]
},
'ssl_memory_usage': {
- 'options': [None, 'Memory Usage', '%', 'ssl', 'nginx_plus.ssl_memory_usage', 'area'],
+ 'options': [None, 'Memory Usage', 'percentage', 'ssl', 'nginx_plus.ssl_memory_usage', 'area'],
'lines': [
['ssl_memory_usage', 'usage', 'absolute', 1, 100]
]
@@ -95,7 +90,7 @@ def cache_charts(cache):
charts = OrderedDict()
charts['{0}_traffic'.format(cache.name)] = {
- 'options': [None, 'Traffic', 'KB', family, 'nginx_plus.cache_traffic', 'stacked'],
+ 'options': [None, 'Traffic', 'KiB', family, 'nginx_plus.cache_traffic', 'stacked'],
'lines': [
['_'.join([cache.name, 'hit_bytes']), 'served', 'absolute', 1, 1024],
['_'.join([cache.name, 'miss_bytes_written']), 'written', 'absolute', 1, 1024],
@@ -103,7 +98,7 @@ def cache_charts(cache):
]
}
charts['{0}_memory_usage'.format(cache.name)] = {
- 'options': [None, 'Memory Usage', '%', family, 'nginx_plus.cache_memory_usage', 'area'],
+ 'options': [None, 'Memory Usage', 'percentage', family, 'nginx_plus.cache_memory_usage', 'area'],
'lines': [
['_'.join([cache.name, 'memory_usage']), 'usage', 'absolute', 1, 100],
]
@@ -200,7 +195,8 @@ def web_upstream_charts(wu):
'lines': dimensions('active')
}
charts['web_upstream_{name}_connections_usage'.format(name=wu.name)] = {
- 'options': [None, 'Peers Connections Usage', '%', family, 'nginx_plus.web_upstream_connections_usage', 'line'],
+ 'options': [None, 'Peers Connections Usage', 'percentage', family,
+ 'nginx_plus.web_upstream_connections_usage', 'line'],
'lines': dimensions('connections_usage', d=100)
}
# Traffic
@@ -223,7 +219,7 @@ def web_upstream_charts(wu):
# Response Time
for peer in wu:
charts['web_upstream_{0}_{1}_timings'.format(wu.name, peer.server)] = {
- 'options': [None, 'Peer "{0}" Timings'.format(peer.real_server), 'ms', family,
+ 'options': [None, 'Peer "{0}" Timings'.format(peer.real_server), 'milliseconds', family,
'nginx_plus.web_upstream_peer_timings', 'line'],
'lines': [
['_'.join([wu.name, peer.server, 'header_time']), 'header'],
@@ -232,7 +228,7 @@ def web_upstream_charts(wu):
}
# Memory Usage
charts['web_upstream_{name}_memory_usage'.format(name=wu.name)] = {
- 'options': [None, 'Memory Usage', '%', family, 'nginx_plus.web_upstream_memory_usage', 'area'],
+ 'options': [None, 'Memory Usage', 'percentage', family, 'nginx_plus.web_upstream_memory_usage', 'area'],
'lines': [
['_'.join([wu.name, 'memory_usage']), 'usage', 'absolute', 1, 100]
]
diff --git a/collectors/python.d.plugin/nginx_plus/nginx_plus.conf b/collectors/python.d.plugin/nginx_plus/nginx_plus.conf
index 7b5c8f43..201eb0eb 100644
--- a/collectors/python.d.plugin/nginx_plus/nginx_plus.conf
+++ b/collectors/python.d.plugin/nginx_plus/nginx_plus.conf
@@ -27,11 +27,9 @@
# If unset, the default for python.d.plugin is used.
# priority: 60000
-# retries sets the number of retries to be made in case of failures.
-# If unset, the default for python.d.plugin is used.
-# Attempts to restore the service are made once every update_every
-# and only if the module has collected values in the past.
-# retries: 60
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
# autodetection_retry sets the job re-check interval in seconds.
# The job is not deleted if check fails.
@@ -58,7 +56,7 @@
# # JOBs sharing a name are mutually exclusive
# update_every: 1 # the JOB's data collection frequency
# priority: 60000 # the JOB's order on the dashboard
-# retries: 60 # the JOB's number of restoration attempts
+# penalty: yes # the JOB's penalty
# autodetection_retry: 0 # the JOB's re-check interval in seconds
#
# Additionally to the above, nginx_plus also supports the following:
diff --git a/collectors/python.d.plugin/nsd/README.md b/collectors/python.d.plugin/nsd/README.md
index 02c302f4..b118657d 100644
--- a/collectors/python.d.plugin/nsd/README.md
+++ b/collectors/python.d.plugin/nsd/README.md
@@ -52,3 +52,5 @@ It produces:
Configuration is not needed.
---
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fnsd%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/collectors/python.d.plugin/nsd/nsd.chart.py b/collectors/python.d.plugin/nsd/nsd.chart.py
index d713f46b..77b0d7bb 100644
--- a/collectors/python.d.plugin/nsd/nsd.chart.py
+++ b/collectors/python.d.plugin/nsd/nsd.chart.py
@@ -7,13 +7,20 @@ import re
from bases.FrameworkServices.ExecutableService import ExecutableService
-# default module values (can be overridden per job in `config`)
-priority = 60000
-retries = 5
+
update_every = 30
-# charts order (can be overridden if you want less charts, or different order)
-ORDER = ['queries', 'zones', 'protocol', 'type', 'transfer', 'rcode']
+NSD_CONTROL_COMMAND = 'nsd-control stats_noreset'
+REGEX = re.compile(r'([A-Za-z0-9.]+)=(\d+)')
+
+ORDER = [
+ 'queries',
+ 'zones',
+ 'protocol',
+ 'type',
+ 'transfer',
+ 'rcode',
+]
CHARTS = {
'queries': {
@@ -79,22 +86,21 @@ CHARTS = {
class Service(ExecutableService):
def __init__(self, configuration=None, name=None):
- ExecutableService.__init__(
- self, configuration=configuration, name=name)
- self.command = 'nsd-control stats_noreset'
+ ExecutableService.__init__(self, configuration=configuration, name=name)
self.order = ORDER
self.definitions = CHARTS
- self.regex = re.compile(r'([A-Za-z0-9.]+)=(\d+)')
+ self.command = NSD_CONTROL_COMMAND
def _get_data(self):
lines = self._get_raw_data()
if not lines:
return None
- r = self.regex
- stats = dict((k.replace('.', '_'), int(v))
- for k, v in r.findall(''.join(lines)))
+ stats = dict(
+ (k.replace('.', '_'), int(v)) for k, v in REGEX.findall(''.join(lines))
+ )
stats.setdefault('num_opcode_NOTIFY', 0)
stats.setdefault('num_type_TYPE252', 0)
stats.setdefault('num_type_TYPE255', 0)
+
return stats
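The module-level `REGEX` above now parses the `nsd-control stats_noreset` output once per update; a standalone sketch with made-up output:

```python
import re

REGEX = re.compile(r'([A-Za-z0-9.]+)=(\d+)')

raw = 'num.queries=1234\nnum.type.A=900\nnum.opcode.QUERY=1200\n'  # invented sample
stats = dict((k.replace('.', '_'), int(v)) for k, v in REGEX.findall(raw))
print(stats['num_queries'])  # 1234
```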
diff --git a/collectors/python.d.plugin/nsd/nsd.conf b/collectors/python.d.plugin/nsd/nsd.conf
index 078e9721..77a8a317 100644
--- a/collectors/python.d.plugin/nsd/nsd.conf
+++ b/collectors/python.d.plugin/nsd/nsd.conf
@@ -28,11 +28,9 @@
# If unset, the default for python.d.plugin is used.
# priority: 60000
-# retries sets the number of retries to be made in case of failures.
-# If unset, the default for python.d.plugin is used.
-# Attempts to restore the service are made once every update_every
-# and only if the module has collected values in the past.
-# retries: 60
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
# autodetection_retry sets the job re-check interval in seconds.
# The job is not deleted if check fails.
@@ -59,7 +57,7 @@
# # JOBs sharing a name are mutually exclusive
# update_every: 1 # the JOB's data collection frequency
# priority: 60000 # the JOB's order on the dashboard
-# retries: 60 # the JOB's number of restoration attempts
+# penalty: yes # the JOB's penalty
# autodetection_retry: 0 # the JOB's re-check interval in seconds
#
# Additionally to the above, nsd also supports the following:
diff --git a/collectors/python.d.plugin/ntpd/README.md b/collectors/python.d.plugin/ntpd/README.md
index b0fa17fd..d33fd877 100644
--- a/collectors/python.d.plugin/ntpd/README.md
+++ b/collectors/python.d.plugin/ntpd/README.md
@@ -69,3 +69,5 @@ otherhost:
If no configuration is given, module will attempt to connect to `ntpd` on `::1:123` or `127.0.0.1:123` and show charts for the systemvars. Use `show_peers: yes` to also show the charts for configured peers. Local peers in the range `127.0.0.0/8` are hidden by default, use `peer_filter: ''` to show all peers.
---
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fntpd%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/collectors/python.d.plugin/ntpd/ntpd.chart.py b/collectors/python.d.plugin/ntpd/ntpd.chart.py
index 79d557c8..5a5477e6 100644
--- a/collectors/python.d.plugin/ntpd/ntpd.chart.py
+++ b/collectors/python.d.plugin/ntpd/ntpd.chart.py
@@ -9,10 +9,6 @@ import re
from bases.FrameworkServices.SocketService import SocketService
-# default module values
-update_every = 1
-priority = 60000
-retries = 60
# NTP Control Message Protocol constants
MODE = 6
@@ -54,13 +50,15 @@ ORDER = [
CHARTS = {
'sys_offset': {
- 'options': [None, 'Combined offset of server relative to this host', 'ms', 'system', 'ntpd.sys_offset', 'area'],
+ 'options': [None, 'Combined offset of server relative to this host', 'milliseconds',
+ 'system', 'ntpd.sys_offset', 'area'],
'lines': [
['offset', 'offset', 'absolute', 1, PRECISION]
]
},
'sys_jitter': {
- 'options': [None, 'Combined system jitter and clock jitter', 'ms', 'system', 'ntpd.sys_jitter', 'line'],
+ 'options': [None, 'Combined system jitter and clock jitter', 'milliseconds',
+ 'system', 'ntpd.sys_jitter', 'line'],
'lines': [
['sys_jitter', 'system', 'absolute', 1, PRECISION],
['clk_jitter', 'clock', 'absolute', 1, PRECISION]
@@ -79,14 +77,14 @@ CHARTS = {
]
},
'sys_rootdelay': {
- 'options': [None, 'Total roundtrip delay to the primary reference clock', 'ms', 'system',
+ 'options': [None, 'Total roundtrip delay to the primary reference clock', 'milliseconds', 'system',
'ntpd.sys_rootdelay', 'area'],
'lines': [
['rootdelay', 'delay', 'absolute', 1, PRECISION]
]
},
'sys_rootdisp': {
- 'options': [None, 'Total root dispersion to the primary reference clock', 'ms', 'system',
+ 'options': [None, 'Total root dispersion to the primary reference clock', 'milliseconds', 'system',
'ntpd.sys_rootdisp', 'area'],
'lines': [
['rootdisp', 'dispersion', 'absolute', 1, PRECISION]
@@ -115,27 +113,27 @@ CHARTS = {
PEER_CHARTS = {
'peer_offset': {
- 'options': [None, 'Filter offset', 'ms', 'peers', 'ntpd.peer_offset', 'line'],
+ 'options': [None, 'Filter offset', 'milliseconds', 'peers', 'ntpd.peer_offset', 'line'],
'lines': []
},
'peer_delay': {
- 'options': [None, 'Filter delay', 'ms', 'peers', 'ntpd.peer_delay', 'line'],
+ 'options': [None, 'Filter delay', 'milliseconds', 'peers', 'ntpd.peer_delay', 'line'],
'lines': []
},
'peer_dispersion': {
- 'options': [None, 'Filter dispersion', 'ms', 'peers', 'ntpd.peer_dispersion', 'line'],
+ 'options': [None, 'Filter dispersion', 'milliseconds', 'peers', 'ntpd.peer_dispersion', 'line'],
'lines': []
},
'peer_jitter': {
- 'options': [None, 'Filter jitter', 'ms', 'peers', 'ntpd.peer_jitter', 'line'],
+ 'options': [None, 'Filter jitter', 'milliseconds', 'peers', 'ntpd.peer_jitter', 'line'],
'lines': []
},
'peer_xleave': {
- 'options': [None, 'Interleave delay', 'ms', 'peers', 'ntpd.peer_xleave', 'line'],
+ 'options': [None, 'Interleave delay', 'milliseconds', 'peers', 'ntpd.peer_xleave', 'line'],
'lines': []
},
'peer_rootdelay': {
- 'options': [None, 'Total roundtrip delay to the primary reference clock', 'ms', 'peers',
+ 'options': [None, 'Total roundtrip delay to the primary reference clock', 'milliseconds', 'peers',
'ntpd.peer_rootdelay', 'line'],
'lines': []
},
@@ -235,7 +233,6 @@ class Service(SocketService):
SocketService.__init__(self, configuration=configuration, name=name)
self.order = list(ORDER)
self.definitions = dict(CHARTS)
-
self.port = 'ntp'
self.dgram_socket = True
self.system = System()
@@ -244,7 +241,6 @@ class Service(SocketService):
self.retries = 0
self.show_peers = self.configuration.get('show_peers', False)
self.peer_rescan = self.configuration.get('peer_rescan', 60)
-
if self.show_peers:
self.definitions.update(PEER_CHARTS)
diff --git a/collectors/python.d.plugin/ntpd/ntpd.conf b/collectors/python.d.plugin/ntpd/ntpd.conf
index 7adc4074..80bd468d 100644
--- a/collectors/python.d.plugin/ntpd/ntpd.conf
+++ b/collectors/python.d.plugin/ntpd/ntpd.conf
@@ -27,11 +27,9 @@
# If unset, the default for python.d.plugin is used.
# priority: 60000
-# retries sets the number of retries to be made in case of failures.
-# If unset, the default for python.d.plugin is used.
-# Attempts to restore the service are made once every update_every
-# and only if the module has collected values in the past.
-# retries: 60
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
# ----------------------------------------------------------------------
# JOBS (data collection sources)
@@ -52,7 +50,7 @@
# # JOBs sharing a name are mutually exclusive
# update_every: 1 # the JOB's data collection frequency
# priority: 60000 # the JOB's order on the dashboard
-# retries: 60 # the JOB's number of restoration attempts
+# penalty: yes # the JOB's penalty
#
# Additionally to the above, ntp also supports the following:
#
diff --git a/collectors/python.d.plugin/nvidia_smi/README.md b/collectors/python.d.plugin/nvidia_smi/README.md
index 06acfc29..48b61195 100644
--- a/collectors/python.d.plugin/nvidia_smi/README.md
+++ b/collectors/python.d.plugin/nvidia_smi/README.md
@@ -36,4 +36,5 @@ Sample:
```yaml
poll_seconds: 1
-```
\ No newline at end of file
+```
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fnvidia_smi%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/collectors/python.d.plugin/nvidia_smi/nvidia_smi.chart.py b/collectors/python.d.plugin/nvidia_smi/nvidia_smi.chart.py
index c3fff621..7cb816c0 100644
--- a/collectors/python.d.plugin/nvidia_smi/nvidia_smi.chart.py
+++ b/collectors/python.d.plugin/nvidia_smi/nvidia_smi.chart.py
@@ -15,6 +15,8 @@ disabled_by_default = True
NVIDIA_SMI = 'nvidia-smi'
+BAD_VALUE = 'N/A'
+
EMPTY_ROW = ''
EMPTY_ROW_LIMIT = 500
POLLER_BREAK_ROW = '</nvidia_smi_log>'
@@ -47,39 +49,39 @@ def gpu_charts(gpu):
charts = {
PCI_BANDWIDTH: {
- 'options': [None, 'PCI Express Bandwidth Utilization', 'KB/s', fam, 'nvidia_smi.pci_bandwidth', 'area'],
+ 'options': [None, 'PCI Express Bandwidth Utilization', 'KiB/s', fam, 'nvidia_smi.pci_bandwidth', 'area'],
'lines': [
['rx_util', 'rx', 'absolute', 1, 1],
['tx_util', 'tx', 'absolute', 1, -1],
]
},
FAN_SPEED: {
- 'options': [None, 'Fan Speed', '%', fam, 'nvidia_smi.fan_speed', 'line'],
+ 'options': [None, 'Fan Speed', 'percentage', fam, 'nvidia_smi.fan_speed', 'line'],
'lines': [
['fan_speed', 'speed'],
]
},
GPU_UTIL: {
- 'options': [None, 'GPU Utilization', '%', fam, 'nvidia_smi.gpu_utilization', 'line'],
+ 'options': [None, 'GPU Utilization', 'percentage', fam, 'nvidia_smi.gpu_utilization', 'line'],
'lines': [
['gpu_util', 'utilization'],
]
},
MEM_UTIL: {
- 'options': [None, 'Memory Bandwidth Utilization', '%', fam, 'nvidia_smi.mem_utilization', 'line'],
+ 'options': [None, 'Memory Bandwidth Utilization', 'percentage', fam, 'nvidia_smi.mem_utilization', 'line'],
'lines': [
['memory_util', 'utilization'],
]
},
ENCODER_UTIL: {
- 'options': [None, 'Encoder/Decoder Utilization', '%', fam, 'nvidia_smi.encoder_utilization', 'line'],
+ 'options': [None, 'Encoder/Decoder Utilization', 'percentage', fam, 'nvidia_smi.encoder_utilization', 'line'],
'lines': [
['encoder_util', 'encoder'],
['decoder_util', 'decoder'],
]
},
MEM_ALLOCATED: {
- 'options': [None, 'Memory Allocated', 'MB', fam, 'nvidia_smi.memory_allocated', 'line'],
+ 'options': [None, 'Memory Allocated', 'MiB', fam, 'nvidia_smi.memory_allocated', 'line'],
'lines': [
['fb_memory_usage', 'used'],
]
@@ -206,6 +208,15 @@ def handle_attr_error(method):
return on_call
+def handle_value_error(method):
+ def on_call(*args, **kwargs):
+ try:
+ return method(*args, **kwargs)
+ except ValueError:
+ return None
+ return on_call
+
+
class GPU:
def __init__(self, num, root):
self.num = num
@@ -272,6 +283,7 @@ class GPU:
def mem_clock(self):
return self.root.find('clocks').find('mem_clock').text.split()[0]
+ @handle_value_error
@handle_attr_error
def power_draw(self):
return float(self.root.find('power_readings').find('power_draw').text.split()[0]) * 100
@@ -294,7 +306,9 @@ class GPU:
'power_draw': self.power_draw(),
}
- return dict(('gpu{0}_{1}'.format(self.num, k), v) for k, v in data.items() if v is not None)
+ return dict(
+ ('gpu{0}_{1}'.format(self.num, k), v) for k, v in data.items() if v is not None and v != BAD_VALUE
+ )
class Service(SimpleService):
@@ -302,7 +316,6 @@ class Service(SimpleService):
super(Service, self).__init__(configuration=configuration, name=name)
self.order = list()
self.definitions = dict()
-
poll = int(configuration.get('poll_seconds', 1))
self.poller = NvidiaSMIPoller(poll)
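The new `handle_value_error` decorator guards `power_draw()` against `nvidia-smi` reporting `N/A`, which makes `float()` raise `ValueError`; a self-contained sketch of the behaviour:

```python
def handle_value_error(method):
    def on_call(*args, **kwargs):
        try:
            return method(*args, **kwargs)
        except ValueError:
            return None
    return on_call

@handle_value_error
def power_draw(text='N/A'):  # hypothetical stand-in for the XML lookup
    return float(text.split()[0]) * 100

print(power_draw())          # None instead of a crash; dropped from the data dict
print(power_draw('42.5 W'))  # 4250.0
```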
diff --git a/collectors/python.d.plugin/nvidia_smi/nvidia_smi.conf b/collectors/python.d.plugin/nvidia_smi/nvidia_smi.conf
index e1bcf3fa..53e544a5 100644
--- a/collectors/python.d.plugin/nvidia_smi/nvidia_smi.conf
+++ b/collectors/python.d.plugin/nvidia_smi/nvidia_smi.conf
@@ -27,11 +27,9 @@
# If unset, the default for python.d.plugin is used.
# priority: 60000
-# retries sets the number of retries to be made in case of failures.
-# If unset, the default for python.d.plugin is used.
-# Attempts to restore the service are made once every update_every
-# and only if the module has collected values in the past.
-# retries: 60
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
# autodetection_retry sets the job re-check interval in seconds.
# The job is not deleted if check fails.
@@ -58,7 +56,7 @@
# # JOBs sharing a name are mutually exclusive
# update_every: 1 # the JOB's data collection frequency
# priority: 60000 # the JOB's order on the dashboard
-# retries: 60 # the JOB's number of restoration attempts
+# penalty: yes # the JOB's penalty
# autodetection_retry: 0 # the JOB's re-check interval in seconds
#
# Additionally to the above, nvidia_smi also supports the following:
diff --git a/collectors/python.d.plugin/openldap/README.md b/collectors/python.d.plugin/openldap/README.md
index 938535bc..629cc153 100644
--- a/collectors/python.d.plugin/openldap/README.md
+++ b/collectors/python.d.plugin/openldap/README.md
@@ -55,3 +55,5 @@ openldap:
```
---
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fopenldap%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/collectors/python.d.plugin/openldap/openldap.chart.py b/collectors/python.d.plugin/openldap/openldap.chart.py
index 6342d386..768ed01e 100644
--- a/collectors/python.d.plugin/openldap/openldap.chart.py
+++ b/collectors/python.d.plugin/openldap/openldap.chart.py
@@ -11,8 +11,6 @@ except ImportError:
from bases.FrameworkServices.SimpleService import SimpleService
-# default module values (can be overridden per job in `config`)
-priority = 60000
DEFAULT_SERVER = 'localhost'
DEFAULT_PORT = '389'
@@ -36,7 +34,7 @@ CHARTS = {
]
},
'bytes_sent': {
- 'options': [None, 'Traffic', 'KB/s', 'ldap', 'openldap.traffic_stats', 'line'],
+ 'options': [None, 'Traffic', 'KiB/s', 'ldap', 'openldap.traffic_stats', 'line'],
'lines': [
['bytes_sent', 'sent', 'incremental', 1, 1024]
]
@@ -136,13 +134,11 @@ class Service(SimpleService):
SimpleService.__init__(self, configuration=configuration, name=name)
self.order = ORDER
self.definitions = CHARTS
-
self.server = configuration.get('server', DEFAULT_SERVER)
self.port = configuration.get('port', DEFAULT_PORT)
self.username = configuration.get('username')
self.password = configuration.get('password')
self.timeout = configuration.get('timeout', DEFAULT_TIMEOUT)
-
self.alive = False
self.conn = None
diff --git a/collectors/python.d.plugin/openldap/openldap.conf b/collectors/python.d.plugin/openldap/openldap.conf
index 662cc58c..6182b3ee 100644
--- a/collectors/python.d.plugin/openldap/openldap.conf
+++ b/collectors/python.d.plugin/openldap/openldap.conf
@@ -28,11 +28,9 @@ update_every: 10
# If unset, the default for python.d.plugin is used.
# priority: 60000
-# retries sets the number of retries to be made in case of failures.
-# If unset, the default for python.d.plugin is used.
-# Attempts to restore the service are made once every update_every
-# and only if the module has collected values in the past.
-# retries: 60
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
# autodetection_retry sets the job re-check interval in seconds.
# The job is not deleted if check fails.
@@ -59,7 +57,7 @@ update_every: 10
# # JOBs sharing a name are mutually exclusive
# update_every: 1 # the JOB's data collection frequency
# priority: 60000 # the JOB's order on the dashboard
-# retries: 60 # the JOB's number of restoration attempts
+# penalty: yes # the JOB's penalty
# autodetection_retry: 0 # the JOB's re-check interval in seconds
#
# ----------------------------------------------------------------------
diff --git a/collectors/python.d.plugin/ovpn_status_log/README.md b/collectors/python.d.plugin/ovpn_status_log/README.md
index be1ea279..bcd1f00e 100644
--- a/collectors/python.d.plugin/ovpn_status_log/README.md
+++ b/collectors/python.d.plugin/ovpn_status_log/README.md
@@ -30,3 +30,5 @@ default
```
---
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fovpn_status_log%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/collectors/python.d.plugin/ovpn_status_log/ovpn_status_log.chart.py b/collectors/python.d.plugin/ovpn_status_log/ovpn_status_log.chart.py
index 64d7062d..dc7a6002 100644
--- a/collectors/python.d.plugin/ovpn_status_log/ovpn_status_log.chart.py
+++ b/collectors/python.d.plugin/ovpn_status_log/ovpn_status_log.chart.py
@@ -3,15 +3,18 @@
# Author: l2isbad
# SPDX-License-Identifier: GPL-3.0-or-later
-from re import compile as r_compile
+import re
from bases.FrameworkServices.SimpleService import SimpleService
-priority = 60000
-retries = 60
+
update_every = 10
-ORDER = ['users', 'traffic']
+ORDER = [
+ 'users',
+ 'traffic',
+]
+
CHARTS = {
'users': {
'options': [None, 'OpenVPN Active Users', 'active users', 'users', 'openvpn_status.users', 'line'],
@@ -20,15 +23,20 @@ CHARTS = {
]
},
'traffic': {
- 'options': [None, 'OpenVPN Traffic', 'KB/s', 'traffic', 'openvpn_status.traffic', 'area'],
+ 'options': [None, 'OpenVPN Traffic', 'KiB/s', 'traffic', 'openvpn_status.traffic', 'area'],
'lines': [
- ['bytes_in', 'in', 'incremental', 1, 1 << 10], ['bytes_out', 'out', 'incremental', 1, -1 << 10]
+ ['bytes_in', 'in', 'incremental', 1, 1 << 10],
+ ['bytes_out', 'out', 'incremental', -1, 1 << 10]
]
}
}
-TLS_REGEX = r_compile(r'(?:[0-9a-f:]+|(?:\d{1,3}(?:\.\d{1,3}){3}(?::\d+)?)) (?P<bytes_in>\d+) (?P<bytes_out>\d+)')
-STATIC_KEY_REGEX = r_compile(r'TCP/[A-Z]+ (?P<direction>(?:read|write)) bytes,(?P<bytes>\d+)')
+TLS_REGEX = re.compile(
+ r'(?:[0-9a-f]+:[0-9a-f:]+|(?:\d{1,3}(?:\.\d{1,3}){3}(?::\d+)?)) (?P<bytes_in>\d+) (?P<bytes_out>\d+)'
+)
+STATIC_KEY_REGEX = re.compile(
+ r'TCP/[A-Z]+ (?P<direction>(?:read|write)) bytes,(?P<bytes>\d+)'
+)
class Service(SimpleService):
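
Two details in this hunk are easy to misread. First, the `'out'` dimension change is purely cosmetic: in Python unary minus binds tighter than the shift, so `-1 << 10` already evaluated to -1024; the rewrite just moves the sign into the multiplier slot, where it reads as "mirror this dimension below the axis". Second, the tightened `TLS_REGEX` now requires at least one colon before treating a hex token as an IPv6 address, so a bare hexadecimal field can no longer be mistaken for one. A quick check of both claims:

```python
import re

assert (-1 << 10) == -(1 << 10) == -1024   # the two spellings are equal

OLD = re.compile(r'(?:[0-9a-f:]+|(?:\d{1,3}(?:\.\d{1,3}){3}(?::\d+)?)) (?P<bytes_in>\d+) (?P<bytes_out>\d+)')
NEW = re.compile(r'(?:[0-9a-f]+:[0-9a-f:]+|(?:\d{1,3}(?:\.\d{1,3}){3}(?::\d+)?)) (?P<bytes_in>\d+) (?P<bytes_out>\d+)')

line = 'deadbeef 100 200'                               # hex token, not an address
print(bool(OLD.search(line)), bool(NEW.search(line)))   # True False
print(NEW.search('2001:db8::1 100 200').groupdict())    # real IPv6 still matches
```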
diff --git a/collectors/python.d.plugin/ovpn_status_log/ovpn_status_log.conf b/collectors/python.d.plugin/ovpn_status_log/ovpn_status_log.conf
index 6fb35a53..1d71f6b8 100644
--- a/collectors/python.d.plugin/ovpn_status_log/ovpn_status_log.conf
+++ b/collectors/python.d.plugin/ovpn_status_log/ovpn_status_log.conf
@@ -27,11 +27,9 @@
# If unset, the default for python.d.plugin is used.
# priority: 60000
-# retries sets the number of retries to be made in case of failures.
-# If unset, the default for python.d.plugin is used.
-# Attempts to restore the service are made once every update_every
-# and only if the module has collected values in the past.
-# retries: 60
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
# autodetection_retry sets the job re-check interval in seconds.
# The job is not deleted if check fails.
@@ -58,7 +56,7 @@
# # JOBs sharing a name are mutually exclusive
# update_every: 1 # the JOB's data collection frequency
# priority: 60000 # the JOB's order on the dashboard
-# retries: 60 # the JOB's number of restoration attempts
+# penalty: yes # the JOB's penalty
# autodetection_retry: 0 # the JOB's re-check interval in seconds
#
# Additionally to the above, openvpn status log also supports the following:
diff --git a/collectors/python.d.plugin/phpfpm/README.md b/collectors/python.d.plugin/phpfpm/README.md
index 66930463..d3aa85a7 100644
--- a/collectors/python.d.plugin/phpfpm/README.md
+++ b/collectors/python.d.plugin/phpfpm/README.md
@@ -32,9 +32,10 @@ priority : 90100
local:
url : 'http://localhost/status'
- retries : 10
```
Without configuration, module attempts to connect to `http://localhost/status`
---
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fphpfpm%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/collectors/python.d.plugin/phpfpm/phpfpm.chart.py b/collectors/python.d.plugin/phpfpm/phpfpm.chart.py
index a3f0963f..70091e23 100644
--- a/collectors/python.d.plugin/phpfpm/phpfpm.chart.py
+++ b/collectors/python.d.plugin/phpfpm/phpfpm.chart.py
@@ -9,20 +9,8 @@ import re
from bases.FrameworkServices.UrlService import UrlService
-# default module values (can be overridden per job in `config`)
-# update_every = 2
-priority = 60000
-retries = 60
-# default job configuration (overridden by python.d.plugin)
-# config = {'local': {
-# 'update_every': update_every,
-# 'retries': retries,
-# 'priority': priority,
-# 'url': 'http://localhost/status?full&json'
-# }}
-
-# charts order (can be overridden if you want less charts, or different order)
+REGEX = re.compile(r'([a-z][a-z ]+): ([\d.]+)')
POOL_INFO = [
('active processes', 'active'),
@@ -50,7 +38,14 @@ CALC = [
('avg', average)
]
-ORDER = ['connections', 'requests', 'performance', 'request_duration', 'request_cpu', 'request_mem']
+ORDER = [
+ 'connections',
+ 'requests',
+ 'performance',
+ 'request_duration',
+ 'request_cpu',
+ 'request_mem',
+]
CHARTS = {
'connections': {
@@ -85,7 +80,7 @@ CHARTS = {
]
},
'request_cpu': {
- 'options': [None, 'PHP-FPM Request CPU', 'percent', 'request CPU', 'phpfpm.request_cpu', 'line'],
+ 'options': [None, 'PHP-FPM Request CPU', 'percentage', 'request CPU', 'phpfpm.request_cpu', 'line'],
'lines': [
['minReqCpu', 'min'],
['maxReqCpu', 'max'],
@@ -93,7 +88,7 @@ CHARTS = {
]
},
'request_mem': {
- 'options': [None, 'PHP-FPM Request Memory', 'kilobytes', 'request memory', 'phpfpm.request_mem', 'line'],
+ 'options': [None, 'PHP-FPM Request Memory', 'KB', 'request memory', 'phpfpm.request_mem', 'line'],
'lines': [
['minReqMem', 'min', 'absolute', 1, 1024],
['maxReqMem', 'max', 'absolute', 1, 1024],
@@ -106,14 +101,14 @@ CHARTS = {
class Service(UrlService):
def __init__(self, configuration=None, name=None):
UrlService.__init__(self, configuration=configuration, name=name)
- self.url = self.configuration.get('url', 'http://localhost/status?full&json')
self.order = ORDER
self.definitions = CHARTS
- self.regex = re.compile(r'([a-z][a-z ]+): ([\d.]+)')
+ self.url = self.configuration.get('url', 'http://localhost/status?full&json')
self.json = '&json' in self.url or '?json' in self.url
self.json_full = self.url.endswith(('?full&json', '?json&full'))
- self.if_all_processes_running = dict([(c_name + p_name, 0) for c_name, func in CALC
- for metric, p_name in PER_PROCESS_INFO])
+ self.if_all_processes_running = dict(
+ [(c_name + p_name, 0) for c_name, func in CALC for metric, p_name in PER_PROCESS_INFO]
+ )
def _get_data(self):
"""
@@ -124,7 +119,7 @@ class Service(UrlService):
if not raw:
return None
- raw_json = parse_raw_data_(is_json=self.json, regex=self.regex, raw_data=raw)
+ raw_json = parse_raw_data_(is_json=self.json, raw_data=raw)
# Per Pool info: active connections, requests and performance charts
to_netdata = fetch_data_(raw_data=raw_json, metrics_list=POOL_INFO)
@@ -160,7 +155,7 @@ def fetch_data_(raw_data, metrics_list, pid=''):
return result
-def parse_raw_data_(is_json, regex, raw_data):
+def parse_raw_data_(is_json, raw_data):
"""
:param is_json: bool
:param regex: compiled regular expr
@@ -174,4 +169,4 @@ def parse_raw_data_(is_json, regex, raw_data):
return dict()
else:
raw_data = ' '.join(raw_data.split())
- return dict(regex.findall(raw_data))
+ return dict(REGEX.findall(raw_data))
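
Promoting the pattern to a module-level `REGEX` constant removes per-instance compilation and the now-redundant `regex` parameter; note the docstring above still advertises `:param regex:`, a leftover this hunk did not clean up. Roughly what the pattern extracts from the plain-text status page (sample data invented):

```python
import re

REGEX = re.compile(r'([a-z][a-z ]+): ([\d.]+)')

raw = """
accepted conn:    790
listen queue:     0
active processes: 1
"""
flat = ' '.join(raw.split())          # the module flattens whitespace first
print(dict(REGEX.findall(flat)))
# {'accepted conn': '790', 'listen queue': '0', 'active processes': '1'}
```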
diff --git a/collectors/python.d.plugin/phpfpm/phpfpm.conf b/collectors/python.d.plugin/phpfpm/phpfpm.conf
index 571eb915..d3185390 100644
--- a/collectors/python.d.plugin/phpfpm/phpfpm.conf
+++ b/collectors/python.d.plugin/phpfpm/phpfpm.conf
@@ -27,11 +27,9 @@
# If unset, the default for python.d.plugin is used.
# priority: 60000
-# retries sets the number of retries to be made in case of failures.
-# If unset, the default for python.d.plugin is used.
-# Attempts to restore the service are made once every update_every
-# and only if the module has collected values in the past.
-# retries: 60
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
# autodetection_retry sets the job re-check interval in seconds.
# The job is not deleted if check fails.
@@ -58,7 +56,7 @@
# # JOBs sharing a name are mutually exclusive
# update_every: 1 # the JOB's data collection frequency
# priority: 60000 # the JOB's order on the dashboard
-# retries: 60 # the JOB's number of restoration attempts
+# penalty: yes # the JOB's penalty
# autodetection_retry: 0 # the JOB's re-check interval in seconds
#
# Additionally to the above, PHP-FPM also supports the following:
diff --git a/collectors/python.d.plugin/portcheck/README.md b/collectors/python.d.plugin/portcheck/README.md
index f1338d57..8f289c8d 100644
--- a/collectors/python.d.plugin/portcheck/README.md
+++ b/collectors/python.d.plugin/portcheck/README.md
@@ -33,3 +33,5 @@ server:
* Currently, the accuracy of the latency is low and should be used as reference only.
---
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fportcheck%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/collectors/python.d.plugin/portcheck/portcheck.chart.py b/collectors/python.d.plugin/portcheck/portcheck.chart.py
index e86f8254..8479e38e 100644
--- a/collectors/python.d.plugin/portcheck/portcheck.chart.py
+++ b/collectors/python.d.plugin/portcheck/portcheck.chart.py
@@ -12,9 +12,6 @@ except ImportError:
from bases.FrameworkServices.SimpleService import SimpleService
-# default module values (can be overridden per job in `config`)
-priority = 60000
-retries = 60
PORT_LATENCY = 'connect'
@@ -26,7 +23,7 @@ ORDER = ['latency', 'status']
CHARTS = {
'latency': {
- 'options': [None, 'TCP connect latency', 'ms', 'latency', 'portcheck.latency', 'line'],
+ 'options': [None, 'TCP connect latency', 'milliseconds', 'latency', 'portcheck.latency', 'line'],
'lines': [
[PORT_LATENCY, 'connect', 'absolute', 100, 1000]
]
@@ -121,7 +118,7 @@ class Service(SimpleService):
:return: dict
"""
- af, _, proto, _, sa = socket_config
+ _, _, _, _, sa = socket_config
port = str(sa[1])
try:
self.debug('Connecting socket to "{address}", port {port}'.format(address=sa[0], port=port))
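
The unpacking change is a lint fix: of the five fields in each `socket.getaddrinfo()` result only the socket address is used, so the previously bound `af` and `proto` names are discarded along with the rest. For reference:

```python
import socket

# getaddrinfo() yields (family, type, proto, canonname, sockaddr) tuples;
# only sockaddr -- e.g. ('127.0.0.1', 22) for IPv4 -- is needed to report
# the address and port being probed.
for info in socket.getaddrinfo('localhost', 22, 0, socket.SOCK_STREAM):
    _, _, _, _, sa = info
    print(sa[0], sa[1])
```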
diff --git a/collectors/python.d.plugin/portcheck/portcheck.conf b/collectors/python.d.plugin/portcheck/portcheck.conf
index b3dd8bd3..df67824b 100644
--- a/collectors/python.d.plugin/portcheck/portcheck.conf
+++ b/collectors/python.d.plugin/portcheck/portcheck.conf
@@ -27,6 +27,10 @@
# If unset, the default for python.d.plugin is used.
# priority: 60000
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
+
# chart_cleanup sets the default chart cleanup interval in iterations.
# A chart is marked as obsolete if it has not been updated
# 'chart_cleanup' iterations in a row.
@@ -60,7 +64,7 @@ chart_cleanup: 0
# # JOBs sharing a name are mutually exclusive
# update_every: 1 # [optional] the JOB's data collection frequency
# priority: 60000 # [optional] the JOB's order on the dashboard
-# retries: 60 # [optional] the JOB's number of restoration attempts
+# penalty: yes # the JOB's penalty
# timeout: 1 # [optional] the socket timeout when connecting
# host: 'dns or ip' # [required] the remote host address in either IPv4, IPv6 or as DNS name.
# port: 22 # [required] the port number to check. Specify an integer, not service name.
diff --git a/collectors/python.d.plugin/postfix/README.md b/collectors/python.d.plugin/postfix/README.md
index 77c95ff4..e2147ac9 100644
--- a/collectors/python.d.plugin/postfix/README.md
+++ b/collectors/python.d.plugin/postfix/README.md
@@ -13,3 +13,5 @@ It produces only two charts:
Configuration is not needed.
---
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fpostfix%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/collectors/python.d.plugin/postfix/postfix.chart.py b/collectors/python.d.plugin/postfix/postfix.chart.py
index bdbd0fee..b650514e 100644
--- a/collectors/python.d.plugin/postfix/postfix.chart.py
+++ b/collectors/python.d.plugin/postfix/postfix.chart.py
@@ -5,13 +5,12 @@
from bases.FrameworkServices.ExecutableService import ExecutableService
-# default module values (can be overridden per job in `config`)
-# update_every = 2
-priority = 60000
-retries = 60
+POSTQUEUE_COMMAND = 'postqueue -p'
-# charts order (can be overridden if you want less charts, or different order)
-ORDER = ['qemails', 'qsize']
+ORDER = [
+ 'qemails',
+ 'qsize',
+]
CHARTS = {
'qemails': {
@@ -21,7 +20,7 @@ CHARTS = {
]
},
'qsize': {
- 'options': [None, 'Postfix Queue Emails Size', 'emails size in KB', 'queue', 'postfix.qsize', 'area'],
+ 'options': [None, 'Postfix Queue Emails Size', 'KiB', 'queue', 'postfix.qsize', 'area'],
'lines': [
['size', None, 'absolute']
]
@@ -32,9 +31,9 @@ CHARTS = {
class Service(ExecutableService):
def __init__(self, configuration=None, name=None):
ExecutableService.__init__(self, configuration=configuration, name=name)
- self.command = 'postqueue -p'
self.order = ORDER
self.definitions = CHARTS
+ self.command = POSTQUEUE_COMMAND
def _get_data(self):
"""
diff --git a/collectors/python.d.plugin/postfix/postfix.conf b/collectors/python.d.plugin/postfix/postfix.conf
index e0d5a5f8..a4d2472e 100644
--- a/collectors/python.d.plugin/postfix/postfix.conf
+++ b/collectors/python.d.plugin/postfix/postfix.conf
@@ -28,11 +28,9 @@ update_every: 10
# If unset, the default for python.d.plugin is used.
# priority: 60000
-# retries sets the number of retries to be made in case of failures.
-# If unset, the default for python.d.plugin is used.
-# Attempts to restore the service are made once every update_every
-# and only if the module has collected values in the past.
-# retries: 60
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
# autodetection_retry sets the job re-check interval in seconds.
# The job is not deleted if check fails.
@@ -59,7 +57,7 @@ update_every: 10
# # JOBs sharing a name are mutually exclusive
# update_every: 1 # the JOB's data collection frequency
# priority: 60000 # the JOB's order on the dashboard
-# retries: 60 # the JOB's number of restoration attempts
+# penalty: yes # the JOB's penalty
# autodetection_retry: 0 # the JOB's re-check interval in seconds
#
# Additionally to the above, postfix also supports the following:
diff --git a/collectors/python.d.plugin/postgres/README.md b/collectors/python.d.plugin/postgres/README.md
index e7b108d3..9939a0c4 100644
--- a/collectors/python.d.plugin/postgres/README.md
+++ b/collectors/python.d.plugin/postgres/README.md
@@ -66,3 +66,5 @@ tcp:
When no configuration file is found, module tries to connect to TCP/IP socket: `localhost:5432`.
---
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fpostgres%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/collectors/python.d.plugin/postgres/postgres.chart.py b/collectors/python.d.plugin/postgres/postgres.chart.py
index 7f43877c..e988eec3 100644
--- a/collectors/python.d.plugin/postgres/postgres.chart.py
+++ b/collectors/python.d.plugin/postgres/postgres.chart.py
@@ -16,13 +16,34 @@ except ImportError:
from bases.FrameworkServices.SimpleService import SimpleService
-# default module values
-update_every = 1
-priority = 60000
-retries = 60
+
+DEFAULT_PORT = 5432
+DEFAULT_USER = 'postgres'
+DEFAULT_CONNECT_TIMEOUT = 2 # seconds
+DEFAULT_STATEMENT_TIMEOUT = 5000 # ms
+
+
+WAL = 'WAL'
+ARCHIVE = 'ARCHIVE'
+BACKENDS = 'BACKENDS'
+TABLE_STATS = 'TABLE_STATS'
+INDEX_STATS = 'INDEX_STATS'
+DATABASE = 'DATABASE'
+BGWRITER = 'BGWRITER'
+LOCKS = 'LOCKS'
+DATABASES = 'DATABASES'
+STANDBY = 'STANDBY'
+REPLICATION_SLOT = 'REPLICATION_SLOT'
+STANDBY_DELTA = 'STANDBY_DELTA'
+REPSLOT_FILES = 'REPSLOT_FILES'
+IF_SUPERUSER = 'IF_SUPERUSER'
+SERVER_VERSION = 'SERVER_VERSION'
+AUTOVACUUM = 'AUTOVACUUM'
+DIFF_LSN = 'DIFF_LSN'
+WAL_WRITES = 'WAL_WRITES'
METRICS = {
- 'DATABASE': [
+ DATABASE: [
'connections',
'xact_commit',
'xact_rollback',
@@ -38,32 +59,32 @@ METRICS = {
'temp_bytes',
'size'
],
- 'BACKENDS': [
+ BACKENDS: [
'backends_active',
'backends_idle'
],
- 'INDEX_STATS': [
+ INDEX_STATS: [
'index_count',
'index_size'
],
- 'TABLE_STATS': [
+ TABLE_STATS: [
'table_size',
'table_count'
],
- 'WAL': [
+ WAL: [
'written_wal',
'recycled_wal',
'total_wal'
],
- 'WAL_WRITES': [
+ WAL_WRITES: [
'wal_writes'
],
- 'ARCHIVE': [
+ ARCHIVE: [
'ready_count',
'done_count',
'file_count'
],
- 'BGWRITER': [
+ BGWRITER: [
'checkpoint_scheduled',
'checkpoint_requested',
'buffers_checkpoint',
@@ -73,7 +94,7 @@ METRICS = {
'buffers_alloc',
'buffers_backend_fsync'
],
- 'LOCKS': [
+ LOCKS: [
'ExclusiveLock',
'RowShareLock',
'SIReadLock',
@@ -84,27 +105,61 @@ METRICS = {
'ShareLock',
'RowExclusiveLock'
],
- 'AUTOVACUUM': [
+ AUTOVACUUM: [
'analyze',
'vacuum_analyze',
'vacuum',
'vacuum_freeze',
'brin_summarize'
],
- 'STANDBY_DELTA': [
+ STANDBY_DELTA: [
'sent_delta',
'write_delta',
'flush_delta',
'replay_delta'
],
- 'REPSLOT_FILES': [
+ REPSLOT_FILES: [
'replslot_wal_keep',
'replslot_files'
]
}
-QUERIES = {
- 'WAL': """
+NO_VERSION = 0
+DEFAULT = 'DEFAULT'
+V96 = 'V96'
+V10 = 'V10'
+V11 = 'V11'
+
+
+QUERY_WAL = {
+ DEFAULT: """
+SELECT
+ count(*) as total_wal,
+ count(*) FILTER (WHERE type = 'recycled') AS recycled_wal,
+ count(*) FILTER (WHERE type = 'written') AS written_wal
+FROM
+ (SELECT
+ wal.name,
+ pg_walfile_name(
+ CASE pg_is_in_recovery()
+ WHEN true THEN NULL
+ ELSE pg_current_wal_lsn()
+ END ),
+ CASE
+ WHEN wal.name > pg_walfile_name(
+ CASE pg_is_in_recovery()
+ WHEN true THEN NULL
+ ELSE pg_current_wal_lsn()
+ END ) THEN 'recycled'
+ ELSE 'written'
+ END AS type
+ FROM pg_catalog.pg_ls_dir('pg_wal') AS wal(name)
+ WHERE name ~ '^[0-9A-F]{24}$'
+ ORDER BY
+ (pg_stat_file('pg_wal/'||name)).modification,
+ wal.name DESC) sub;
+""",
+ V96: """
SELECT
count(*) as total_wal,
count(*) FILTER (WHERE type = 'recycled') AS recycled_wal,
@@ -112,34 +167,49 @@ SELECT
FROM
(SELECT
wal.name,
- pg_{0}file_name(
+ pg_xlogfile_name(
CASE pg_is_in_recovery()
WHEN true THEN NULL
- ELSE pg_current_{0}_{1}()
+ ELSE pg_current_xlog_location()
END ),
CASE
- WHEN wal.name > pg_{0}file_name(
+ WHEN wal.name > pg_xlogfile_name(
CASE pg_is_in_recovery()
WHEN true THEN NULL
- ELSE pg_current_{0}_{1}()
+ ELSE pg_current_xlog_location()
END ) THEN 'recycled'
ELSE 'written'
END AS type
- FROM pg_catalog.pg_ls_dir('pg_{0}') AS wal(name)
- WHERE name ~ '^[0-9A-F]{{24}}$'
+ FROM pg_catalog.pg_ls_dir('pg_xlog') AS wal(name)
+ WHERE name ~ '^[0-9A-F]{24}$'
ORDER BY
- (pg_stat_file('pg_{0}/'||name)).modification,
+ (pg_stat_file('pg_xlog/'||name)).modification,
wal.name DESC) sub;
""",
- 'ARCHIVE': """
+}
+
+QUERY_ARCHIVE = {
+ DEFAULT: """
+SELECT
+ CAST(COUNT(*) AS INT) AS file_count,
+ CAST(COALESCE(SUM(CAST(archive_file ~ $r$\.ready$$r$ as INT)),0) AS INT) AS ready_count,
+ CAST(COALESCE(SUM(CAST(archive_file ~ $r$\.done$$r$ AS INT)),0) AS INT) AS done_count
+FROM
+ pg_catalog.pg_ls_dir('pg_wal/archive_status') AS archive_files (archive_file);
+""",
+ V96: """
SELECT
CAST(COUNT(*) AS INT) AS file_count,
CAST(COALESCE(SUM(CAST(archive_file ~ $r$\.ready$$r$ as INT)),0) AS INT) AS ready_count,
CAST(COALESCE(SUM(CAST(archive_file ~ $r$\.done$$r$ AS INT)),0) AS INT) AS done_count
FROM
- pg_catalog.pg_ls_dir('pg_{0}/archive_status') AS archive_files (archive_file);
+ pg_catalog.pg_ls_dir('pg_xlog/archive_status') AS archive_files (archive_file);
+
""",
- 'BACKENDS': """
+}
+
+QUERY_BACKEND = {
+ DEFAULT: """
SELECT
count(*) - (SELECT count(*)
FROM pg_stat_activity
@@ -151,21 +221,30 @@ SELECT
AS backends_idle
FROM pg_stat_activity;
""",
- 'TABLE_STATS': """
+}
+
+QUERY_TABLE_STATS = {
+ DEFAULT: """
SELECT
((sum(relpages) * 8) * 1024) AS table_size,
count(1) AS table_count
FROM pg_class
WHERE relkind IN ('r', 't');
""",
- 'INDEX_STATS': """
+}
+
+QUERY_INDEX_STATS = {
+ DEFAULT: """
SELECT
((sum(relpages) * 8) * 1024) AS index_size,
count(1) AS index_count
FROM pg_class
WHERE relkind = 'i';
""",
- 'DATABASE': """
+}
+
+QUERY_DATABASE = {
+ DEFAULT: """
SELECT
datname AS database_name,
numbackends AS connections,
@@ -185,7 +264,10 @@ SELECT
FROM pg_stat_database
WHERE datname IN %(databases)s ;
""",
- 'BGWRITER': """
+}
+
+QUERY_BGWRITER = {
+ DEFAULT: """
SELECT
checkpoints_timed AS checkpoint_scheduled,
checkpoints_req AS checkpoint_requested,
@@ -197,7 +279,10 @@ SELECT
buffers_backend_fsync
FROM pg_stat_bgwriter;
""",
- 'LOCKS': """
+}
+
+QUERY_LOCKS = {
+ DEFAULT: """
SELECT
pg_database.datname as database_name,
mode,
@@ -208,7 +293,10 @@ INNER JOIN pg_database
GROUP BY datname, mode
ORDER BY datname, mode;
""",
- 'FIND_DATABASES': """
+}
+
+QUERY_DATABASES = {
+ DEFAULT: """
SELECT
datname
FROM pg_stat_database
@@ -217,48 +305,129 @@ WHERE
(SELECT current_user), datname, 'connect')
AND NOT datname ~* '^template\d ';
""",
- 'FIND_STANDBY': """
+}
+
+QUERY_STANDBY = {
+ DEFAULT: """
SELECT
application_name
FROM pg_stat_replication
WHERE application_name IS NOT NULL
GROUP BY application_name;
""",
- 'FIND_REPLICATION_SLOT': """
+}
+
+QUERY_REPLICATION_SLOT = {
+ DEFAULT: """
SELECT slot_name
FROM pg_replication_slots;
+"""
+}
+
+QUERY_STANDBY_DELTA = {
+ DEFAULT: """
+SELECT
+ application_name,
+ pg_wal_lsn_diff(
+ CASE pg_is_in_recovery()
+ WHEN true THEN pg_last_wal_receive_lsn()
+ ELSE pg_current_wal_lsn()
+ END,
+ sent_lsn) AS sent_delta,
+ pg_wal_lsn_diff(
+ CASE pg_is_in_recovery()
+ WHEN true THEN pg_last_wal_receive_lsn()
+ ELSE pg_current_wal_lsn()
+ END,
+ write_lsn) AS write_delta,
+ pg_wal_lsn_diff(
+ CASE pg_is_in_recovery()
+ WHEN true THEN pg_last_wal_receive_lsn()
+ ELSE pg_current_wal_lsn()
+ END,
+ flush_lsn) AS flush_delta,
+ pg_wal_lsn_diff(
+ CASE pg_is_in_recovery()
+ WHEN true THEN pg_last_wal_receive_lsn()
+ ELSE pg_current_wal_lsn()
+ END,
+ replay_lsn) AS replay_delta
+FROM pg_stat_replication
+WHERE application_name IS NOT NULL;
""",
- 'STANDBY_DELTA': """
+ V96: """
SELECT
application_name,
- pg_{0}_{1}_diff(
+ pg_xlog_location_diff(
CASE pg_is_in_recovery()
- WHEN true THEN pg_last_{0}_receive_{1}()
- ELSE pg_current_{0}_{1}()
+ WHEN true THEN pg_last_xlog_receive_location()
+ ELSE pg_current_xlog_location()
END,
- sent_{1}) AS sent_delta,
- pg_{0}_{1}_diff(
+ sent_location) AS sent_delta,
+ pg_xlog_location_diff(
CASE pg_is_in_recovery()
- WHEN true THEN pg_last_{0}_receive_{1}()
- ELSE pg_current_{0}_{1}()
+ WHEN true THEN pg_last_xlog_receive_location()
+ ELSE pg_current_xlog_location()
END,
- write_{1}) AS write_delta,
- pg_{0}_{1}_diff(
+ write_location) AS write_delta,
+ pg_xlog_location_diff(
CASE pg_is_in_recovery()
- WHEN true THEN pg_last_{0}_receive_{1}()
- ELSE pg_current_{0}_{1}()
+ WHEN true THEN pg_last_xlog_receive_location()
+ ELSE pg_current_xlog_location()
END,
- flush_{1}) AS flush_delta,
- pg_{0}_{1}_diff(
+ flush_location) AS flush_delta,
+ pg_xlog_location_diff(
CASE pg_is_in_recovery()
- WHEN true THEN pg_last_{0}_receive_{1}()
- ELSE pg_current_{0}_{1}()
+ WHEN true THEN pg_last_xlog_receive_location()
+ ELSE pg_current_xlog_location()
END,
- replay_{1}) AS replay_delta
+ replay_location) AS replay_delta
FROM pg_stat_replication
WHERE application_name IS NOT NULL;
""",
- 'REPSLOT_FILES': """
+}
+
+QUERY_REPSLOT_FILES = {
+ DEFAULT: """
+WITH wal_size AS (
+ SELECT
+ setting::int AS val
+ FROM pg_settings
+ WHERE name = 'wal_segment_size'
+ )
+SELECT
+ slot_name,
+ slot_type,
+ replslot_wal_keep,
+ count(slot_file) AS replslot_files
+FROM
+ (SELECT
+ slot.slot_name,
+ CASE
+ WHEN slot_file <> 'state' THEN 1
+ END AS slot_file ,
+ slot_type,
+ COALESCE (
+ floor(
+ (pg_wal_lsn_diff(pg_current_wal_lsn (),slot.restart_lsn)
+ - (pg_walfile_name_offset (restart_lsn)).file_offset) / (s.val)
+ ),0) AS replslot_wal_keep
+ FROM pg_replication_slots slot
+ LEFT JOIN (
+ SELECT
+ slot2.slot_name,
+ pg_ls_dir('pg_replslot/' || slot2.slot_name) AS slot_file
+ FROM pg_replication_slots slot2
+ ) files (slot_name, slot_file)
+ ON slot.slot_name = files.slot_name
+ CROSS JOIN wal_size s
+ ) AS d
+GROUP BY
+ slot_name,
+ slot_type,
+ replslot_wal_keep;
+""",
+ V10: """
WITH wal_size AS (
SELECT
current_setting('wal_block_size')::INT * setting::INT AS val
@@ -297,13 +466,22 @@ GROUP BY
slot_type,
replslot_wal_keep;
""",
- 'IF_SUPERUSER': """
+}
+
+QUERY_SUPERUSER = {
+ DEFAULT: """
SELECT current_setting('is_superuser') = 'on' AS is_superuser;
""",
- 'DETECT_SERVER_VERSION': """
+}
+
+QUERY_SHOW_VERSION = {
+ DEFAULT: """
SHOW server_version_num;
""",
- 'AUTOVACUUM': """
+}
+
+QUERY_AUTOVACUUM = {
+ DEFAULT: """
SELECT
count(*) FILTER (WHERE query LIKE 'autovacuum: ANALYZE%%') AS analyze,
count(*) FILTER (WHERE query LIKE 'autovacuum: VACUUM ANALYZE%%') AS vacuum_analyze,
@@ -315,23 +493,78 @@ SELECT
FROM pg_stat_activity
WHERE query NOT LIKE '%%pg_stat_activity%%';
""",
- 'DIFF_LSN': """
+}
+
+QUERY_DIFF_LSN = {
+ DEFAULT: """
SELECT
- pg_{0}_{1}_diff(
+ pg_wal_lsn_diff(
CASE pg_is_in_recovery()
- WHEN true THEN pg_last_{0}_receive_{1}()
- ELSE pg_current_{0}_{1}()
+ WHEN true THEN pg_last_wal_receive_lsn()
+ ELSE pg_current_wal_lsn()
END,
'0/0') as wal_writes ;
-"""
+""",
+ V96: """
+SELECT
+ pg_xlog_location_diff(
+ CASE pg_is_in_recovery()
+ WHEN true THEN pg_last_xlog_receive_location()
+ ELSE pg_current_xlog_location()
+ END,
+ '0/0') as wal_writes ;
+""",
}
-QUERY_STATS = {
- QUERIES['DATABASE']: METRICS['DATABASE'],
- QUERIES['BACKENDS']: METRICS['BACKENDS'],
- QUERIES['LOCKS']: METRICS['LOCKS']
-}
+def query_factory(name, version=NO_VERSION):
+ if name == BACKENDS:
+ return QUERY_BACKEND[DEFAULT]
+ elif name == TABLE_STATS:
+ return QUERY_TABLE_STATS[DEFAULT]
+ elif name == INDEX_STATS:
+ return QUERY_INDEX_STATS[DEFAULT]
+ elif name == DATABASE:
+ return QUERY_DATABASE[DEFAULT]
+ elif name == BGWRITER:
+ return QUERY_BGWRITER[DEFAULT]
+ elif name == LOCKS:
+ return QUERY_LOCKS[DEFAULT]
+ elif name == DATABASES:
+ return QUERY_DATABASES[DEFAULT]
+ elif name == STANDBY:
+ return QUERY_STANDBY[DEFAULT]
+ elif name == REPLICATION_SLOT:
+ return QUERY_REPLICATION_SLOT[DEFAULT]
+ elif name == IF_SUPERUSER:
+ return QUERY_SUPERUSER[DEFAULT]
+ elif name == SERVER_VERSION:
+ return QUERY_SHOW_VERSION[DEFAULT]
+ elif name == AUTOVACUUM:
+ return QUERY_AUTOVACUUM[DEFAULT]
+ elif name == WAL:
+ if version < 100000:
+ return QUERY_WAL[V96]
+ return QUERY_WAL[DEFAULT]
+ elif name == ARCHIVE:
+ if version < 100000:
+ return QUERY_ARCHIVE[V96]
+ return QUERY_ARCHIVE[DEFAULT]
+ elif name == STANDBY_DELTA:
+ if version < 100000:
+ return QUERY_STANDBY_DELTA[V96]
+ return QUERY_STANDBY_DELTA[DEFAULT]
+ elif name == REPSLOT_FILES:
+ if version < 110000:
+ return QUERY_REPSLOT_FILES[V10]
+ return QUERY_REPSLOT_FILES[DEFAULT]
+ elif name == DIFF_LSN:
+ if version < 100000:
+ return QUERY_DIFF_LSN[V96]
+ return QUERY_DIFF_LSN[DEFAULT]
+
+ raise ValueError('unknown query')
+
ORDER = [
'db_stat_temp_files',
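
This is the big structural change in the module: the single `QUERIES` dict of `str.format()` templates becomes one dict per query keyed by server flavour, with `query_factory()` doing the version gating. The split exists because PostgreSQL 10 renamed the `pg_xlog`/`*_location` functions to `pg_wal`/`*_lsn`, and 11 changed how `wal_segment_size` is reported, hence the `V96` and `V10` variants. Usage then reduces to (version numbers as returned by `SHOW server_version_num`, e.g. 9.6.x reports 90600):

```python
# 9.6 server: falls back to the pg_xlog spelling
assert query_factory(WAL, 90600) == QUERY_WAL[V96]
# 11 server: gets the current pg_wal spelling
assert query_factory(WAL, 110002) == QUERY_WAL[DEFAULT]
# version-independent queries ignore the second argument entirely
assert query_factory(BACKENDS) == QUERY_BACKEND[DEFAULT]
```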
@@ -403,7 +636,7 @@ CHARTS = {
]
},
'db_stat_temp_bytes': {
- 'options': [None, 'Temp files written to disk', 'KB/s', 'db statistics', 'postgres.db_stat_temp_bytes',
+ 'options': [None, 'Temp files written to disk', 'KiB/s', 'db statistics', 'postgres.db_stat_temp_bytes',
'line'],
'lines': [
['temp_bytes', 'size', 'incremental', 1, 1024]
@@ -417,7 +650,7 @@ CHARTS = {
]
},
'database_size': {
- 'options': [None, 'Database size', 'MB', 'database size', 'postgres.db_size', 'stacked'],
+ 'options': [None, 'Database size', 'MiB', 'database size', 'postgres.db_size', 'stacked'],
'lines': [
]
},
@@ -436,7 +669,7 @@ CHARTS = {
]
},
'index_size': {
- 'options': [None, 'Indexes size', 'MB', 'indexes', 'postgres.index_size', 'line'],
+ 'options': [None, 'Indexes size', 'MiB', 'indexes', 'postgres.index_size', 'line'],
'lines': [
['index_size', 'size', 'absolute', 1, 1024 * 1024]
]
@@ -448,7 +681,7 @@ CHARTS = {
]
},
'table_size': {
- 'options': [None, 'Tables size', 'MB', 'tables', 'postgres.table_size', 'line'],
+ 'options': [None, 'Tables size', 'MiB', 'tables', 'postgres.table_size', 'line'],
'lines': [
['table_size', 'size', 'absolute', 1, 1024 * 1024]
]
@@ -462,7 +695,7 @@ CHARTS = {
]
},
'wal_writes': {
- 'options': [None, 'Write-Ahead Logs', 'kilobytes/s', 'wal_writes', 'postgres.wal_writes', 'line'],
+ 'options': [None, 'Write-Ahead Logs', 'KiB/s', 'wal_writes', 'postgres.wal_writes', 'line'],
'lines': [
['wal_writes', 'writes', 'incremental', 1, 1024]
]
@@ -483,20 +716,20 @@ CHARTS = {
]
},
'stat_bgwriter_alloc': {
- 'options': [None, 'Buffers allocated', 'kilobytes/s', 'bgwriter', 'postgres.stat_bgwriter_alloc', 'line'],
+ 'options': [None, 'Buffers allocated', 'KiB/s', 'bgwriter', 'postgres.stat_bgwriter_alloc', 'line'],
'lines': [
['buffers_alloc', 'alloc', 'incremental', 1, 1024]
]
},
'stat_bgwriter_checkpoint': {
- 'options': [None, 'Buffers written during checkpoints', 'kilobytes/s', 'bgwriter',
+ 'options': [None, 'Buffers written during checkpoints', 'KiB/s', 'bgwriter',
'postgres.stat_bgwriter_checkpoint', 'line'],
'lines': [
['buffers_checkpoint', 'checkpoint', 'incremental', 1, 1024]
]
},
'stat_bgwriter_backend': {
- 'options': [None, 'Buffers written directly by a backend', 'kilobytes/s', 'bgwriter',
+ 'options': [None, 'Buffers written directly by a backend', 'KiB/s', 'bgwriter',
'postgres.stat_bgwriter_backend', 'line'],
'lines': [
['buffers_backend', 'backend', 'incremental', 1, 1024]
@@ -509,7 +742,7 @@ CHARTS = {
]
},
'stat_bgwriter_bgwriter': {
- 'options': [None, 'Buffers written by the background writer', 'kilobytes/s', 'bgwriter',
+ 'options': [None, 'Buffers written by the background writer', 'KiB/s', 'bgwriter',
'postgres.bgwriter_bgwriter', 'line'],
'lines': [
['buffers_clean', 'clean', 'incremental', 1, 1024]
@@ -533,7 +766,7 @@ CHARTS = {
]
},
'standby_delta': {
- 'options': [None, 'Standby delta', 'kilobytes', 'replication delta', 'postgres.standby_delta', 'line'],
+ 'options': [None, 'Standby delta', 'KiB', 'replication delta', 'postgres.standby_delta', 'line'],
'lines': [
['sent_delta', 'sent delta', 'absolute', 1, 1024],
['write_delta', 'write delta', 'absolute', 1, 1024],
@@ -554,186 +787,218 @@ CHARTS = {
class Service(SimpleService):
def __init__(self, configuration=None, name=None):
SimpleService.__init__(self, configuration=configuration, name=name)
- self.order = ORDER[:]
+ self.order = list(ORDER)
self.definitions = deepcopy(CHARTS)
- self.table_stats = configuration.pop('table_stats', False)
- self.index_stats = configuration.pop('index_stats', False)
- self.database_poll = configuration.pop('database_poll', None)
+ self.do_table_stats = configuration.pop('table_stats', False)
+ self.do_index_stats = configuration.pop('index_stats', False)
+ self.databases_to_poll = configuration.pop('database_poll', None)
+ self.statement_timeout = configuration.pop('statement_timeout', DEFAULT_STATEMENT_TIMEOUT)
self.configuration = configuration
- self.connection = False
+ self.conn = None
self.server_version = None
- self.data = dict()
- self.locks_zeroed = dict()
+ self.is_superuser = False
+ self.alive = False
self.databases = list()
self.secondaries = list()
self.replication_slots = list()
- self.queries = QUERY_STATS.copy()
-
- def _connect(self):
- params = dict(user='postgres',
- database=None,
- password=None,
- host=None,
- port=5432)
- params.update(self.configuration)
-
- if not self.connection:
- try:
- self.connection = psycopg2.connect(**params)
- self.connection.set_isolation_level(extensions.ISOLATION_LEVEL_AUTOCOMMIT)
- self.connection.set_session(readonly=True)
- except OperationalError as error:
- return False, str(error)
- return True, True
+ self.queries = dict()
+ self.data = dict()
+
+ def reconnect(self):
+ return self.connect()
+
+ def connect(self):
+ if self.conn:
+ self.conn.close()
+ self.conn = None
+
+ try:
+ params = dict(
+ host=None,
+ port=DEFAULT_PORT,
+ database=None,
+ user=DEFAULT_USER,
+ password=None,
+ connect_timeout=DEFAULT_CONNECT_TIMEOUT,
+ options='-c statement_timeout={0}'.format(self.statement_timeout),
+ )
+ params.update(self.configuration)
+
+ self.conn = psycopg2.connect(**params)
+ self.conn.set_isolation_level(extensions.ISOLATION_LEVEL_AUTOCOMMIT)
+ self.conn.set_session(readonly=True)
+ except OperationalError as error:
+ self.error(error)
+ self.alive = False
+ else:
+ self.alive = True
+
+ return self.alive
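
The reworked `connect()` funnels two new knobs into libpq: `connect_timeout`, and a server-side `statement_timeout` delivered via the `options` startup parameter, so a hung or slow query is cancelled by the server instead of stalling the collector. An equivalent standalone call (connection details are placeholders):

```python
import psycopg2

conn = psycopg2.connect(
    host='localhost',                         # placeholder
    port=5432,
    user='postgres',
    connect_timeout=2,                        # seconds
    options='-c statement_timeout=5000',      # milliseconds, applied per query
)
conn.set_session(readonly=True, autocommit=True)
```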
def check(self):
if not PSYCOPG2:
- self.error('\'python-psycopg2\' module is needed to use postgres.chart.py')
+ self.error("'python-psycopg2' package is needed to use postgres module")
return False
- result, error = self._connect()
- if not result:
- conf = dict((k, (lambda k, v: v if k != 'password' else '*****')(k, v))
- for k, v in self.configuration.items())
- self.error('Failed to connect to %s. Error: %s' % (str(conf), error))
+
+ if not self.connect():
+ self.error('failed to connect to {0}'.format(hide_password(self.configuration)))
return False
+
try:
- cursor = self.connection.cursor()
- self.databases = discover_databases_(cursor, QUERIES['FIND_DATABASES'])
- is_superuser = check_if_superuser_(cursor, QUERIES['IF_SUPERUSER'])
- self.secondaries = discover_secondaries_(cursor, QUERIES['FIND_STANDBY'])
- self.server_version = detect_server_version(cursor, QUERIES['DETECT_SERVER_VERSION'])
- if self.server_version >= 94000:
- self.replication_slots = discover_replication_slots_(cursor, QUERIES['FIND_REPLICATION_SLOT'])
- cursor.close()
-
- if self.database_poll and isinstance(self.database_poll, str):
- self.databases = [dbase for dbase in self.databases if dbase in self.database_poll.split()] \
- or self.databases
-
- self.locks_zeroed = populate_lock_types(self.databases)
- self.add_additional_queries_(is_superuser)
- self.create_dynamic_charts_()
- return True
+ self.check_queries()
except Exception as error:
- self.error(str(error))
+ self.error(error)
return False
- def add_additional_queries_(self, is_superuser):
+ self.populate_queries()
+ self.create_dynamic_charts()
- if self.server_version >= 100000:
- wal = 'wal'
- lsn = 'lsn'
- else:
- wal = 'xlog'
- lsn = 'location'
- self.queries[QUERIES['BGWRITER']] = METRICS['BGWRITER']
- self.queries[QUERIES['DIFF_LSN'].format(wal, lsn)] = METRICS['WAL_WRITES']
- self.queries[QUERIES['STANDBY_DELTA'].format(wal, lsn)] = METRICS['STANDBY_DELTA']
-
- if self.index_stats:
- self.queries[QUERIES['INDEX_STATS']] = METRICS['INDEX_STATS']
- if self.table_stats:
- self.queries[QUERIES['TABLE_STATS']] = METRICS['TABLE_STATS']
- if is_superuser:
- self.queries[QUERIES['ARCHIVE'].format(wal)] = METRICS['ARCHIVE']
- if self.server_version >= 90400:
- self.queries[QUERIES['WAL'].format(wal, lsn)] = METRICS['WAL']
- if self.server_version >= 100000:
- self.queries[QUERIES['REPSLOT_FILES']] = METRICS['REPSLOT_FILES']
- if self.server_version >= 90400:
- self.queries[QUERIES['AUTOVACUUM']] = METRICS['AUTOVACUUM']
+ return True
- def create_dynamic_charts_(self):
+ def get_data(self):
+ if not self.alive and not self.reconnect():
+ return None
- for database_name in self.databases[::-1]:
- self.definitions['database_size']['lines'].append(
- [database_name + '_size', database_name, 'absolute', 1, 1024 * 1024])
- for chart_name in [name for name in self.order if name.startswith('db_stat')]:
- add_database_stat_chart_(order=self.order, definitions=self.definitions,
- name=chart_name, database_name=database_name)
+ try:
+ cursor = self.conn.cursor(cursor_factory=DictCursor)
- add_database_lock_chart_(order=self.order, definitions=self.definitions, database_name=database_name)
+ self.data.update(zero_lock_types(self.databases))
- for application_name in self.secondaries[::-1]:
- add_replication_delta_chart_(
- order=self.order,
- definitions=self.definitions,
- name='standby_delta',
- application_name=application_name)
+ for query, metrics in self.queries.items():
+ self.query_stats(cursor, query, metrics)
- for slot_name in self.replication_slots[::-1]:
- add_replication_slot_chart_(
- order=self.order,
- definitions=self.definitions,
- name='replication_slot',
- slot_name=slot_name)
-
- def _get_data(self):
- result, _ = self._connect()
- if result:
- cursor = self.connection.cursor(cursor_factory=DictCursor)
- try:
- self.data.update(self.locks_zeroed)
- for query, metrics in self.queries.items():
- self.query_stats_(cursor, query, metrics)
-
- except OperationalError:
- self.connection = False
- cursor.close()
- return None
- else:
- cursor.close()
- return self.data
- else:
+ except OperationalError:
+ self.alive = False
return None
- def query_stats_(self, cursor, query, metrics):
+ cursor.close()
+
+ return self.data
+
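
`get_data()` now follows a mark-dead/reconnect-lazily pattern: a failed query flips `alive` to False and drops the current data point, and the next collection cycle pays the reconnect cost. The shape of the idiom, reduced to its essentials (sketch; `gather` is a hypothetical stand-in for the query loop above):

```python
from psycopg2 import OperationalError

def collect_once(service):
    if not service.alive and not service.reconnect():
        return None
    try:
        return service.gather()          # hypothetical query-running helper
    except OperationalError:
        service.alive = False            # defer the reconnect to the next cycle
        return None
```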
+ def query_stats(self, cursor, query, metrics):
cursor.execute(query, dict(databases=tuple(self.databases)))
+
for row in cursor:
for metric in metrics:
+ # databases
if 'database_name' in row:
dimension_id = '_'.join([row['database_name'], metric])
+ # secondaries
elif 'application_name' in row:
dimension_id = '_'.join([row['application_name'], metric])
+ # replication slots
elif 'slot_name' in row:
dimension_id = '_'.join([row['slot_name'], metric])
+ # other
else:
dimension_id = metric
+
if metric in row:
if row[metric] is not None:
self.data[dimension_id] = int(row[metric])
elif 'locks_count' in row:
- self.data[dimension_id] = row['locks_count'] if metric == row['mode'] else 0
+ if metric == row['mode']:
+ self.data[dimension_id] = row['locks_count']
+ def check_queries(self):
+ cursor = self.conn.cursor()
-def discover_databases_(cursor, query):
- cursor.execute(query)
- result = list()
- for db in [database[0] for database in cursor]:
- if db not in result:
- result.append(db)
- return result
+ self.server_version = detect_server_version(cursor, query_factory(SERVER_VERSION))
+ self.debug('server version: {0}'.format(self.server_version))
+ self.is_superuser = check_if_superuser(cursor, query_factory(IF_SUPERUSER))
+ self.debug('superuser: {0}'.format(self.is_superuser))
-def discover_secondaries_(cursor, query):
- cursor.execute(query)
- result = list()
- for sc in [standby[0] for standby in cursor]:
- if sc not in result:
- result.append(sc)
- return result
+ self.databases = discover(cursor, query_factory(DATABASES))
+ self.debug('discovered databases {0}'.format(self.databases))
+ if self.databases_to_poll:
+ to_poll = self.databases_to_poll.split()
+ self.databases = [db for db in self.databases if db in to_poll] or self.databases
+
+ self.secondaries = discover(cursor, query_factory(STANDBY))
+ self.debug('discovered secondaries: {0}'.format(self.secondaries))
+
+ if self.server_version >= 94000:
+ self.replication_slots = discover(cursor, query_factory(REPLICATION_SLOT))
+ self.debug('discovered replication slots: {0}'.format(self.replication_slots))
+
+ cursor.close()
+
+ def populate_queries(self):
+ self.queries[query_factory(DATABASE)] = METRICS[DATABASE]
+ self.queries[query_factory(BACKENDS)] = METRICS[BACKENDS]
+ self.queries[query_factory(LOCKS)] = METRICS[LOCKS]
+ self.queries[query_factory(BGWRITER)] = METRICS[BGWRITER]
+ self.queries[query_factory(DIFF_LSN, self.server_version)] = METRICS[WAL_WRITES]
+ self.queries[query_factory(STANDBY_DELTA, self.server_version)] = METRICS[STANDBY_DELTA]
+
+ if self.do_index_stats:
+ self.queries[query_factory(INDEX_STATS)] = METRICS[INDEX_STATS]
+ if self.do_table_stats:
+ self.queries[query_factory(TABLE_STATS)] = METRICS[TABLE_STATS]
+
+ if self.is_superuser:
+ self.queries[query_factory(ARCHIVE, self.server_version)] = METRICS[ARCHIVE]
+
+ if self.server_version >= 90400:
+ self.queries[query_factory(WAL, self.server_version)] = METRICS[WAL]
+
+ if self.server_version >= 100000:
+ self.queries[query_factory(REPSLOT_FILES, self.server_version)] = METRICS[REPSLOT_FILES]
+
+ if self.server_version >= 90400:
+ self.queries[query_factory(AUTOVACUUM)] = METRICS[AUTOVACUUM]
+
+ def create_dynamic_charts(self):
+ for database_name in self.databases[::-1]:
+ dim = [
+ database_name + '_size',
+ database_name,
+ 'absolute',
+ 1,
+ 1024 * 1024,
+ ]
+ self.definitions['database_size']['lines'].append(dim)
+ for chart_name in [name for name in self.order if name.startswith('db_stat')]:
+ add_database_stat_chart(
+ order=self.order,
+ definitions=self.definitions,
+ name=chart_name,
+ database_name=database_name,
+ )
+ add_database_lock_chart(
+ order=self.order,
+ definitions=self.definitions,
+ database_name=database_name,
+ )
+
+ for application_name in self.secondaries[::-1]:
+ add_replication_delta_chart(
+ order=self.order,
+ definitions=self.definitions,
+ name='standby_delta',
+ application_name=application_name,
+ )
+
+ for slot_name in self.replication_slots[::-1]:
+ add_replication_slot_chart(
+ order=self.order,
+ definitions=self.definitions,
+ name='replication_slot',
+ slot_name=slot_name,
+ )
-def discover_replication_slots_(cursor, query):
+def discover(cursor, query):
cursor.execute(query)
result = list()
- for slot in [replication_slot[0] for replication_slot in cursor]:
- if slot not in result:
- result.append(slot)
+ for v in [value[0] for value in cursor]:
+ if v not in result:
+ result.append(v)
return result
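
The three copy-pasted discovery helpers collapse into one generic `discover()`. It keeps first-seen order while dropping duplicates, which matters because chart creation below walks the lists in (reversed) order. The same idiom in isolation:

```python
def unique_in_order(values):
    """Order-preserving de-duplication, as discover() does per cursor row."""
    result = []
    for v in values:
        if v not in result:
            result.append(v)
    return result

assert unique_in_order(['db1', 'db2', 'db1']) == ['db1', 'db2']
```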
-def check_if_superuser_(cursor, query):
+def check_if_superuser(cursor, query):
cursor.execute(query)
return cursor.fetchone()[0]
@@ -743,7 +1008,7 @@ def detect_server_version(cursor, query):
return int(cursor.fetchone()[0])
-def populate_lock_types(databases):
+def zero_lock_types(databases):
result = dict()
for database in databases:
for lock_type in METRICS['LOCKS']:
@@ -753,7 +1018,11 @@ def populate_lock_types(databases):
return result
-def add_database_lock_chart_(order, definitions, database_name):
+def hide_password(config):
+ return dict((k, v if k != 'password' else '*****') for k, v in config.items())
+
+
+def add_database_lock_chart(order, definitions, database_name):
def create_lines(database):
result = list()
for lock_type in METRICS['LOCKS']:
@@ -770,7 +1039,7 @@ def add_database_lock_chart_(order, definitions, database_name):
}
-def add_database_stat_chart_(order, definitions, name, database_name):
+def add_database_stat_chart(order, definitions, name, database_name):
def create_lines(database, lines):
result = list()
for line in lines:
@@ -787,7 +1056,7 @@ def add_database_stat_chart_(order, definitions, name, database_name):
'lines': create_lines(database_name, chart_template['lines'])}
-def add_replication_delta_chart_(order, definitions, name, application_name):
+def add_replication_delta_chart(order, definitions, name, application_name):
def create_lines(standby, lines):
result = list()
for line in lines:
@@ -799,13 +1068,13 @@ def add_replication_delta_chart_(order, definitions, name, application_name):
chart_name = '_'.join([application_name, name])
position = order.index('database_size')
order.insert(position, chart_name)
- name, title, units, family, context, chart_type = chart_template['options']
+ name, title, units, _, context, chart_type = chart_template['options']
definitions[chart_name] = {
'options': [name, title + ': ' + application_name, units, 'replication delta', context, chart_type],
'lines': create_lines(application_name, chart_template['lines'])}
-def add_replication_slot_chart_(order, definitions, name, slot_name):
+def add_replication_slot_chart(order, definitions, name, slot_name):
def create_lines(slot, lines):
result = list()
for line in lines:
@@ -817,7 +1086,7 @@ def add_replication_slot_chart_(order, definitions, name, slot_name):
chart_name = '_'.join([slot_name, name])
position = order.index('database_size')
order.insert(position, chart_name)
- name, title, units, family, context, chart_type = chart_template['options']
+ name, title, units, _, context, chart_type = chart_template['options']
definitions[chart_name] = {
'options': [name, title + ': ' + slot_name, units, 'replication slot files', context, chart_type],
'lines': create_lines(slot_name, chart_template['lines'])}
diff --git a/collectors/python.d.plugin/postgres/postgres.conf b/collectors/python.d.plugin/postgres/postgres.conf
index b69ca371..cde698f3 100644
--- a/collectors/python.d.plugin/postgres/postgres.conf
+++ b/collectors/python.d.plugin/postgres/postgres.conf
@@ -27,11 +27,9 @@
# If unset, the default for python.d.plugin is used.
# priority: 60000
-# retries sets the number of retries to be made in case of failures.
-# If unset, the default for python.d.plugin is used.
-# Attempts to restore the service are made once every update_every
-# and only if the module has collected values in the past.
-# retries: 60
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
# autodetection_retry sets the job re-check interval in seconds.
# The job is not deleted if check fails.
@@ -58,18 +56,20 @@
# # JOBs sharing a name are mutually exclusive
# update_every: 1 # the JOB's data collection frequency
# priority: 60000 # the JOB's order on the dashboard
-# retries: 60 # the JOB's number of restoration attempts
+# penalty: yes # the JOB's penalty
# autodetection_retry: 0 # the JOB's re-check interval in seconds
#
# A single connection is required in order to pull statistics.
#
# Connections can be configured with the following options:
#
-# database : 'example_db_name'
-# user : 'example_user'
-# password : 'example_pass'
-# host : 'localhost'
-# port : 5432
+# database : 'example_db_name'
+# user : 'example_user'
+# password : 'example_pass'
+# host : 'localhost'
+# port : 5432
+# connect_timeout : 2 # in seconds, default is 2
+# statement_timeout : 2000 # in ms, default is 2000
#
# Additionally, the following options allow selective disabling of charts
#
diff --git a/collectors/python.d.plugin/powerdns/README.md b/collectors/python.d.plugin/powerdns/README.md
index 3c4b145e..61aa5f6b 100644
--- a/collectors/python.d.plugin/powerdns/README.md
+++ b/collectors/python.d.plugin/powerdns/README.md
@@ -75,3 +75,5 @@ local:
```
---
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fpowerdns%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/collectors/python.d.plugin/powerdns/powerdns.chart.py b/collectors/python.d.plugin/powerdns/powerdns.chart.py
index 4264621b..7ed1554f 100644
--- a/collectors/python.d.plugin/powerdns/powerdns.chart.py
+++ b/collectors/python.d.plugin/powerdns/powerdns.chart.py
@@ -8,11 +8,14 @@ from json import loads
from bases.FrameworkServices.UrlService import UrlService
-priority = 60000
-retries = 60
-# update_every = 3
-ORDER = ['questions', 'cache_usage', 'cache_size', 'latency']
+ORDER = [
+ 'questions',
+ 'cache_usage',
+ 'cache_size',
+ 'latency',
+]
+
CHARTS = {
'questions': {
'options': [None, 'PowerDNS Queries and Answers', 'count', 'questions', 'powerdns.questions', 'line'],
diff --git a/collectors/python.d.plugin/powerdns/powerdns.conf b/collectors/python.d.plugin/powerdns/powerdns.conf
index ca6200df..559bf175 100644
--- a/collectors/python.d.plugin/powerdns/powerdns.conf
+++ b/collectors/python.d.plugin/powerdns/powerdns.conf
@@ -27,11 +27,9 @@
# If unset, the default for python.d.plugin is used.
# priority: 60000
-# retries sets the number of retries to be made in case of failures.
-# If unset, the default for python.d.plugin is used.
-# Attempts to restore the service are made once every update_every
-# and only if the module has collected values in the past.
-# retries: 60
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
# autodetection_retry sets the job re-check interval in seconds.
# The job is not deleted if check fails.
@@ -58,7 +56,7 @@
# # JOBs sharing a name are mutually exclusive
# update_every: 1 # the JOB's data collection frequency
# priority: 60000 # the JOB's order on the dashboard
-# retries: 60 # the JOB's number of restoration attempts
+# penalty: yes # the JOB's penalty
# autodetection_retry: 0 # the JOB's re-check interval in seconds
#
# Additionally to the above, apache also supports the following:
diff --git a/collectors/python.d.plugin/proxysql/README.md b/collectors/python.d.plugin/proxysql/README.md
index 02388276..6e5a2127 100644
--- a/collectors/python.d.plugin/proxysql/README.md
+++ b/collectors/python.d.plugin/proxysql/README.md
@@ -60,3 +60,5 @@ tcpipv4:
If no configuration is given, module will fail to run.
---
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fproxysql%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/collectors/python.d.plugin/proxysql/proxysql.chart.py b/collectors/python.d.plugin/proxysql/proxysql.chart.py
index f7e3d49f..c9714748 100644
--- a/collectors/python.d.plugin/proxysql/proxysql.chart.py
+++ b/collectors/python.d.plugin/proxysql/proxysql.chart.py
@@ -5,11 +5,6 @@
from bases.FrameworkServices.MySQLService import MySQLService
-# default module values (can be overridden per job in `config`)
-# update_every = 3
-priority = 60000
-retries = 60
-
def query(table, *params):
return 'SELECT {params} FROM {table}'.format(table=table, params=', '.join(params))
@@ -133,8 +128,8 @@ CHARTS = {
'options': [None, 'ProxySQL Backend Overall Bandwidth', 'kilobits/s', 'overall_bandwidth',
'proxysql.pool_overall_net', 'area'],
'lines': [
- ['bytes_data_recv', 'in', 'incremental', 8, 1024],
- ['bytes_data_sent', 'out', 'incremental', -8, 1024]
+ ['bytes_data_recv', 'in', 'incremental', 8, 1000],
+ ['bytes_data_sent', 'out', 'incremental', -8, 1000]
]
},
'questions': {
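
Besides the relabelled latency charts, the bandwidth divisor quietly changes from 1024 to 1000: bit rates use decimal prefixes, so bytes/s × 8 ÷ 1000 is the correct kilobits/s, while the old 1024 divisor actually produced kibibits under a kilobits label. Numerically:

```python
bytes_per_sec = 125000            # exactly 1 megabit per second of traffic
print(bytes_per_sec * 8 / 1000)   # 1000.0    kilobits/s (new, correct)
print(bytes_per_sec * 8 / 1024)   # 976.5625  "kilobits"/s (old, ~2.4% low)
```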
@@ -156,7 +151,7 @@ CHARTS = {
]
},
'pool_latency': {
- 'options': [None, 'ProxySQL Backend Latency', 'ms', 'latency', 'proxysql.latency', 'line'],
+ 'options': [None, 'ProxySQL Backend Latency', 'milliseconds', 'latency', 'proxysql.latency', 'line'],
'lines': []
},
'connections': {
@@ -194,7 +189,7 @@ CHARTS = {
'lines': []
},
'commands_duration': {
- 'options': [None, 'ProxySQL Commands Duration', 'ms', 'commands', 'proxysql.commands_duration', 'line'],
+ 'options': [None, 'ProxySQL Commands Duration', 'milliseconds', 'commands', 'proxysql.commands_duration', 'line'],
'lines': []
}
}
diff --git a/collectors/python.d.plugin/proxysql/proxysql.conf b/collectors/python.d.plugin/proxysql/proxysql.conf
index d29c2e5b..3c503a89 100644
--- a/collectors/python.d.plugin/proxysql/proxysql.conf
+++ b/collectors/python.d.plugin/proxysql/proxysql.conf
@@ -27,11 +27,9 @@
# If unset, the default for python.d.plugin is used.
# priority: 60000
-# retries sets the number of retries to be made in case of failures.
-# If unset, the default for python.d.plugin is used.
-# Attempts to restore the service are made once every update_every
-# and only if the module has collected values in the past.
-# retries: 60
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
# autodetection_retry sets the job re-check interval in seconds.
# The job is not deleted if check fails.
@@ -58,7 +56,7 @@
# # JOBs sharing a name are mutually exclusive
# update_every: 1 # the JOB's data collection frequency
# priority: 60000 # the JOB's order on the dashboard
-# retries: 60 # the JOB's number of restoration attempts
+# penalty: yes # the JOB's penalty
# autodetection_retry: 0 # the JOB's re-check interval in seconds
#
# Additionally to the above, proxysql also supports the following:
diff --git a/collectors/python.d.plugin/puppet/README.md b/collectors/python.d.plugin/puppet/README.md
index 8304c831..b97eb70c 100644
--- a/collectors/python.d.plugin/puppet/README.md
+++ b/collectors/python.d.plugin/puppet/README.md
@@ -26,16 +26,13 @@ puppetdb:
tls_cert_file: /path/to/client.crt
tls_key_file: /path/to/client.key
autodetection_retry: 1
- retries: 3600
puppetserver:
url: 'https://fqdn.example.com:8140'
autodetection_retry: 1
- retries: 3600
```
-When no configuration is given then `https://fqdn.example.com:8140` is
-tried without any retries.
+When no configuration is given, module uses `https://fqdn.example.com:8140`.
### notes
@@ -46,3 +43,5 @@ tried without any retries.
to default PuppetDB configuration though.
---
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fpuppet%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/collectors/python.d.plugin/puppet/puppet.chart.py b/collectors/python.d.plugin/puppet/puppet.chart.py
index 5c8e48bd..30e219da 100644
--- a/collectors/python.d.plugin/puppet/puppet.chart.py
+++ b/collectors/python.d.plugin/puppet/puppet.chart.py
@@ -11,29 +11,31 @@
# and tls_cert_file options then.
#
-from bases.FrameworkServices.UrlService import UrlService
-from json import loads
import socket
+from json import loads
+
+from bases.FrameworkServices.UrlService import UrlService
+
update_every = 5
-priority = 60000
-# very long clojure-based service startup time
-retries = 180
-MB = 1048576
+
+MiB = 1 << 20
CPU_SCALE = 1000
+
ORDER = [
'jvm_heap',
'jvm_nonheap',
'cpu',
'fd_open',
]
+
CHARTS = {
'jvm_heap': {
- 'options': [None, 'JVM Heap', 'MB', 'resources', 'puppet.jvm', 'area'],
+ 'options': [None, 'JVM Heap', 'MiB', 'resources', 'puppet.jvm', 'area'],
'lines': [
- ['jvm_heap_committed', 'committed', 'absolute', 1, MB],
- ['jvm_heap_used', 'used', 'absolute', 1, MB],
+ ['jvm_heap_committed', 'committed', 'absolute', 1, MiB],
+ ['jvm_heap_used', 'used', 'absolute', 1, MiB],
],
'variables': [
['jvm_heap_max'],
@@ -41,10 +43,10 @@ CHARTS = {
],
},
'jvm_nonheap': {
- 'options': [None, 'JVM Non-Heap', 'MB', 'resources', 'puppet.jvm', 'area'],
+ 'options': [None, 'JVM Non-Heap', 'MiB', 'resources', 'puppet.jvm', 'area'],
'lines': [
- ['jvm_nonheap_committed', 'committed', 'absolute', 1, MB],
- ['jvm_nonheap_used', 'used', 'absolute', 1, MB],
+ ['jvm_nonheap_committed', 'committed', 'absolute', 1, MiB],
+ ['jvm_nonheap_used', 'used', 'absolute', 1, MiB],
],
'variables': [
['jvm_nonheap_max'],
@@ -73,9 +75,9 @@ CHARTS = {
class Service(UrlService):
def __init__(self, configuration=None, name=None):
UrlService.__init__(self, configuration=configuration, name=name)
- self.url = 'https://{0}:8140'.format(socket.getfqdn())
self.order = ORDER
self.definitions = CHARTS
+ self.url = 'https://{0}:8140'.format(socket.getfqdn())
def _get_data(self):
# NOTE: there are several ways to retrieve data
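
`MB = 1048576` becoming `MiB = 1 << 20` is cosmetic plus truth-in-labelling: the divisor was always the binary megabyte, so the renamed constant and the MiB chart units now agree with the arithmetic. The two spellings are identical:

```python
MiB = 1 << 20
assert MiB == 1048576 == 2 ** 20
```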
diff --git a/collectors/python.d.plugin/puppet/puppet.conf b/collectors/python.d.plugin/puppet/puppet.conf
index 991bfabe..ff5c3d02 100644
--- a/collectors/python.d.plugin/puppet/puppet.conf
+++ b/collectors/python.d.plugin/puppet/puppet.conf
@@ -27,11 +27,9 @@
# If unset, the default for python.d.plugin is used.
# priority: 60000
-# retries sets the number of retries to be made in case of failures.
-# If unset, the default for python.d.plugin is used.
-# Attempts to restore the service are made once every update_every
-# and only if the module has collected values in the past.
-# retries: 60
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
# autodetection_retry sets the job re-check interval in seconds.
# The job is not deleted if check fails.
@@ -58,7 +56,7 @@
# # JOBs sharing a name are mutually exclusive
# update_every: 1 # the JOB's data collection frequency
# priority: 60000 # the JOB's order on the dashboard
-# retries: 60 # the JOB's number of restoration attempts
+# penalty: yes # the JOB's penalty
# autodetection_retry: 0 # the JOB's re-check interval in seconds
#
# These configuration comes from UrlService base:
@@ -89,10 +87,8 @@
# tls_cert_file: /path/to/client.crt
# tls_key_file: /path/to/client.key
# autodetection_retry: 1
-# retries: 3600
#
# puppetserver:
# url: 'https://fqdn.example.com:8140'
# autodetection_retry: 1
-# retries: 3600
#
diff --git a/collectors/python.d.plugin/python.d.conf b/collectors/python.d.plugin/python.d.conf
index 40c8c033..72236209 100644
--- a/collectors/python.d.plugin/python.d.conf
+++ b/collectors/python.d.plugin/python.d.conf
@@ -28,6 +28,7 @@ gc_interval: 300
# apache: yes
# apache_cache has been replaced by web_log
+# adaptec_raid: yes
apache_cache: no
# beanstalk: yes
# bind_rndc: yes
@@ -39,6 +40,7 @@ chrony: no
# cpuidle: yes
# dns_query_time: yes
# dnsdist: yes
+# dockerd: yes
# dovecot: yes
# elasticsearch: yes
@@ -54,6 +56,7 @@ go_expvar: no
gunicorn_log: no
# haproxy: yes
# hddtemp: yes
+# httpcheck: yes
# icecast: yes
# ipfs: yes
# isc_dhcpd: yes
@@ -61,6 +64,7 @@ gunicorn_log: no
# litespeed: yes
logind: no
# mdstat: yes
+# megacli: yes
# memcached: yes
# mongodb: yes
# monit: yes
@@ -76,6 +80,7 @@ nginx_log: no
# openldap: yes
# ovpn_status_log: yes
# phpfpm: yes
+# portcheck: yes
# postfix: yes
# postgres: yes
# powerdns: yes
@@ -91,6 +96,7 @@ nginx_log: no
# spigotmc: yes
# springboot: yes
# squid: yes
+# traefik: yes
# tomcat: yes
# tor: yes
unbound: no
diff --git a/collectors/python.d.plugin/python.d.plugin b/collectors/python.d.plugin/python.d.plugin
deleted file mode 100644
index efff2273..00000000
--- a/collectors/python.d.plugin/python.d.plugin
+++ /dev/null
@@ -1,427 +0,0 @@
-#!/usr/bin/env bash
-'''':; exec "$(command -v python || command -v python3 || command -v python2 ||
-echo "ERROR python IS NOT AVAILABLE IN THIS SYSTEM")" "$0" "$@" # '''
-
-# -*- coding: utf-8 -*-
-# Description:
-# Author: Pawel Krupa (paulfantom)
-# Author: Ilya Mashchenko (l2isbad)
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-import gc
-import os
-import sys
-import threading
-
-from re import sub
-from sys import version_info, argv
-from time import sleep
-
-GC_RUN = True
-GC_COLLECT_EVERY = 300
-
-PY_VERSION = version_info[:2]
-
-USER_CONFIG_DIR = os.getenv('NETDATA_USER_CONFIG_DIR', '/usr/local/etc/netdata')
-STOCK_CONFIG_DIR = os.getenv('NETDATA_STOCK_CONFIG_DIR', '/usr/local/lib/netdata/conf.d')
-
-PLUGINS_USER_CONFIG_DIR = os.path.join(USER_CONFIG_DIR, 'python.d')
-PLUGINS_STOCK_CONFIG_DIR = os.path.join(STOCK_CONFIG_DIR, 'python.d')
-
-
-PLUGINS_DIR = os.path.abspath(os.getenv(
- 'NETDATA_PLUGINS_DIR',
- os.path.dirname(__file__)) + '/../python.d')
-
-
-PYTHON_MODULES_DIR = os.path.join(PLUGINS_DIR, 'python_modules')
-
-sys.path.append(PYTHON_MODULES_DIR)
-
-from bases.loaders import ModuleAndConfigLoader # noqa: E402
-from bases.loggers import PythonDLogger # noqa: E402
-from bases.collection import setdefault_values, run_and_exit # noqa: E402
-
-try:
- from collections import OrderedDict
-except ImportError:
- from third_party.ordereddict import OrderedDict
-
-BASE_CONFIG = {'update_every': os.getenv('NETDATA_UPDATE_EVERY', 1),
- 'retries': 60,
- 'priority': 60000,
- 'autodetection_retry': 0,
- 'chart_cleanup': 10,
- 'name': str()}
-
-
-MODULE_EXTENSION = '.chart.py'
-OBSOLETE_MODULES = ['apache_cache', 'gunicorn_log', 'nginx_log', 'cpufreq']
-
-
-def module_ok(m):
- return m.endswith(MODULE_EXTENSION) and m[:-len(MODULE_EXTENSION)] not in OBSOLETE_MODULES
-
-
-ALL_MODULES = [m for m in sorted(os.listdir(PLUGINS_DIR)) if module_ok(m)]
-
-
-def parse_cmd():
- debug = 'debug' in argv[1:]
- trace = 'trace' in argv[1:]
- override_update_every = next((arg for arg in argv[1:] if arg.isdigit() and int(arg) > 1), False)
- modules = [''.join([m, MODULE_EXTENSION]) for m in argv[1:] if ''.join([m, MODULE_EXTENSION]) in ALL_MODULES]
- return debug, trace, override_update_every, modules or ALL_MODULES
-
-
-def multi_job_check(config):
- return next((True for key in config if isinstance(config[key], dict)), False)
-
-
-class RawModule:
- def __init__(self, name, path, explicitly_enabled=True):
- self.name = name
- self.path = path
- self.explicitly_enabled = explicitly_enabled
-
-
-class Job(object):
- def __init__(self, initialized_job, job_id):
- """
- :param initialized_job: instance of <Class Service>
- :param job_id: <str>
- """
- self.job = initialized_job
- self.id = job_id # key in Modules.jobs()
- self.module_name = self.job.__module__ # used in Plugin.delete_job()
- self.recheck_every = self.job.configuration.pop('autodetection_retry')
- self.checked = False # used in Plugin.check_job()
- self.created = False # used in Plugin.create_job_charts()
- if self.job.update_every < int(OVERRIDE_UPDATE_EVERY):
- self.job.update_every = int(OVERRIDE_UPDATE_EVERY)
-
- def __getattr__(self, item):
- return getattr(self.job, item)
-
- def __repr__(self):
- return self.job.__repr__()
-
- def is_dead(self):
- return bool(self.ident) and not self.is_alive()
-
- def not_launched(self):
- return not bool(self.ident)
-
- def is_autodetect(self):
- return self.recheck_every
-
-
-class Module(object):
- def __init__(self, service, config):
- """
- :param service: <Module>
- :param config: <dict>
- """
- self.service = service
- self.name = service.__name__
- self.config = self.jobs_configurations_builder(config)
- self.jobs = OrderedDict()
- self.counter = 1
-
- self.initialize_jobs()
-
- def __repr__(self):
- return "<Class Module '{name}'>".format(name=self.name)
-
- def __iter__(self):
- return iter(OrderedDict(self.jobs).values())
-
- def __getitem__(self, item):
- return self.jobs[item]
-
- def __delitem__(self, key):
- del self.jobs[key]
-
- def __len__(self):
- return len(self.jobs)
-
- def __bool__(self):
- return bool(self.jobs)
-
- def __nonzero__(self):
- return self.__bool__()
-
- def jobs_configurations_builder(self, config):
- """
- :param config: <dict>
- :return:
- """
- counter = 0
- job_base_config = dict()
-
- for attr in BASE_CONFIG:
- job_base_config[attr] = config.pop(attr, getattr(self.service, attr, BASE_CONFIG[attr]))
-
- if not config:
- config = {str(): dict()}
- elif not multi_job_check(config):
- config = {str(): config}
-
- for job_name in config:
- if not isinstance(config[job_name], dict):
- continue
-
- job_config = setdefault_values(config[job_name], base_dict=job_base_config)
- job_name = sub(r'\s+', '_', job_name)
- config[job_name]['name'] = sub(r'\s+', '_', config[job_name]['name'])
- counter += 1
- job_id = 'job' + str(counter).zfill(3)
-
- yield job_id, job_name, job_config
-
- def initialize_jobs(self):
- """
- :return:
- """
- for job_id, job_name, job_config in self.config:
- job_config['job_name'] = job_name
- job_config['override_name'] = job_config.pop('name')
-
- try:
- initialized_job = self.service.Service(configuration=job_config)
- except Exception as error:
- Logger.error("job initialization: '{module_name} {job_name}' "
- "=> ['FAILED'] ({error})".format(module_name=self.name,
- job_name=job_name,
- error=error))
- continue
- else:
- Logger.debug("job initialization: '{module_name} {job_name}' "
- "=> ['OK']".format(module_name=self.name,
- job_name=job_name or self.name))
- self.jobs[job_id] = Job(initialized_job=initialized_job,
- job_id=job_id)
- del self.config
- del self.service
-
-
-class Plugin(object):
- def __init__(self):
- self.loader = ModuleAndConfigLoader()
- self.modules = OrderedDict()
- self.sleep_time = 1
- self.runs_counter = 0
-
- user_config = os.path.join(USER_CONFIG_DIR, 'python.d.conf')
- stock_config = os.path.join(STOCK_CONFIG_DIR, 'python.d.conf')
-
- Logger.debug("loading '{0}'".format(user_config))
- self.config, error = self.loader.load_config_from_file(user_config)
-
- if error:
- Logger.error("cannot load '{0}': {1}. Will try stock version.".format(user_config, error))
- Logger.debug("loading '{0}'".format(stock_config))
- self.config, error = self.loader.load_config_from_file(stock_config)
- if error:
- Logger.error("cannot load '{0}': {1}".format(stock_config, error))
-
- self.do_gc = self.config.get("gc_run", GC_RUN)
- self.gc_interval = self.config.get("gc_interval", GC_COLLECT_EVERY)
-
- if not self.config.get('enabled', True):
- run_and_exit(Logger.info)('DISABLED in configuration file.')
-
- self.load_and_initialize_modules()
- if not self.modules:
- run_and_exit(Logger.info)('No modules to run. Exit...')
-
- def __iter__(self):
- return iter(OrderedDict(self.modules).values())
-
- @property
- def jobs(self):
- return (job for mod in self for job in mod)
-
- @property
- def dead_jobs(self):
- return (job for job in self.jobs if job.is_dead())
-
- @property
- def autodetect_jobs(self):
- return [job for job in self.jobs if job.not_launched()]
-
- def enabled_modules(self):
- for mod in MODULES_TO_RUN:
- mod_name = mod[:-len(MODULE_EXTENSION)]
- mod_path = os.path.join(PLUGINS_DIR, mod)
- if any(
- [
- self.config.get('default_run', True) and self.config.get(mod_name, True),
- (not self.config.get('default_run')) and self.config.get(mod_name),
- ]
- ):
- yield RawModule(
- name=mod_name,
- path=mod_path,
- explicitly_enabled=self.config.get(mod_name),
- )
-
- def load_and_initialize_modules(self):
- for mod in self.enabled_modules():
-
- # Load module from file ------------------------------------------------------------
- loaded_module, error = self.loader.load_module_from_file(mod.name, mod.path)
- log = Logger.error if error else Logger.debug
- log("module load source: '{module_name}' => [{status}]".format(status='FAILED' if error else 'OK',
- module_name=mod.name))
- if error:
- Logger.error("load source error : {0}".format(error))
- continue
-
- # Load module config from file ------------------------------------------------------
- user_config = os.path.join(PLUGINS_USER_CONFIG_DIR, mod.name + '.conf')
- stock_config = os.path.join(PLUGINS_STOCK_CONFIG_DIR, mod.name + '.conf')
-
- Logger.debug("loading '{0}'".format(user_config))
- loaded_config, error = self.loader.load_config_from_file(user_config)
- if error:
- Logger.error("cannot load '{0}' : {1}. Will try stock version.".format(user_config, error))
- Logger.debug("loading '{0}'".format(stock_config))
- loaded_config, error = self.loader.load_config_from_file(stock_config)
-
- if error:
- Logger.error("cannot load '{0}': {1}".format(stock_config, error))
-
- # Skip disabled modules
- if getattr(loaded_module, 'disabled_by_default', False) and not mod.explicitly_enabled:
- Logger.info("module '{0}' disabled by default".format(loaded_module.__name__))
- continue
-
- # Module initialization ---------------------------------------------------
-
- initialized_module = Module(service=loaded_module, config=loaded_config)
- Logger.debug("module status: '{module_name}' => [{status}] "
- "(jobs: {jobs_number})".format(status='OK' if initialized_module else 'FAILED',
- module_name=initialized_module.name,
- jobs_number=len(initialized_module)))
- if initialized_module:
- self.modules[initialized_module.name] = initialized_module
-
- @staticmethod
- def check_job(job):
- """
- :param job: <Job>
- :return:
- """
- try:
- check_ok = bool(job.check())
- except Exception as error:
- job.error('check() unhandled exception: {error}'.format(error=error))
- return None
- else:
- return check_ok
-
- @staticmethod
- def create_job_charts(job):
- """
- :param job: <Job>
- :return:
- """
- try:
- create_ok = job.create()
- except Exception as error:
- job.error('create() unhandled exception: {error}'.format(error=error))
- return False
- else:
- return create_ok
-
- def delete_job(self, job):
- """
- :param job: <Job>
- :return:
- """
- del self.modules[job.module_name][job.id]
-
- def run_check(self):
- checked = list()
- for job in self.jobs:
- if job.name in checked:
- job.info('check() => [DROPPED] (already served by another job)')
- self.delete_job(job)
- continue
- ok = self.check_job(job)
- if ok:
- job.info('check() => [OK]')
- checked.append(job.name)
- job.checked = True
- continue
- if not job.is_autodetect() or ok is None:
- job.info('check() => [FAILED]')
- self.delete_job(job)
- else:
- job.info('check() => [RECHECK] (autodetection_retry: {0})'.format(job.recheck_every))
-
- def run_create(self):
- for job in self.jobs:
- if not job.checked:
- # skip autodetection_retry jobs
- continue
- ok = self.create_job_charts(job)
- if ok:
- job.debug('create() => [OK] (charts: {0})'.format(len(job.charts)))
- job.created = True
- continue
- job.error('create() => [FAILED] (charts: {0})'.format(len(job.charts)))
- self.delete_job(job)
-
- def start(self):
- self.run_check()
- self.run_create()
- for job in self.jobs:
- if job.created:
- job.start()
-
- while True:
- if threading.active_count() <= 1 and not self.autodetect_jobs:
- run_and_exit(Logger.info)('FINISHED')
-
- sleep(self.sleep_time)
- self.cleanup()
- self.autodetect_retry()
-
- # FIXME: https://github.com/netdata/netdata/issues/3817
- if self.do_gc and self.runs_counter % self.gc_interval == 0:
- v = gc.collect()
- Logger.debug("GC full collection run result: {0}".format(v))
-
- def cleanup(self):
- for job in self.dead_jobs:
- self.delete_job(job)
- for mod in self:
- if not mod:
- del self.modules[mod.name]
-
- def autodetect_retry(self):
- self.runs_counter += self.sleep_time
- for job in self.autodetect_jobs:
- if self.runs_counter % job.recheck_every == 0:
- checked = self.check_job(job)
- if checked:
- created = self.create_job_charts(job)
- if not created:
- self.delete_job(job)
- continue
- job.start()
-
-
-if __name__ == '__main__':
- DEBUG, TRACE, OVERRIDE_UPDATE_EVERY, MODULES_TO_RUN = parse_cmd()
- Logger = PythonDLogger()
- if DEBUG:
- Logger.logger.severity = 'DEBUG'
- if TRACE:
- Logger.log_traceback = True
- Logger.info('Using python {version}'.format(version=PY_VERSION[0]))
-
- plugin = Plugin()
- plugin.start()
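
Note: the module enable/disable resolution in the deleted file's enabled_modules() (kept intact in python.d.plugin.in) reduces to this rule, sketched for clarity:

    def is_enabled(config, mod_name):
        # default_run on (the default): run everything not explicitly disabled
        if config.get('default_run', True):
            return config.get(mod_name, True)
        # default_run off: run only explicitly enabled modules
        return bool(config.get(mod_name))

    is_enabled({'default_run': False, 'nginx': True}, 'nginx')  # True
    is_enabled({}, 'nginx')                                     # True (default_run defaults on)
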
diff --git a/collectors/python.d.plugin/python.d.plugin.in b/collectors/python.d.plugin/python.d.plugin.in
old mode 100755
new mode 100644
index 8b55ad41..6521fed9
--- a/collectors/python.d.plugin/python.d.plugin.in
+++ b/collectors/python.d.plugin/python.d.plugin.in
@@ -48,15 +48,15 @@ except ImportError:
from third_party.ordereddict import OrderedDict
BASE_CONFIG = {'update_every': os.getenv('NETDATA_UPDATE_EVERY', 1),
- 'retries': 60,
'priority': 60000,
'autodetection_retry': 0,
'chart_cleanup': 10,
+ 'penalty': True,
'name': str()}
MODULE_EXTENSION = '.chart.py'
-OBSOLETE_MODULES = ['apache_cache', 'gunicorn_log', 'nginx_log', 'cpufreq']
+OBSOLETE_MODULES = ['apache_cache', 'gunicorn_log', 'nginx_log', 'cpufreq', 'cpuidle', 'mdstat', 'linux_power_supply']
def module_ok(m):
diff --git a/collectors/python.d.plugin/python_modules/bases/FrameworkServices/MySQLService.py b/collectors/python.d.plugin/python_modules/bases/FrameworkServices/MySQLService.py
index 53807e2c..9a694aa8 100644
--- a/collectors/python.d.plugin/python_modules/bases/FrameworkServices/MySQLService.py
+++ b/collectors/python.d.plugin/python_modules/bases/FrameworkServices/MySQLService.py
@@ -131,20 +131,22 @@ class MySQLService(SimpleService):
raw_data = dict()
queries = dict(self.queries)
try:
- with self.__connection as cursor:
- for name, query in queries.items():
- try:
- cursor.execute(query)
- except (MySQLdb.ProgrammingError, MySQLdb.OperationalError) as error:
- if self.__is_error_critical(err_class=exc_info()[0], err_text=str(error)):
- raise RuntimeError
- self.error('Removed query: {name}[{query}]. Error: error'.format(name=name,
- query=query,
- error=error))
- self.queries.pop(name)
- continue
- else:
- raw_data[name] = (cursor.fetchall(), cursor.description) if description else cursor.fetchall()
+ cursor = self.__connection.cursor()
+ for name, query in queries.items():
+ try:
+ cursor.execute(query)
+ except (MySQLdb.ProgrammingError, MySQLdb.OperationalError) as error:
+ if self.__is_error_critical(err_class=exc_info()[0], err_text=str(error)):
+ cursor.close()
+ raise RuntimeError
+ self.error('Removed query: {name}[{query}]. Error: {error}'.format(name=name,
+ query=query,
+ error=error))
+ self.queries.pop(name)
+ continue
+ else:
+ raw_data[name] = (cursor.fetchall(), cursor.description) if description else cursor.fetchall()
+ cursor.close()
self.__connection.commit()
except (MySQLdb.MySQLError, RuntimeError, TypeError, AttributeError):
self.__connection.close()
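
Note: the hunk above replaces the connection-as-context-manager idiom, whose semantics differ across MySQLdb versions, with an explicit cursor closed on both paths. The equivalent pattern with try/finally, sketched with hypothetical names:

    def run_queries(connection, queries):
        # connection: a MySQLdb connection; queries: dict of name -> SQL
        results = {}
        cursor = connection.cursor()
        try:
            for name, query in queries.items():
                cursor.execute(query)
                results[name] = cursor.fetchall()
        finally:
            cursor.close()               # closed on success and on error alike
        connection.commit()
        return results
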
diff --git a/collectors/python.d.plugin/python_modules/bases/FrameworkServices/SimpleService.py b/collectors/python.d.plugin/python_modules/bases/FrameworkServices/SimpleService.py
index dd53fbc1..c7ab7f24 100644
--- a/collectors/python.d.plugin/python_modules/bases/FrameworkServices/SimpleService.py
+++ b/collectors/python.d.plugin/python_modules/bases/FrameworkServices/SimpleService.py
@@ -5,7 +5,7 @@
# SPDX-License-Identifier: GPL-3.0-or-later
from threading import Thread
-from time import sleep
+from time import sleep, time
from third_party.monotonic import monotonic
@@ -17,25 +17,42 @@ RUNTIME_CHART_UPDATE = 'BEGIN netdata.runtime_{job_name} {since_last}\n' \
'SET run_time = {elapsed}\n' \
'END\n'
+PENALTY_EVERY = 5
+MAX_PENALTY = 10 * 60 # 10 minutes
+
class RuntimeCounters:
def __init__(self, configuration):
"""
:param configuration: <dict>
"""
- self.FREQ = int(configuration.pop('update_every'))
- self.START_RUN = 0
- self.NEXT_RUN = 0
- self.PREV_UPDATE = 0
- self.SINCE_UPDATE = 0
- self.ELAPSED = 0
- self.RETRIES = 0
- self.RETRIES_MAX = configuration.pop('retries')
- self.PENALTY = 0
- self.RUNS = 1
+ self.update_every = int(configuration.pop('update_every'))
+ self.do_penalty = configuration.pop('penalty')
+
+ self.start_mono = 0
+ self.start_real = 0
+ self.retries = 0
+ self.penalty = 0
+ self.elapsed = 0
+ self.prev_update = 0
+
+ self.runs = 1
- def is_sleep_time(self):
- return self.START_RUN < self.NEXT_RUN
+ def calc_next(self):
+ self.start_mono = monotonic()
+ return self.start_mono - (self.start_mono % self.update_every) + self.update_every + self.penalty
+
+ def sleep_until_next(self):
+ next_time = self.calc_next()
+ while self.start_mono < next_time:
+ sleep(next_time - self.start_mono)
+ self.start_mono = monotonic()
+ self.start_real = time()
+
+ def handle_retries(self):
+ self.retries += 1
+ if self.do_penalty and self.retries % PENALTY_EVERY == 0:
+ self.penalty = round(min(self.retries * self.update_every / 2, MAX_PENALTY))
class SimpleService(Thread, PythonDLimitedLogger, OldVersionCompatibility, object):
@@ -83,11 +100,11 @@ class SimpleService(Thread, PythonDLimitedLogger, OldVersionCompatibility, objec
@property
def runs_counter(self):
- return self._runtime_counters.RUNS
+ return self._runtime_counters.runs
@property
def update_every(self):
- return self._runtime_counters.FREQ
+ return self._runtime_counters.update_every
@update_every.setter
def update_every(self, value):
@@ -95,7 +112,7 @@ class SimpleService(Thread, PythonDLimitedLogger, OldVersionCompatibility, objec
:param value: <int>
:return:
"""
- self._runtime_counters.FREQ = value
+ self._runtime_counters.update_every = value
def get_update_every(self):
return self.update_every
@@ -163,41 +180,36 @@ class SimpleService(Thread, PythonDLimitedLogger, OldVersionCompatibility, objec
:return: None
"""
job = self._runtime_counters
- self.debug('started, update frequency: {freq}, '
- 'retries: {retries}'.format(freq=job.FREQ, retries=job.RETRIES_MAX - job.RETRIES))
+ self.debug('started, update frequency: {freq}'.format(freq=job.update_every))
while True:
- job.START_RUN = monotonic()
-
- job.NEXT_RUN = job.START_RUN - (job.START_RUN % job.FREQ) + job.FREQ + job.PENALTY
+ job.sleep_until_next()
- self.sleep_until_next_run()
-
- if job.PREV_UPDATE:
- job.SINCE_UPDATE = int((job.START_RUN - job.PREV_UPDATE) * 1e6)
+ since = 0
+ if job.prev_update:
+ since = int((job.start_real - job.prev_update) * 1e6)
try:
- updated = self.update(interval=job.SINCE_UPDATE)
+ updated = self.update(interval=since)
except Exception as error:
self.error('update() unhandled exception: {error}'.format(error=error))
updated = False
- job.RUNS += 1
+ job.runs += 1
if not updated:
- if not self.manage_retries():
- return
+ job.handle_retries()
else:
- job.ELAPSED = int((monotonic() - job.START_RUN) * 1e3)
- job.PREV_UPDATE = job.START_RUN
- job.RETRIES, job.PENALTY = 0, 0
+ job.elapsed = int((monotonic() - job.start_mono) * 1e3)
+ job.prev_update = job.start_real
+ job.retries, job.penalty = 0, 0
safe_print(RUNTIME_CHART_UPDATE.format(job_name=self.name,
- since_last=job.SINCE_UPDATE,
- elapsed=job.ELAPSED))
- self.debug('update => [{status}] (elapsed time: {elapsed}, '
- 'retries left: {retries})'.format(status='OK' if updated else 'FAILED',
- elapsed=job.ELAPSED if updated else '-',
- retries=job.RETRIES_MAX - job.RETRIES))
+ since_last=since,
+ elapsed=job.elapsed))
+ self.debug('update => [{status}] (elapsed time: {elapsed}, failed retries in a row: {retries})'.format(
+ status='OK' if updated else 'FAILED',
+ elapsed=job.elapsed if updated else '-',
+ retries=job.retries))
def update(self, interval):
"""
@@ -233,27 +245,6 @@ class SimpleService(Thread, PythonDLimitedLogger, OldVersionCompatibility, objec
return updated
- def manage_retries(self):
- rc = self._runtime_counters
- rc.RETRIES += 1
- if rc.RETRIES % 5 == 0:
- rc.PENALTY = int(rc.RETRIES * self.update_every / 2)
- if rc.RETRIES >= rc.RETRIES_MAX:
- self.error('stopped after {0} data collection failures in a row'.format(rc.RETRIES_MAX))
- return False
- return True
-
- def sleep_until_next_run(self):
- job = self._runtime_counters
-
- # sleep() is interruptable
- while job.is_sleep_time():
- sleep_time = job.NEXT_RUN - job.START_RUN
- self.debug('sleeping for {sleep_time} to reach frequency of {freq} sec'.format(sleep_time=sleep_time,
- freq=job.FREQ + job.PENALTY))
- sleep(sleep_time)
- job.START_RUN = monotonic()
-
def get_data(self):
return self._get_data()
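
Note: calc_next() above aligns each run to a multiple of update_every before adding any accumulated penalty; a standalone sketch of that arithmetic (Python 3's time.monotonic; the plugin ships third_party.monotonic for older interpreters):

    from time import monotonic

    def next_run(now, update_every, penalty=0):
        # snap to the next update_every boundary, then defer by the penalty
        return now - (now % update_every) + update_every + penalty

    next_run(103.7, 5)        # -> ~105.0
    next_run(103.7, 5, 12)    # -> ~117.0 after repeated failures
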
diff --git a/collectors/python.d.plugin/python_modules/bases/FrameworkServices/SocketService.py b/collectors/python.d.plugin/python_modules/bases/FrameworkServices/SocketService.py
index e8545530..f5e6380b 100644
--- a/collectors/python.d.plugin/python_modules/bases/FrameworkServices/SocketService.py
+++ b/collectors/python.d.plugin/python_modules/bases/FrameworkServices/SocketService.py
@@ -75,9 +75,11 @@ class SocketService(SimpleService):
keyfile=self.key,
certfile=self.cert,
server_side=False,
- cert_reqs=ssl.CERT_NONE)
+ cert_reqs=ssl.CERT_NONE,
+ ssl_version=ssl.PROTOCOL_TLS,
+ )
except (socket.error, ssl.SSLError) as error:
- self.error('Failed to wrap socket.')
+ self.error('failed to wrap socket: {0}'.format(error))
self._disconnect()
self.__socket_config = None
return False
@@ -169,8 +171,8 @@ class SocketService(SimpleService):
self.debug('closing socket')
self._sock.shutdown(2) # 0 - read, 1 - write, 2 - all
self._sock.close()
- except Exception:
- pass
+ except Exception as error:
+ self.error(error)
self._sock = None
def _send(self, request=None):
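
Note: passing ssl_version=ssl.PROTOCOL_TLS lets both sides negotiate the highest mutually supported TLS version rather than pinning one protocol. On interpreters where ssl.wrap_socket is deprecated, the equivalent context-based form looks roughly like this (host and port are placeholders):

    import socket
    import ssl

    ctx = ssl.SSLContext(ssl.PROTOCOL_TLS)   # negotiate best shared TLS version
    ctx.check_hostname = False
    ctx.verify_mode = ssl.CERT_NONE          # mirrors cert_reqs=ssl.CERT_NONE above
    sock = ctx.wrap_socket(socket.create_connection(('127.0.0.1', 443)))
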
diff --git a/collectors/python.d.plugin/python_modules/bases/FrameworkServices/UrlService.py b/collectors/python.d.plugin/python_modules/bases/FrameworkServices/UrlService.py
index 856f3885..011efff9 100644
--- a/collectors/python.d.plugin/python_modules/bases/FrameworkServices/UrlService.py
+++ b/collectors/python.d.plugin/python_modules/bases/FrameworkServices/UrlService.py
@@ -26,6 +26,7 @@ class UrlService(SimpleService):
self.method = self.configuration.get('method', 'GET')
self.header = self.configuration.get('header')
self.request_timeout = self.configuration.get('timeout', 1)
+ self.respect_retry_after_header = self.configuration.get('respect_retry_after_header')
self.tls_verify = self.configuration.get('tls_verify')
self.tls_ca_file = self.configuration.get('tls_ca_file')
self.tls_key_file = self.configuration.get('tls_key_file')
@@ -111,12 +112,18 @@ class UrlService(SimpleService):
"""
url = url or self.url
manager = manager or self._manager
- response = manager.request(method=self.method,
- url=url,
- timeout=self.request_timeout,
- retries=retries,
- headers=manager.headers,
- redirect=redirect)
+ retry = urllib3.Retry(retries)
+ if hasattr(retry, 'respect_retry_after_header'):
+ retry.respect_retry_after_header = bool(self.respect_retry_after_header)
+
+ response = manager.request(
+ method=self.method,
+ url=url,
+ timeout=self.request_timeout,
+ retries=retry,
+ headers=manager.headers,
+ redirect=redirect,
+ )
if isinstance(response.data, str):
return response.status, response.data
return response.status, response.data.decode()
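
Note: the hasattr guard exists because respect_retry_after_header only appears in newer urllib3 releases; older bundled copies simply skip the assignment. A standalone sketch of the same request path (URL is a placeholder):

    import urllib3

    retry = urllib3.Retry(3)
    if hasattr(retry, 'respect_retry_after_header'):
        retry.respect_retry_after_header = False   # don't stall collection on Retry-After

    http = urllib3.PoolManager()
    resp = http.request('GET', 'http://127.0.0.1:8080/status',
                        retries=retry, timeout=1, redirect=True)
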
diff --git a/collectors/python.d.plugin/python_modules/bases/charts.py b/collectors/python.d.plugin/python_modules/bases/charts.py
index 2963739e..0a071905 100644
--- a/collectors/python.d.plugin/python_modules/bases/charts.py
+++ b/collectors/python.d.plugin/python_modules/bases/charts.py
@@ -45,7 +45,7 @@ def create_runtime_chart(func):
ok = func(*args, **kwargs)
if ok:
safe_print(RUNTIME_CHART_CREATE.format(job_name=self.name,
- update_every=self._runtime_counters.FREQ))
+ update_every=self._runtime_counters.update_every))
return ok
return wrapper
diff --git a/collectors/python.d.plugin/python_modules/bases/loggers.py b/collectors/python.d.plugin/python_modules/bases/loggers.py
index 39be77a7..098294d3 100644
--- a/collectors/python.d.plugin/python_modules/bases/loggers.py
+++ b/collectors/python.d.plugin/python_modules/bases/loggers.py
@@ -34,7 +34,7 @@ def limiter(log_max_count=30, allowed_in_seconds=60):
def on_decorator(func):
def on_call(*args):
- current_time = args[0]._runtime_counters.START_RUN
+ current_time = args[0]._runtime_counters.start_mono
lc = args[0]._logger_counters
if lc.logged and lc.logged % log_max_count == 0:
diff --git a/collectors/python.d.plugin/rabbitmq/README.md b/collectors/python.d.plugin/rabbitmq/README.md
index 22d367c4..4ac60605 100644
--- a/collectors/python.d.plugin/rabbitmq/README.md
+++ b/collectors/python.d.plugin/rabbitmq/README.md
@@ -54,3 +54,5 @@ socket:
When no configuration file is found, module tries to connect to: `localhost:15672`.
---
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Frabbitmq%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/collectors/python.d.plugin/rabbitmq/rabbitmq.chart.py b/collectors/python.d.plugin/rabbitmq/rabbitmq.chart.py
index 8298b403..a8f72592 100644
--- a/collectors/python.d.plugin/rabbitmq/rabbitmq.chart.py
+++ b/collectors/python.d.plugin/rabbitmq/rabbitmq.chart.py
@@ -3,23 +3,12 @@
# Author: l2isbad
# SPDX-License-Identifier: GPL-3.0-or-later
-from collections import namedtuple
from json import loads
-from socket import gethostbyname, gaierror
-from threading import Thread
-try:
- from queue import Queue
-except ImportError:
- from Queue import Queue
from bases.FrameworkServices.UrlService import UrlService
-# default module values (can be overridden per job in `config`)
-update_every = 1
-priority = 60000
-retries = 60
-
-METHODS = namedtuple('METHODS', ['get_data', 'url', 'stats'])
+API_NODE = 'api/nodes'
+API_OVERVIEW = 'api/overview'
NODE_STATS = [
'fd_used',
@@ -64,15 +53,15 @@ CHARTS = {
]
},
'memory': {
- 'options': [None, 'Memory', 'MB', 'overview', 'rabbitmq.memory', 'line'],
+ 'options': [None, 'Memory', 'MiB', 'overview', 'rabbitmq.memory', 'area'],
'lines': [
- ['mem_used', 'used', 'absolute', 1, 1024 << 10]
+ ['mem_used', 'used', 'absolute', 1, 1 << 20]
]
},
'disk_space': {
- 'options': [None, 'Disk Space', 'GB', 'overview', 'rabbitmq.disk_space', 'line'],
+ 'options': [None, 'Disk Space', 'GiB', 'overview', 'rabbitmq.disk_space', 'area'],
'lines': [
- ['disk_free', 'free', 'absolute', 1, 1024 ** 3]
+ ['disk_free', 'free', 'absolute', 1, 1 << 30]
]
},
'socket_descriptors': {
@@ -111,7 +100,7 @@ CHARTS = {
]
},
'message_rates': {
- 'options': [None, 'Message Rates', 'messages/s', 'overview', 'rabbitmq.message_rates', 'stacked'],
+ 'options': [None, 'Message Rates', 'messages/s', 'overview', 'rabbitmq.message_rates', 'line'],
'lines': [
['message_stats_ack', 'ack', 'incremental'],
['message_stats_redeliver', 'redeliver', 'incremental'],
@@ -127,74 +116,62 @@ class Service(UrlService):
UrlService.__init__(self, configuration=configuration, name=name)
self.order = ORDER
self.definitions = CHARTS
- self.host = self.configuration.get('host', '127.0.0.1')
- self.port = self.configuration.get('port', 15672)
- self.scheme = self.configuration.get('scheme', 'http')
+ self.url = '{0}://{1}:{2}'.format(
+ configuration.get('scheme', 'http'),
+ configuration.get('host', '127.0.0.1'),
+ configuration.get('port', 15672),
+ )
+ self.node_name = str()
- def check(self):
- # We can't start if <host> AND <port> not specified
- if not (self.host and self.port):
- self.error('Host is not defined in the module configuration file')
- return False
+ def _get_data(self):
+ data = dict()
- # Hostname -> ip address
- try:
- self.host = gethostbyname(self.host)
- except gaierror as error:
- self.error(str(error))
- return False
-
- # Add handlers (auth, self signed cert accept)
- self.url = '{scheme}://{host}:{port}/api'.format(scheme=self.scheme,
- host=self.host,
- port=self.port)
- # Add methods
- api_node = self.url + '/nodes'
- api_overview = self.url + '/overview'
- self.methods = [METHODS(get_data=self._get_overview_stats,
- url=api_node,
- stats=NODE_STATS),
- METHODS(get_data=self._get_overview_stats,
- url=api_overview,
- stats=OVERVIEW_STATS)]
- return UrlService.check(self)
+ stats = self.get_overview_stats()
- def _get_data(self):
- threads = list()
- queue = Queue()
- result = dict()
+ if not stats:
+ return None
+
+ data.update(stats)
+
+ stats = self.get_nodes_stats()
+
+ if not stats:
+ return None
+
+ data.update(stats)
- for method in self.methods:
- th = Thread(target=method.get_data,
- args=(queue, method.url, method.stats))
- th.start()
- threads.append(th)
+ return data or None
- for thread in threads:
- thread.join()
- result.update(queue.get())
+ def get_overview_stats(self):
+ url = '{0}/{1}'.format(self.url, API_OVERVIEW)
- return result or None
+ raw = self._get_raw_data(url)
- def _get_overview_stats(self, queue, url, stats):
- """
- Format data received from http request
- :return: dict
- """
+ if not raw:
+ return None
- raw_data = self._get_raw_data(url)
+ data = loads(raw)
- if not raw_data:
- return queue.put(dict())
- data = loads(raw_data)
- data = data[0] if isinstance(data, list) else data
+ self.node_name = data['node']
- to_netdata = fetch_data(raw_data=data, metrics=stats)
- return queue.put(to_netdata)
+ return fetch_data(raw_data=data, metrics=OVERVIEW_STATS)
+
+ def get_nodes_stats(self):
+ url = '{0}/{1}/{2}'.format(self.url, API_NODE, self.node_name)
+
+ raw = self._get_raw_data(url)
+
+ if not raw:
+ return None
+
+ data = loads(raw)
+
+ return fetch_data(raw_data=data, metrics=NODE_STATS)
def fetch_data(raw_data, metrics):
data = dict()
+
for metric in metrics:
value = raw_data
metrics_list = metric.split('.')
@@ -204,4 +181,5 @@ def fetch_data(raw_data, metrics):
except KeyError:
continue
data['_'.join(metrics_list)] = value
+
return data
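
Note: fetch_data() flattens dotted metric paths into underscore-joined keys; its core is a plain nested-dict walk, sketched here:

    def fetch(raw, dotted):
        # e.g. fetch(data, 'queue_totals.messages_ready')
        value = raw
        for key in dotted.split('.'):
            value = value[key]
        return value

    fetch({'queue_totals': {'messages_ready': 7}}, 'queue_totals.messages_ready')  # 7
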
diff --git a/collectors/python.d.plugin/rabbitmq/rabbitmq.conf b/collectors/python.d.plugin/rabbitmq/rabbitmq.conf
index 3f90da8a..ae0dbdb7 100644
--- a/collectors/python.d.plugin/rabbitmq/rabbitmq.conf
+++ b/collectors/python.d.plugin/rabbitmq/rabbitmq.conf
@@ -27,11 +27,9 @@
# If unset, the default for python.d.plugin is used.
# priority: 60000
-# retries sets the number of retries to be made in case of failures.
-# If unset, the default for python.d.plugin is used.
-# Attempts to restore the service are made once every update_every
-# and only if the module has collected values in the past.
-# retries: 60
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
# autodetection_retry sets the job re-check interval in seconds.
# The job is not deleted if check fails.
@@ -58,7 +56,7 @@
# # JOBs sharing a name are mutually exclusive
# update_every: 1 # the JOB's data collection frequency
# priority: 60000 # the JOB's order on the dashboard
-# retries: 60 # the JOB's number of restoration attempts
+# penalty: yes # the JOB's penalty
# autodetection_retry: 0 # the JOB's re-check interval in seconds
#
# Additionally to the above, rabbitmq plugin also supports the following:
diff --git a/collectors/python.d.plugin/redis/README.md b/collectors/python.d.plugin/redis/README.md
index 8d21df0c..0bea0376 100644
--- a/collectors/python.d.plugin/redis/README.md
+++ b/collectors/python.d.plugin/redis/README.md
@@ -40,3 +40,5 @@ localhost:
When no configuration file is found, module tries to connect to TCP/IP socket: `localhost:6379`.
---
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fredis%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/collectors/python.d.plugin/redis/redis.chart.py b/collectors/python.d.plugin/redis/redis.chart.py
index 37d55ebf..9dbb2c16 100644
--- a/collectors/python.d.plugin/redis/redis.chart.py
+++ b/collectors/python.d.plugin/redis/redis.chart.py
@@ -47,13 +47,13 @@ CHARTS = {
]
},
'hit_rate': {
- 'options': [None, 'Hit rate', 'percent', 'hits', 'redis.hit_rate', 'line'],
+ 'options': [None, 'Hit rate', 'percentage', 'hits', 'redis.hit_rate', 'line'],
'lines': [
['hit_rate', 'rate', 'absolute']
]
},
'memory': {
- 'options': [None, 'Memory utilization', 'kilobytes', 'memory', 'redis.memory', 'line'],
+ 'options': [None, 'Memory utilization', 'KiB', 'memory', 'redis.memory', 'line'],
'lines': [
['used_memory', 'total', 'absolute', 1, 1024],
['used_memory_lua', 'lua', 'absolute', 1, 1024]
@@ -62,8 +62,8 @@ CHARTS = {
'net': {
'options': [None, 'Bandwidth', 'kilobits/s', 'network', 'redis.net', 'area'],
'lines': [
- ['total_net_input_bytes', 'in', 'incremental', 8, 1024],
- ['total_net_output_bytes', 'out', 'incremental', -8, 1024]
+ ['total_net_input_bytes', 'in', 'incremental', 8, 1000],
+ ['total_net_output_bytes', 'out', 'incremental', -8, 1000]
]
},
'keys_redis': {
@@ -146,16 +146,13 @@ RE = re.compile(r'\n([a-z_0-9 ]+):(?:keys=)?([^,\r]+)')
class Service(SocketService):
def __init__(self, configuration=None, name=None):
SocketService.__init__(self, configuration=configuration, name=name)
- self._keep_alive = True
-
self.order = list()
self.definitions = dict()
-
+ self._keep_alive = True
self.host = self.configuration.get('host', 'localhost')
self.port = self.configuration.get('port', 6379)
self.unix_socket = self.configuration.get('socket')
p = self.configuration.get('pass')
-
self.auth_request = 'AUTH {0} \r\n'.format(p).encode() if p else None
self.request = 'INFO\r\n'.encode()
self.bgsave_time = 0
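
Note: the divisor change from 1024 to 1000 above is deliberate: the chart is labelled kilobits/s, a decimal unit, and the dimension's multiplier/divisor pair performs the conversion from the counter's bytes:

    bytes_per_sec = 125000                        # rate computed by the incremental algo
    kilobits_per_sec = bytes_per_sec * 8 / 1000   # multiplier 8, divisor 1000
    assert kilobits_per_sec == 1000.0             # 125 kB/s == 1000 kilobit/s
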
diff --git a/collectors/python.d.plugin/redis/redis.conf b/collectors/python.d.plugin/redis/redis.conf
index 6363f6da..b456d75d 100644
--- a/collectors/python.d.plugin/redis/redis.conf
+++ b/collectors/python.d.plugin/redis/redis.conf
@@ -27,11 +27,9 @@
# If unset, the default for python.d.plugin is used.
# priority: 60000
-# retries sets the number of retries to be made in case of failures.
-# If unset, the default for python.d.plugin is used.
-# Attempts to restore the service are made once every update_every
-# and only if the module has collected values in the past.
-# retries: 60
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
# autodetection_retry sets the job re-check interval in seconds.
# The job is not deleted if check fails.
@@ -58,7 +56,7 @@
# # JOBs sharing a name are mutually exclusive
# update_every: 1 # the JOB's data collection frequency
# priority: 60000 # the JOB's order on the dashboard
-# retries: 60 # the JOB's number of restoration attempts
+# penalty: yes # the JOB's penalty
# autodetection_retry: 0 # the JOB's re-check interval in seconds
#
# Additionally to the above, redis also supports the following:
diff --git a/collectors/python.d.plugin/rethinkdbs/README.md b/collectors/python.d.plugin/rethinkdbs/README.md
index 5d357fa4..183c7f73 100644
--- a/collectors/python.d.plugin/rethinkdbs/README.md
+++ b/collectors/python.d.plugin/rethinkdbs/README.md
@@ -32,3 +32,5 @@ localhost:
When no configuration file is found, module tries to connect to `127.0.0.1:28015`.
---
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Frethinkdbs%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/collectors/python.d.plugin/rethinkdbs/rethinkdbs.chart.py b/collectors/python.d.plugin/rethinkdbs/rethinkdbs.chart.py
index 127e9ad4..da2f26f4 100644
--- a/collectors/python.d.plugin/rethinkdbs/rethinkdbs.chart.py
+++ b/collectors/python.d.plugin/rethinkdbs/rethinkdbs.chart.py
@@ -136,13 +136,11 @@ class Service(SimpleService):
SimpleService.__init__(self, configuration=configuration, name=name)
self.order = list(ORDER)
self.definitions = cluster_charts()
-
self.host = self.configuration.get('host', '127.0.0.1')
self.port = self.configuration.get('port', 28015)
self.user = self.configuration.get('user', 'admin')
self.password = self.configuration.get('password')
self.timeout = self.configuration.get('timeout', 2)
-
self.conn = None
self.alive = True
diff --git a/collectors/python.d.plugin/rethinkdbs/rethinkdbs.conf b/collectors/python.d.plugin/rethinkdbs/rethinkdbs.conf
index 73544fc2..d671acbb 100644
--- a/collectors/python.d.plugin/rethinkdbs/rethinkdbs.conf
+++ b/collectors/python.d.plugin/rethinkdbs/rethinkdbs.conf
@@ -27,11 +27,9 @@
# If unset, the default for python.d.plugin is used.
# priority: 60000
-# retries sets the number of retries to be made in case of failures.
-# If unset, the default for python.d.plugin is used.
-# Attempts to restore the service are made once every update_every
-# and only if the module has collected values in the past.
-# retries: 60
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
# autodetection_retry sets the job re-check interval in seconds.
# The job is not deleted if check fails.
@@ -58,7 +56,7 @@
# # JOBs sharing a name are mutually exclusive
# update_every: 1 # the JOB's data collection frequency
# priority: 60000 # the JOB's order on the dashboard
-# retries: 60 # the JOB's number of restoration attempts
+# penalty: yes # the JOB's penalty
# autodetection_retry: 0 # the JOB's re-check interval in seconds
#
# Additionally to the above, rethinkdb also supports the following:
diff --git a/collectors/python.d.plugin/retroshare/README.md b/collectors/python.d.plugin/retroshare/README.md
index e95095c6..a8a58880 100644
--- a/collectors/python.d.plugin/retroshare/README.md
+++ b/collectors/python.d.plugin/retroshare/README.md
@@ -1 +1,3 @@
# retroshare
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fretroshare%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/collectors/python.d.plugin/retroshare/retroshare.chart.py b/collectors/python.d.plugin/retroshare/retroshare.chart.py
index 1d8e3505..feb871fb 100644
--- a/collectors/python.d.plugin/retroshare/retroshare.chart.py
+++ b/collectors/python.d.plugin/retroshare/retroshare.chart.py
@@ -7,26 +7,25 @@ import json
from bases.FrameworkServices.UrlService import UrlService
-# default module values (can be overridden per job in `config`)
-# update_every = 2
-priority = 60000
-retries = 60
-# charts order (can be overridden if you want less charts, or different order)
-ORDER = ['bandwidth', 'peers', 'dht']
+ORDER = [
+ 'bandwidth',
+ 'peers',
+ 'dht',
+]
CHARTS = {
'bandwidth': {
- 'options': [None, 'RetroShare Bandwidth', 'kB/s', 'RetroShare', 'retroshare.bandwidth', 'area'],
+ 'options': [None, 'RetroShare Bandwidth', 'kilobits/s', 'RetroShare', 'retroshare.bandwidth', 'area'],
'lines': [
- ['bandwidth_up_kb', 'Upload'],
+ ['bandwidth_up_kb', 'Upload'],
['bandwidth_down_kb', 'Download']
]
},
'peers': {
'options': [None, 'RetroShare Peers', 'peers', 'RetroShare', 'retroshare.peers', 'line'],
'lines': [
- ['peers_all', 'All friends'],
+ ['peers_all', 'All friends'],
['peers_connected', 'Connected friends']
]
},
@@ -34,7 +33,7 @@ CHARTS = {
'options': [None, 'Retroshare DHT', 'peers', 'RetroShare', 'retroshare.dht', 'line'],
'lines': [
['dht_size_all', 'DHT nodes estimated'],
- ['dht_size_rs', 'RS nodes estimated']
+ ['dht_size_rs', 'RS nodes estimated']
]
}
}
@@ -43,9 +42,9 @@ CHARTS = {
class Service(UrlService):
def __init__(self, configuration=None, name=None):
UrlService.__init__(self, configuration=configuration, name=name)
- self.baseurl = self.configuration.get('url', 'http://localhost:9090')
self.order = ORDER
self.definitions = CHARTS
+ self.baseurl = self.configuration.get('url', 'http://localhost:9090')
def _get_stats(self):
"""
diff --git a/collectors/python.d.plugin/retroshare/retroshare.conf b/collectors/python.d.plugin/retroshare/retroshare.conf
index 9c92583f..3d0af538 100644
--- a/collectors/python.d.plugin/retroshare/retroshare.conf
+++ b/collectors/python.d.plugin/retroshare/retroshare.conf
@@ -27,11 +27,9 @@
# If unset, the default for python.d.plugin is used.
# priority: 60000
-# retries sets the number of retries to be made in case of failures.
-# If unset, the default for python.d.plugin is used.
-# Attempts to restore the service are made once every update_every
-# and only if the module has collected values in the past.
-# retries: 60
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
# autodetection_retry sets the job re-check interval in seconds.
# The job is not deleted if check fails.
@@ -58,7 +56,7 @@
# # JOBs sharing a name are mutually exclusive
# update_every: 1 # the JOB's data collection frequency
# priority: 60000 # the JOB's order on the dashboard
-# retries: 60 # the JOB's number of restoration attempts
+# penalty: yes # the JOB's penalty
# autodetection_retry: 0 # the JOB's re-check interval in seconds
#
# Additionally to the above, RetroShare also supports the following:
diff --git a/collectors/python.d.plugin/samba/README.md b/collectors/python.d.plugin/samba/README.md
index 44610d37..97f2e3d3 100644
--- a/collectors/python.d.plugin/samba/README.md
+++ b/collectors/python.d.plugin/samba/README.md
@@ -65,3 +65,5 @@ samba: yes
```
---
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fsamba%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/collectors/python.d.plugin/samba/samba.chart.py b/collectors/python.d.plugin/samba/samba.chart.py
index b2278de9..ac89c29b 100644
--- a/collectors/python.d.plugin/samba/samba.chart.py
+++ b/collectors/python.d.plugin/samba/samba.chart.py
@@ -24,10 +24,7 @@ from bases.FrameworkServices.ExecutableService import ExecutableService
disabled_by_default = True
-# default module values (can be overridden per job in `config`)
update_every = 5
-priority = 60000
-retries = 60
ORDER = [
'syscall_rw',
@@ -41,14 +38,14 @@ ORDER = [
CHARTS = {
'syscall_rw': {
- 'options': [None, 'R/Ws', 'kilobytes/s', 'syscall', 'syscall.rw', 'area'],
+ 'options': [None, 'R/Ws', 'KiB/s', 'syscall', 'syscall.rw', 'area'],
'lines': [
['syscall_sendfile_bytes', 'sendfile', 'incremental', 1, 1024],
['syscall_recvfile_bytes', 'recvfile', 'incremental', -1, 1024]
]
},
'smb2_rw': {
- 'options': [None, 'R/Ws', 'kilobytes/s', 'smb2', 'smb2.rw', 'area'],
+ 'options': [None, 'R/Ws', 'KiB/s', 'smb2', 'smb2.rw', 'area'],
'lines': [
['smb2_read_outbytes', 'readout', 'incremental', 1, 1024],
['smb2_write_inbytes', 'writein', 'incremental', -1, 1024],
diff --git a/collectors/python.d.plugin/samba/samba.conf b/collectors/python.d.plugin/samba/samba.conf
index ee513c60..db15d4e9 100644
--- a/collectors/python.d.plugin/samba/samba.conf
+++ b/collectors/python.d.plugin/samba/samba.conf
@@ -27,11 +27,9 @@ update_every: 5
# If unset, the default for python.d.plugin is used.
# priority: 60000
-# retries sets the number of retries to be made in case of failures.
-# If unset, the default for python.d.plugin is used.
-# Attempts to restore the service are made once every update_every
-# and only if the module has collected values in the past.
-# retries: 60
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
# autodetection_retry sets the job re-check interval in seconds.
# The job is not deleted if check fails.
@@ -58,5 +56,5 @@ update_every: 5
# # JOBs sharing a name are mutually exclusive
# update_every: 1 # the JOB's data collection frequency
# priority: 60000 # the JOB's order on the dashboard
-# retries: 60 # the JOB's number of restoration attempts
+# penalty: yes # the JOB's penalty
# autodetection_retry: 0 # the JOB's re-check interval in seconds
\ No newline at end of file
diff --git a/collectors/python.d.plugin/sensors/README.md b/collectors/python.d.plugin/sensors/README.md
index eb1642d9..e3f956f1 100644
--- a/collectors/python.d.plugin/sensors/README.md
+++ b/collectors/python.d.plugin/sensors/README.md
@@ -15,3 +15,5 @@ We are tracking such cases in issue [#827](https://github.com/netdata/netdata/is
Please join this discussion for help.
---
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fsensors%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/collectors/python.d.plugin/sensors/sensors.chart.py b/collectors/python.d.plugin/sensors/sensors.chart.py
index d70af3b0..e622eb8e 100644
--- a/collectors/python.d.plugin/sensors/sensors.chart.py
+++ b/collectors/python.d.plugin/sensors/sensors.chart.py
@@ -7,8 +7,6 @@ from third_party import lm_sensors as sensors
from bases.FrameworkServices.SimpleService import SimpleService
-# default module values (can be overridden per job in `config`)
-# update_every = 2
ORDER = [
'temperature',
@@ -139,7 +137,7 @@ class Service(SimpleService):
except sensors.SensorsError as error:
self.error('{0}: {1}'.format(sf.name, error))
continue
- if not vals or vals[0] == 0:
+ if not vals or (vals[0] == 0 and feature.type != 1):
continue
if TYPE_MAP[feature.type] == sensor:
# create chart
diff --git a/collectors/python.d.plugin/sensors/sensors.conf b/collectors/python.d.plugin/sensors/sensors.conf
index 83bbffd7..d3369ba6 100644
--- a/collectors/python.d.plugin/sensors/sensors.conf
+++ b/collectors/python.d.plugin/sensors/sensors.conf
@@ -19,11 +19,9 @@
# If unset, the default for python.d.plugin is used.
# priority: 60000
-# retries sets the number of retries to be made in case of failures.
-# If unset, the default for python.d.plugin is used.
-# Attempts to restore the service are made once every update_every
-# and only if the module has collected values in the past.
-# retries: 60
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
# autodetection_retry sets the job re-check interval in seconds.
# The job is not deleted if check fails.
diff --git a/collectors/python.d.plugin/smartd_log/README.md b/collectors/python.d.plugin/smartd_log/README.md
index a31ad0c7..3b0816fb 100644
--- a/collectors/python.d.plugin/smartd_log/README.md
+++ b/collectors/python.d.plugin/smartd_log/README.md
@@ -99,3 +99,5 @@ local:
If no configuration is given, module will attempt to read log files in `/var/log/smartd/` directory.
---
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fsmartd_log%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/collectors/python.d.plugin/smartd_log/smartd_log.chart.py b/collectors/python.d.plugin/smartd_log/smartd_log.chart.py
index 13762fab..871025a4 100644
--- a/collectors/python.d.plugin/smartd_log/smartd_log.chart.py
+++ b/collectors/python.d.plugin/smartd_log/smartd_log.chart.py
@@ -268,7 +268,7 @@ CHARTS = {
'algo': INCREMENTAL,
},
'reserved_block_count': {
- 'options': [None, 'Reserved Block Count', '%', 'wear', 'smartd_log.reserved_block_count', 'line'],
+ 'options': [None, 'Reserved Block Count', 'percentage', 'wear', 'smartd_log.reserved_block_count', 'line'],
'lines': [],
'attrs': [ATTR170],
'algo': ABSOLUTE,
@@ -321,7 +321,7 @@ CHARTS = {
},
'percent_lifetime_used': {
- 'options': [None, 'Percent Lifetime Used', '%', 'wear', 'smartd_log.percent_lifetime_used', 'line'],
+ 'options': [None, 'Percent Lifetime Used', 'percentage', 'wear', 'smartd_log.percent_lifetime_used', 'line'],
'lines': [],
'attrs': [ATTR202],
'algo': ABSOLUTE,
@@ -453,6 +453,11 @@ class Ata190(BaseAtaSmartAttribute):
return 100 - int(self.normalized_value)
+class Ata194(BaseAtaSmartAttribute):
+ def value(self):
+ return min(int(self.normalized_value), int(self.raw_value))
+
+
class BaseSCSISmartAttribute:
def __init__(self, name, raw_value):
self.name = name
@@ -474,10 +479,11 @@ def ata_attribute_factory(value):
return Ata9(*value)
elif name == ATTR190:
return Ata190(*value)
+ elif name == ATTR194:
+ return Ata194(*value)
elif name in [
ATTR1,
ATTR7,
- ATTR194,
ATTR202,
ATTR206,
]:
@@ -580,11 +586,9 @@ class Service(SimpleService):
SimpleService.__init__(self, configuration=configuration, name=name)
self.order = ORDER
self.definitions = deepcopy(CHARTS)
-
self.log_path = configuration.get('log_path', DEF_PATH)
self.age = configuration.get('age', DEF_AGE)
self.exclude = configuration.get('exclude_disks', str()).split()
-
self.disks = list()
self.runs = 0
@@ -646,6 +650,10 @@ class Service(SimpleService):
return len(self.disks)
def create_disk_from_file(self, full_name, current_time):
+ if not full_name.endswith(CSV):
+ self.debug('skipping {0}: not a csv file'.format(full_name))
+ return None
+
name = os.path.basename(full_name).split('.')[-3]
path = os.path.join(self.log_path, full_name)
@@ -655,10 +663,6 @@ class Service(SimpleService):
if [p for p in self.exclude if p in name]:
return None
- if not full_name.endswith(CSV):
- self.debug('skipping {0}: not a csv file'.format(full_name))
- return None
-
if not os.access(path, os.R_OK):
self.debug('skipping {0}: not readable'.format(full_name))
return None
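
Note: the new Ata194 handler covers SMART attribute 194 (temperature), where some disks report degrees in the raw field and others in the normalized one; taking the smaller of the two usually yields the actual temperature rather than a vendor-packed value. Sketch:

    def ata194_value(normalized_value, raw_value):
        # attribute 194: pick the plausible temperature reading
        return min(int(normalized_value), int(raw_value))

    ata194_value('64', '36')   # -> 36 degrees
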
diff --git a/collectors/python.d.plugin/smartd_log/smartd_log.conf b/collectors/python.d.plugin/smartd_log/smartd_log.conf
index ab7f45b0..4f138d17 100644
--- a/collectors/python.d.plugin/smartd_log/smartd_log.conf
+++ b/collectors/python.d.plugin/smartd_log/smartd_log.conf
@@ -27,11 +27,9 @@
# If unset, the default for python.d.plugin is used.
# priority: 60000
-# retries sets the number of retries to be made in case of failures.
-# If unset, the default for python.d.plugin is used.
-# Attempts to restore the service are made once every update_every
-# and only if the module has collected values in the past.
-# retries: 60
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
# autodetection_retry sets the job re-check interval in seconds.
# The job is not deleted if check fails.
@@ -58,7 +56,7 @@
# # JOBs sharing a name are mutually exclusive
# update_every: 1 # the JOB's data collection frequency
# priority: 60000 # the JOB's order on the dashboard
-# retries: 60 # the JOB's number of restoration attempts
+# penalty: yes # the JOB's penalty
# autodetection_retry: 0 # the JOB's re-check interval in seconds
#
# Additionally to the above, smartd_log also supports the following:
diff --git a/collectors/python.d.plugin/spigotmc/README.md b/collectors/python.d.plugin/spigotmc/README.md
index ae560258..c3893055 100644
--- a/collectors/python.d.plugin/spigotmc/README.md
+++ b/collectors/python.d.plugin/spigotmc/README.md
@@ -20,3 +20,5 @@ password: pass
By default, a connection to port 25575 on the local system is attempted with an empty password.
---
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fspigotmc%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/collectors/python.d.plugin/spigotmc/spigotmc.chart.py b/collectors/python.d.plugin/spigotmc/spigotmc.chart.py
index a5e5ee0e..09674f5c 100644
--- a/collectors/python.d.plugin/spigotmc/spigotmc.chart.py
+++ b/collectors/python.d.plugin/spigotmc/spigotmc.chart.py
@@ -16,7 +16,10 @@ update_every = 5
PRECISION = 100
-ORDER = ['tps', 'users']
+ORDER = [
+ 'tps',
+ 'users',
+]
CHARTS = {
'tps': {
diff --git a/collectors/python.d.plugin/spigotmc/spigotmc.conf b/collectors/python.d.plugin/spigotmc/spigotmc.conf
index 3ba492de..ccb5e263 100644
--- a/collectors/python.d.plugin/spigotmc/spigotmc.conf
+++ b/collectors/python.d.plugin/spigotmc/spigotmc.conf
@@ -27,11 +27,9 @@
# If unset, the default for python.d.plugin is used.
# priority: 60000
-# retries sets the number of retries to be made in case of failures.
-# If unset, the default for python.d.plugin is used.
-# Attempts to restore the service are made once every update_every
-# and only if the module has collected values in the past.
-# retries: 60
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
# autodetection_retry sets the job re-check interval in seconds.
# The job is not deleted if check fails.
@@ -58,7 +56,7 @@
# # JOBs sharing a name are mutually exclusive
# update_every: 1 # the JOB's data collection frequency
# priority: 60000 # the JOB's order on the dashboard
-# retries: 60 # the JOB's number of restoration attempts
+# penalty: yes # the JOB's penalty
# autodetection_retry: 0 # the JOB's re-check interval in seconds
#
# In addition to the above, spigotmc supports the following:
diff --git a/collectors/python.d.plugin/springboot/README.md b/collectors/python.d.plugin/springboot/README.md
index a1817cc2..b5b776dd 100644
--- a/collectors/python.d.plugin/springboot/README.md
+++ b/collectors/python.d.plugin/springboot/README.md
@@ -120,3 +120,5 @@ You can disable the default charts by set `defaults.<chart-id>: false`.
The dimension name of extras charts should replace `.` to `_`.
Please check [springboot.conf](springboot.conf) for more examples.
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fspringboot%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/collectors/python.d.plugin/springboot/springboot.chart.py b/collectors/python.d.plugin/springboot/springboot.chart.py
index 7df37e1d..eec870eb 100644
--- a/collectors/python.d.plugin/springboot/springboot.chart.py
+++ b/collectors/python.d.plugin/springboot/springboot.chart.py
@@ -6,13 +6,14 @@
import json
from bases.FrameworkServices.UrlService import UrlService
-# default module values (can be overridden per job in `config`)
-# update_every = 2
-priority = 60000
-retries = 60
-
-DEFAULT_ORDER = ['response_code', 'threads', 'gc_time', 'gc_ope', 'heap']
+DEFAULT_ORDER = [
+ 'response_code',
+ 'threads',
+ 'gc_time',
+ 'gc_ope',
+ 'heap',
+]
DEFAULT_CHARTS = {
'response_code': {
@@ -60,7 +61,7 @@ DEFAULT_CHARTS = {
]
},
'heap': {
- 'options': [None, "Heap Memory Usage", "KB", "heap memory", "springboot.heap", "area"],
+ 'options': [None, "Heap Memory Usage", "KiB", "heap memory", "springboot.heap", "area"],
'lines': [
["heap_committed", 'committed', "absolute"],
["heap_used", 'used', "absolute"],
diff --git a/collectors/python.d.plugin/springboot/springboot.conf b/collectors/python.d.plugin/springboot/springboot.conf
index 40b5fb43..13a39895 100644
--- a/collectors/python.d.plugin/springboot/springboot.conf
+++ b/collectors/python.d.plugin/springboot/springboot.conf
@@ -27,11 +27,9 @@
# If unset, the default for python.d.plugin is used.
# priority: 60000
-# retries sets the number of retries to be made in case of failures.
-# If unset, the default for python.d.plugin is used.
-# Attempts to restore the service are made once every update_every
-# and only if the module has collected values in the past.
-# retries: 60
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
# autodetection_retry sets the job re-check interval in seconds.
# The job is not deleted if check fails.
@@ -53,7 +51,7 @@
# # JOBs sharing a name are mutually exclusive
# update_every: 1 # the JOB's data collection frequency
# priority: 60000 # the JOB's order on the dashboard
-# retries: 60 # the JOB's number of restoration attempts
+# penalty: yes # the JOB's penalty
# autodetection_retry: 0 # the JOB's re-check interval in seconds
#
# Additionally to the above, this plugin also supports the following:
diff --git a/collectors/python.d.plugin/squid/README.md b/collectors/python.d.plugin/squid/README.md
index 9c9b62f2..b278f419 100644
--- a/collectors/python.d.plugin/squid/README.md
+++ b/collectors/python.d.plugin/squid/README.md
@@ -36,3 +36,5 @@ local:
Without any configuration module will try to autodetect where squid presents its `counters` data
---
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fsquid%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/collectors/python.d.plugin/squid/squid.chart.py b/collectors/python.d.plugin/squid/squid.chart.py
index fd54168f..c00556b5 100644
--- a/collectors/python.d.plugin/squid/squid.chart.py
+++ b/collectors/python.d.plugin/squid/squid.chart.py
@@ -6,13 +6,12 @@
from bases.FrameworkServices.SocketService import SocketService
-# default module values (can be overridden per job in `config`)
-# update_every = 2
-priority = 60000
-retries = 60
-
-# charts order (can be overridden if you want less charts, or different order)
-ORDER = ['clients_net', 'clients_requests', 'servers_net', 'servers_requests']
+ORDER = [
+ 'clients_net',
+ 'clients_requests',
+ 'servers_net',
+ 'servers_requests',
+]
CHARTS = {
'clients_net': {
diff --git a/collectors/python.d.plugin/squid/squid.conf b/collectors/python.d.plugin/squid/squid.conf
index 564187f0..b90a52c0 100644
--- a/collectors/python.d.plugin/squid/squid.conf
+++ b/collectors/python.d.plugin/squid/squid.conf
@@ -27,11 +27,9 @@
# If unset, the default for python.d.plugin is used.
# priority: 60000
-# retries sets the number of retries to be made in case of failures.
-# If unset, the default for python.d.plugin is used.
-# Attempts to restore the service are made once every update_every
-# and only if the module has collected values in the past.
-# retries: 60
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
# autodetection_retry sets the job re-check interval in seconds.
# The job is not deleted if check fails.
@@ -58,7 +56,7 @@
# # JOBs sharing a name are mutually exclusive
# update_every: 1 # the JOB's data collection frequency
# priority: 60000 # the JOB's order on the dashboard
-# retries: 60 # the JOB's number of restoration attempts
+# penalty: yes # the JOB's penalty
# autodetection_retry: 0 # the JOB's re-check interval in seconds
#
# Additionally to the above, squid also supports the following:
diff --git a/collectors/python.d.plugin/tomcat/README.md b/collectors/python.d.plugin/tomcat/README.md
index e548bd33..21e3896a 100644
--- a/collectors/python.d.plugin/tomcat/README.md
+++ b/collectors/python.d.plugin/tomcat/README.md
@@ -31,3 +31,5 @@ Without configuration, module attempts to connect to `http://localhost:8080/mana
So it will probably fail.
---
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Ftomcat%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/collectors/python.d.plugin/tomcat/tomcat.chart.py b/collectors/python.d.plugin/tomcat/tomcat.chart.py
index 3c2d0ed4..01578c56 100644
--- a/collectors/python.d.plugin/tomcat/tomcat.chart.py
+++ b/collectors/python.d.plugin/tomcat/tomcat.chart.py
@@ -8,13 +8,18 @@ import xml.etree.ElementTree as ET
from bases.FrameworkServices.UrlService import UrlService
-# default module values (can be overridden per job in `config`)
-# update_every = 2
-priority = 60000
-retries = 60
+MiB = 1 << 20
-# charts order (can be overridden if you want less charts, or different order)
-ORDER = ['accesses', 'bandwidth', 'processing_time', 'threads', 'jvm', 'jvm_eden', 'jvm_survivor', 'jvm_tenured']
+ORDER = [
+ 'accesses',
+ 'bandwidth',
+ 'processing_time',
+ 'threads',
+ 'jvm',
+ 'jvm_eden',
+ 'jvm_survivor',
+ 'jvm_tenured',
+]
CHARTS = {
'accesses': {
@@ -25,7 +30,7 @@ CHARTS = {
]
},
'bandwidth': {
- 'options': [None, 'Bandwidth', 'KB/s', 'statistics', 'tomcat.bandwidth', 'area'],
+ 'options': [None, 'Bandwidth', 'KiB/s', 'statistics', 'tomcat.bandwidth', 'area'],
'lines': [
['bytesSent', 'sent', 'incremental', 1, 1024],
['bytesReceived', 'received', 'incremental', 1, 1024],
@@ -45,39 +50,39 @@ CHARTS = {
]
},
'jvm': {
- 'options': [None, 'JVM Memory Pool Usage', 'MB', 'memory', 'tomcat.jvm', 'stacked'],
+ 'options': [None, 'JVM Memory Pool Usage', 'MiB', 'memory', 'tomcat.jvm', 'stacked'],
'lines': [
- ['free', 'free', 'absolute', 1, 1048576],
- ['eden_used', 'eden', 'absolute', 1, 1048576],
- ['survivor_used', 'survivor', 'absolute', 1, 1048576],
- ['tenured_used', 'tenured', 'absolute', 1, 1048576],
- ['code_cache_used', 'code cache', 'absolute', 1, 1048576],
- ['compressed_used', 'compressed', 'absolute', 1, 1048576],
- ['metaspace_used', 'metaspace', 'absolute', 1, 1048576],
+ ['free', 'free', 'absolute', 1, MiB],
+ ['eden_used', 'eden', 'absolute', 1, MiB],
+ ['survivor_used', 'survivor', 'absolute', 1, MiB],
+ ['tenured_used', 'tenured', 'absolute', 1, MiB],
+ ['code_cache_used', 'code cache', 'absolute', 1, MiB],
+ ['compressed_used', 'compressed', 'absolute', 1, MiB],
+ ['metaspace_used', 'metaspace', 'absolute', 1, MiB],
]
},
'jvm_eden': {
- 'options': [None, 'Eden Memory Usage', 'MB', 'memory', 'tomcat.jvm_eden', 'area'],
+ 'options': [None, 'Eden Memory Usage', 'MiB', 'memory', 'tomcat.jvm_eden', 'area'],
'lines': [
- ['eden_used', 'used', 'absolute', 1, 1048576],
- ['eden_committed', 'committed', 'absolute', 1, 1048576],
- ['eden_max', 'max', 'absolute', 1, 1048576]
+ ['eden_used', 'used', 'absolute', 1, MiB],
+ ['eden_committed', 'committed', 'absolute', 1, MiB],
+ ['eden_max', 'max', 'absolute', 1, MiB]
]
},
'jvm_survivor': {
- 'options': [None, 'Survivor Memory Usage', 'MB', 'memory', 'tomcat.jvm_survivor', 'area'],
+ 'options': [None, 'Survivor Memory Usage', 'MiB', 'memory', 'tomcat.jvm_survivor', 'area'],
'lines': [
- ['survivor_used', 'used', 'absolute', 1, 1048576],
- ['survivor_committed', 'committed', 'absolute', 1, 1048576],
- ['survivor_max', 'max', 'absolute', 1, 1048576]
+ ['survivor_used', 'used', 'absolute', 1, MiB],
+ ['survivor_committed', 'committed', 'absolute', 1, MiB],
+ ['survivor_max', 'max', 'absolute', 1, MiB],
]
},
'jvm_tenured': {
- 'options': [None, 'Tenured Memory Usage', 'MB', 'memory', 'tomcat.jvm_tenured', 'area'],
+ 'options': [None, 'Tenured Memory Usage', 'MiB', 'memory', 'tomcat.jvm_tenured', 'area'],
'lines': [
- ['tenured_used', 'used', 'absolute', 1, 1048576],
- ['tenured_committed', 'committed', 'absolute', 1, 1048576],
- ['tenured_max', 'max', 'absolute', 1, 1048576]
+ ['tenured_used', 'used', 'absolute', 1, MiB],
+ ['tenured_committed', 'committed', 'absolute', 1, MiB],
+ ['tenured_max', 'max', 'absolute', 1, MiB]
]
}
}
@@ -86,10 +91,10 @@ CHARTS = {
class Service(UrlService):
def __init__(self, configuration=None, name=None):
UrlService.__init__(self, configuration=configuration, name=name)
- self.url = self.configuration.get('url', 'http://127.0.0.1:8080/manager/status?XML=true')
- self.connector_name = self.configuration.get('connector_name', None)
self.order = ORDER
self.definitions = CHARTS
+ self.url = self.configuration.get('url', 'http://127.0.0.1:8080/manager/status?XML=true')
+ self.connector_name = self.configuration.get('connector_name', None)
def _get_data(self):
"""
diff --git a/collectors/python.d.plugin/tomcat/tomcat.conf b/collectors/python.d.plugin/tomcat/tomcat.conf
index c63f06cf..009591bd 100644
--- a/collectors/python.d.plugin/tomcat/tomcat.conf
+++ b/collectors/python.d.plugin/tomcat/tomcat.conf
@@ -27,11 +27,9 @@
# If unset, the default for python.d.plugin is used.
# priority: 60000
-# retries sets the number of retries to be made in case of failures.
-# If unset, the default for python.d.plugin is used.
-# Attempts to restore the service are made once every update_every
-# and only if the module has collected values in the past.
-# retries: 60
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
# autodetection_retry sets the job re-check interval in seconds.
# The job is not deleted if check fails.
@@ -58,7 +56,7 @@
# # JOBs sharing a name are mutually exclusive
# update_every: 1 # the JOB's data collection frequency
# priority: 60000 # the JOB's order on the dashboard
-# retries: 60 # the JOB's number of restoration attempts
+# penalty: yes # the JOB's penalty
# autodetection_retry: 0 # the JOB's re-check interval in seconds
#
# Additionally to the above, tomcat also supports the following:
diff --git a/collectors/python.d.plugin/tor/README.md b/collectors/python.d.plugin/tor/README.md
index 4a883373..2ce0f25f 100644
--- a/collectors/python.d.plugin/tor/README.md
+++ b/collectors/python.d.plugin/tor/README.md
@@ -44,3 +44,5 @@ For more options please read the manual.
Without configuration, module attempts to connect to `127.0.0.1:9051`.
---
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Ftor%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/collectors/python.d.plugin/tor/tor.chart.py b/collectors/python.d.plugin/tor/tor.chart.py
index b77632bd..dd61e6e9 100644
--- a/collectors/python.d.plugin/tor/tor.chart.py
+++ b/collectors/python.d.plugin/tor/tor.chart.py
@@ -24,7 +24,7 @@ ORDER = [
CHARTS = {
'traffic': {
- 'options': [None, 'Tor Traffic', 'KB/s', 'traffic', 'tor.traffic', 'area'],
+ 'options': [None, 'Tor Traffic', 'KiB/s', 'traffic', 'tor.traffic', 'area'],
'lines': [
['read', 'read', 'incremental', 1, 1024],
['write', 'write', 'incremental', 1, -1024],
@@ -39,10 +39,8 @@ class Service(SimpleService):
super(Service, self).__init__(configuration=configuration, name=name)
self.order = ORDER
self.definitions = CHARTS
-
self.port = self.configuration.get('control_port', DEF_PORT)
self.password = self.configuration.get('password')
-
self.use_socket = isinstance(self.port, str) and self.port != DEF_PORT and not self.port.isdigit()
self.conn = None
self.alive = False
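
The reordered tor constructor keeps the `use_socket` heuristic: a `control_port` that is a non-default, non-numeric string is treated as a UNIX socket path rather than a TCP port. A standalone sketch of that check; the `DEF_PORT` value below is an assumption standing in for the module's constant:

```python
# Standalone sketch of the use_socket heuristic kept by the tor module.
DEF_PORT = 'default'  # assumption: stands in for the module's DEF_PORT

def uses_socket(port):
    return isinstance(port, str) and port != DEF_PORT and not port.isdigit()

assert uses_socket('/var/run/tor/control') is True   # path -> UNIX socket
assert uses_socket('9051') is False                  # numeric string -> TCP
assert uses_socket(9051) is False                    # int -> TCP
```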
diff --git a/collectors/python.d.plugin/tor/tor.conf b/collectors/python.d.plugin/tor/tor.conf
index 8245414f..91b517a6 100644
--- a/collectors/python.d.plugin/tor/tor.conf
+++ b/collectors/python.d.plugin/tor/tor.conf
@@ -27,11 +27,9 @@
# If unset, the default for python.d.plugin is used.
# priority: 60000
-# retries sets the number of retries to be made in case of failures.
-# If unset, the default for python.d.plugin is used.
-# Attempts to restore the service are made once every update_every
-# and only if the module has collected values in the past.
-# retries: 60
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
# autodetection_retry sets the job re-check interval in seconds.
# The job is not deleted if check fails.
@@ -58,7 +56,7 @@
# # JOBs sharing a name are mutually exclusive
# update_every: 1 # the JOB's data collection frequency
# priority: 60000 # the JOB's order on the dashboard
-# retries: 10 # the JOB's number of restoration attempts
+# penalty: yes # the JOB's penalty
# autodetection_retry: 0 # the JOB's re-check interval in seconds
#
# Additionally to the above, tor plugin also supports the following:
diff --git a/collectors/python.d.plugin/traefik/README.md b/collectors/python.d.plugin/traefik/README.md
index 9b4a1820..61e0fdb7 100644
--- a/collectors/python.d.plugin/traefik/README.md
+++ b/collectors/python.d.plugin/traefik/README.md
@@ -46,9 +46,10 @@ priority : 60000
local:
url : 'http://localhost:8080/health'
- retries : 10
```
Without configuration, module attempts to connect to `http://localhost:8080/health`.
---
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Ftraefik%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/collectors/python.d.plugin/traefik/traefik.chart.py b/collectors/python.d.plugin/traefik/traefik.chart.py
index dc893322..570339d0 100644
--- a/collectors/python.d.plugin/traefik/traefik.chart.py
+++ b/collectors/python.d.plugin/traefik/traefik.chart.py
@@ -3,16 +3,13 @@
# Author: Alexandre Menezes (@ale_menezes)
# SPDX-License-Identifier: GPL-3.0-or-later
-from json import loads
from collections import defaultdict
+
+from json import loads
+
from bases.FrameworkServices.UrlService import UrlService
-# default module values (can be overridden per job in `config`)
-update_every = 1
-priority = 60000
-retries = 10
-# charts order (can be overridden if you want less charts, or different order)
ORDER = [
'response_statuses',
'response_codes',
@@ -99,14 +96,22 @@ class Service(UrlService):
self.url = self.configuration.get('url', 'http://localhost:8080/health')
self.order = ORDER
self.definitions = CHARTS
- self.data = {
- 'successful_requests': 0, 'redirects': 0, 'bad_requests': 0,
- 'server_errors': 0, 'other_requests': 0, '1xx': 0, '2xx': 0,
- '3xx': 0, '4xx': 0, '5xx': 0, 'other': 0,
- 'average_response_time_per_iteration_sec': 0
- }
self.last_total_response_time = 0
self.last_total_count = 0
+ self.data = {
+ 'successful_requests': 0,
+ 'redirects': 0,
+ 'bad_requests': 0,
+ 'server_errors': 0,
+ 'other_requests': 0,
+ '1xx': 0,
+ '2xx': 0,
+ '3xx': 0,
+ '4xx': 0,
+ '5xx': 0,
+ 'other': 0,
+ 'average_response_time_per_iteration_sec': 0,
+ }
def _get_data(self):
data = self._get_raw_data()
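
The reordered `self.data` dict seeds the response counters plus `average_response_time_per_iteration_sec`, which the module derives from the cumulative totals it remembers in `last_total_response_time` and `last_total_count`. A sketch of the usual delta computation behind such a per-iteration average; this illustrates the pattern, not the module's exact code:

```python
# Sketch of deriving a per-iteration average from cumulative totals,
# the pattern suggested by last_total_response_time/last_total_count.
# An illustration only, not the traefik module's exact code.

def avg_rt_per_iteration(total_rt_sec, total_count, last_rt_sec, last_count):
    delta_count = total_count - last_count
    if delta_count <= 0:
        return 0
    return (total_rt_sec - last_rt_sec) / delta_count

# 120 new requests accumulated 0.9s of response time since last time
print(avg_rt_per_iteration(10.5, 1120, 9.6, 1000))  # 0.0075
```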
diff --git a/collectors/python.d.plugin/traefik/traefik.conf b/collectors/python.d.plugin/traefik/traefik.conf
index 909b9e54..e3f182d3 100644
--- a/collectors/python.d.plugin/traefik/traefik.conf
+++ b/collectors/python.d.plugin/traefik/traefik.conf
@@ -27,11 +27,9 @@
# If unset, the default for python.d.plugin is used.
# priority: 60000
-# retries sets the number of retries to be made in case of failures.
-# If unset, the default for python.d.plugin is used.
-# Attempts to restore the service are made once every update_every
-# and only if the module has collected values in the past.
-# retries: 60
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
# autodetection_retry sets the job re-check interval in seconds.
# The job is not deleted if check fails.
@@ -58,7 +56,7 @@
# # JOBs sharing a name are mutually exclusive
# update_every: 1 # the JOB's data collection frequency
# priority: 60000 # the JOB's order on the dashboard
-# retries: 10 # the JOB's number of restoration attempts
+# penalty: yes # the JOB's penalty
# autodetection_retry: 0 # the JOB's re-check interval in seconds
#
# Additionally to the above, traefik plugin also supports the following:
diff --git a/collectors/python.d.plugin/unbound/README.md b/collectors/python.d.plugin/unbound/README.md
index 3b4fa16f..e213683c 100644
--- a/collectors/python.d.plugin/unbound/README.md
+++ b/collectors/python.d.plugin/unbound/README.md
@@ -74,3 +74,5 @@ While it's a bit more complicated to set up correctly, it is recommended
that you use a UNIX socket as it provides far better performance.
---
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Funbound%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/collectors/python.d.plugin/unbound/unbound.chart.py b/collectors/python.d.plugin/unbound/unbound.chart.py
index 52fcbf7e..adb58d41 100644
--- a/collectors/python.d.plugin/unbound/unbound.chart.py
+++ b/collectors/python.d.plugin/unbound/unbound.chart.py
@@ -13,7 +13,11 @@ from bases.loaders import YamlOrderedLoader
PRECISION = 1000
-ORDER = ['queries', 'recursion', 'reqlist']
+ORDER = [
+ 'queries',
+ 'recursion',
+ 'reqlist',
+]
CHARTS = {
'queries': {
diff --git a/collectors/python.d.plugin/unbound/unbound.conf b/collectors/python.d.plugin/unbound/unbound.conf
index 46c4b097..68561366 100644
--- a/collectors/python.d.plugin/unbound/unbound.conf
+++ b/collectors/python.d.plugin/unbound/unbound.conf
@@ -27,11 +27,9 @@
# If unset, the default for python.d.plugin is used.
# priority: 60000
-# retries sets the number of retries to be made in case of failures.
-# If unset, the default for python.d.plugin is used.
-# Attempts to restore the service are made once every update_everye
-# and only if the module has collected values in the past.
-# retries: 60
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
# autodetection_retry sets the job re-check interval in seconds.
# The job is not deleted if check fails.
@@ -58,7 +56,7 @@
# # JOBs sharing a name are mutually exclusive
# update_every: 1 # the JOB's data collection frequency
# priority: 60000 # the JOB's order on the dashboard
-# retries: 60 # the JOB's number of restoration attempts
+# penalty: yes # the JOB's penalty
# autodetection_retry: 0 # the JOB's re-check interval in seconds
#
# Additionally to the above, unbound also supports the following:
diff --git a/collectors/python.d.plugin/uwsgi/README.md b/collectors/python.d.plugin/uwsgi/README.md
index a062710d..9d455cfc 100644
--- a/collectors/python.d.plugin/uwsgi/README.md
+++ b/collectors/python.d.plugin/uwsgi/README.md
@@ -35,3 +35,5 @@ localhost:
```
When no configuration file is found, module tries to connect to TCP/IP socket: `localhost:1717`.
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fuwsgi%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/collectors/python.d.plugin/uwsgi/uwsgi.chart.py b/collectors/python.d.plugin/uwsgi/uwsgi.chart.py
index 5ebcfb55..511b770c 100644
--- a/collectors/python.d.plugin/uwsgi/uwsgi.chart.py
+++ b/collectors/python.d.plugin/uwsgi/uwsgi.chart.py
@@ -7,10 +7,6 @@ import json
from copy import deepcopy
from bases.FrameworkServices.SocketService import SocketService
-# default module values (can be overridden per job in `config`)
-# update_every = 2
-priority = 60000
-retries = 60
ORDER = [
'requests',
@@ -40,27 +36,27 @@ CHARTS = {
]
},
'tx': {
- 'options': [None, 'Transmitted data', 'KB/s', 'requests', 'uwsgi.tx', 'stacked'],
+ 'options': [None, 'Transmitted data', 'KiB/s', 'requests', 'uwsgi.tx', 'stacked'],
'lines': [
['tx', 'tx', 'incremental']
]
},
'avg_rt': {
- 'options': [None, 'Average request time', 'ms', 'requests', 'uwsgi.avg_rt', 'line'],
+ 'options': [None, 'Average request time', 'milliseconds', 'requests', 'uwsgi.avg_rt', 'line'],
'lines': [
['avg_rt', 'avg_rt', 'absolute']
]
},
'memory_rss': {
- 'options': [None, 'RSS (Resident Set Size)', 'MB', 'memory', 'uwsgi.memory_rss', 'stacked'],
+ 'options': [None, 'RSS (Resident Set Size)', 'MiB', 'memory', 'uwsgi.memory_rss', 'stacked'],
'lines': [
- ['memory_rss', 'memory_rss', 'absolute', 1, 1024 * 1024]
+ ['memory_rss', 'memory_rss', 'absolute', 1, 1 << 20]
]
},
'memory_vsz': {
- 'options': [None, 'VSZ (Virtual Memory Size)', 'MB', 'memory', 'uwsgi.memory_vsz', 'stacked'],
+ 'options': [None, 'VSZ (Virtual Memory Size)', 'MiB', 'memory', 'uwsgi.memory_vsz', 'stacked'],
'lines': [
- ['memory_vsz', 'memory_vsz', 'absolute', 1, 1024 * 1024]
+ ['memory_vsz', 'memory_vsz', 'absolute', 1, 1 << 20]
]
},
'exceptions': {
@@ -87,15 +83,13 @@ CHARTS = {
class Service(SocketService):
def __init__(self, configuration=None, name=None):
super(Service, self).__init__(configuration=configuration, name=name)
- self.url = self.configuration.get('host', 'localhost')
- self.port = self.configuration.get('port', 1717)
self.order = ORDER
self.definitions = deepcopy(CHARTS)
-
+ self.url = self.configuration.get('host', 'localhost')
+ self.port = self.configuration.get('port', 1717)
# Clear dynamic dimensions, these are added during `_get_data()` to allow adding workers at run-time
for chart in DYNAMIC_CHARTS:
self.definitions[chart]['lines'] = []
-
self.last_result = {}
self.workers = []
diff --git a/collectors/python.d.plugin/uwsgi/uwsgi.conf b/collectors/python.d.plugin/uwsgi/uwsgi.conf
index be1c2ada..7d09e733 100644
--- a/collectors/python.d.plugin/uwsgi/uwsgi.conf
+++ b/collectors/python.d.plugin/uwsgi/uwsgi.conf
@@ -27,11 +27,9 @@
# If unset, the default for python.d.plugin is used.
# priority: 60000
-# retries sets the number of retries to be made in case of failures.
-# If unset, the default for python.d.plugin is used.
-# Attempts to restore the service are made once every update_every
-# and only if the module has collected values in the past.
-# retries: 60
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
# autodetection_retry sets the job re-check interval in seconds.
# The job is not deleted if check fails.
@@ -58,7 +56,7 @@
# # JOBs sharing a name are mutually exclusive
# update_every: 1 # the JOB's data collection frequency
# priority: 60000 # the JOB's order on the dashboard
-# retries: 60 # the JOB's number of restoration attempts
+# penalty: yes # the JOB's penalty
# autodetection_retry: 0 # the JOB's re-check interval in seconds
#
# Additionally to the above, uwsgi also supports the following:
diff --git a/collectors/python.d.plugin/varnish/README.md b/collectors/python.d.plugin/varnish/README.md
index 96c7cafa..44d64efe 100644
--- a/collectors/python.d.plugin/varnish/README.md
+++ b/collectors/python.d.plugin/varnish/README.md
@@ -64,6 +64,14 @@ It produces:
### configuration
-No configuration is needed.
+Only one parameter is supported:
+
+```yaml
+instance_name: 'name'
+```
+
+The name of the varnishd instance to get logs from. If not specified, the host name is used.
---
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fvarnish%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/collectors/python.d.plugin/varnish/varnish.chart.py b/collectors/python.d.plugin/varnish/varnish.chart.py
index d889c2b3..da678157 100644
--- a/collectors/python.d.plugin/varnish/varnish.chart.py
+++ b/collectors/python.d.plugin/varnish/varnish.chart.py
@@ -8,10 +8,6 @@ import re
from bases.collection import find_binary
from bases.FrameworkServices.ExecutableService import ExecutableService
-# default module values (can be overridden per job in `config`)
-# update_every = 2
-priority = 60000
-retries = 60
ORDER = [
'session_connections',
@@ -47,7 +43,7 @@ CHARTS = {
]
},
'all_time_hit_rate': {
- 'options': [None, 'All History Hit Rate Ratio', 'percent', 'cache performance',
+ 'options': [None, 'All History Hit Rate Ratio', 'percentage', 'cache performance',
'varnish.all_time_hit_rate', 'stacked'],
'lines': [
['cache_hit', 'hit', 'percentage-of-absolute-row'],
@@ -55,7 +51,7 @@ CHARTS = {
['cache_hitpass', 'hitpass', 'percentage-of-absolute-row']]
},
'current_poll_hit_rate': {
- 'options': [None, 'Current Poll Hit Rate Ratio', 'percent', 'cache performance',
+ 'options': [None, 'Current Poll Hit Rate Ratio', 'percentage', 'cache performance',
'varnish.current_poll_hit_rate', 'stacked'],
'lines': [
['cache_hit', 'hit', 'percentage-of-incremental-row'],
@@ -127,7 +123,7 @@ CHARTS = {
]
},
'memory_usage': {
- 'options': [None, 'Memory Usage', 'MB', 'memory usage', 'varnish.memory_usage', 'stacked'],
+ 'options': [None, 'Memory Usage', 'MiB', 'memory usage', 'varnish.memory_usage', 'stacked'],
'lines': [
['memory_free', 'free', 'absolute', 1, 1 << 20],
['memory_allocated', 'allocated', 'absolute', 1, 1 << 20]]
@@ -140,6 +136,8 @@ CHARTS = {
}
}
+VARNISHSTAT = 'varnishstat'
+
class Parser:
_backend_new = re.compile(r'VBE.([\d\w_.]+)\(.*?\).(beresp[\w_]+)\s+(\d+)')
@@ -176,19 +174,31 @@ class Service(ExecutableService):
ExecutableService.__init__(self, configuration=configuration, name=name)
self.order = ORDER
self.definitions = CHARTS
- varnishstat = find_binary('varnishstat')
- self.command = [varnishstat, '-1'] if varnishstat else None
+ self.instance_name = configuration.get('instance_name')
self.parser = Parser()
+ self.command = None
+
+ def create_command(self):
+ varnishstat = find_binary(VARNISHSTAT)
+
+ if not varnishstat:
+ self.error("can't locate '{0}' binary or binary is not executable by user netdata".format(VARNISHSTAT))
+ return False
+
+ if self.instance_name:
+ self.command = [varnishstat, '-1', '-n', self.instance_name, '-t', '1']
+ else:
+ self.command = [varnishstat, '-1', '-t', '1']
+ return True
def check(self):
- if not self.command:
- self.error("Can't locate 'varnishstat' binary or binary is not executable by user netdata")
+ if not self.create_command():
return False
# STDOUT is not empty
reply = self._get_raw_data()
if not reply:
- self.error("No output from 'varnishstat'. Not enough privileges?")
+ self.error("No output from 'varnishstat'. Is it running? Not enough privileges?")
return False
self.parser.init(reply)
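
The new `create_command()` defers the binary lookup from `__init__` to `check()`, so a missing `varnishstat` produces a clear error instead of a silent `None` command; when `instance_name` is set, `-n` selects the instance, and `-t 1` bounds how long varnishstat waits for updated counters. A standalone sketch of the same flow, using `shutil.which` in place of netdata's internal `find_binary` helper:

```python
# Standalone sketch of the create_command() flow above, with
# shutil.which standing in for netdata's internal find_binary.
from shutil import which

VARNISHSTAT = 'varnishstat'

def create_command(instance_name=None):
    varnishstat = which(VARNISHSTAT)
    if not varnishstat:
        raise RuntimeError("can't locate '{0}' binary".format(VARNISHSTAT))
    command = [varnishstat, '-1', '-t', '1']
    if instance_name:
        command += ['-n', instance_name]
    return command

# e.g. ['/usr/bin/varnishstat', '-1', '-t', '1', '-n', 'myinstance']
```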
diff --git a/collectors/python.d.plugin/varnish/varnish.conf b/collectors/python.d.plugin/varnish/varnish.conf
index 4b069d51..54bfe4de 100644
--- a/collectors/python.d.plugin/varnish/varnish.conf
+++ b/collectors/python.d.plugin/varnish/varnish.conf
@@ -27,11 +27,9 @@
# If unset, the default for python.d.plugin is used.
# priority: 60000
-# retries sets the number of retries to be made in case of failures.
-# If unset, the default for python.d.plugin is used.
-# Attempts to restore the service are made once every update_every
-# and only if the module has collected values in the past.
-# retries: 60
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
# autodetection_retry sets the job re-check interval in seconds.
# The job is not deleted if check fails.
@@ -58,7 +56,11 @@
# # JOBs sharing a name are mutually exclusive
# update_every: 1 # the JOB's data collection frequency
# priority: 60000 # the JOB's order on the dashboard
-# retries: 60 # the JOB's number of restoration attempts
+# penalty: yes # the JOB's penalty
# autodetection_retry: 0 # the JOB's re-check interval in seconds
#
+# Additionally to the above, varnish also supports the following:
+#
+# instance_name: 'name' # the name of the varnishd instance to get logs from. If not specified, the host name is used.
+#
# ----------------------------------------------------------------------
diff --git a/collectors/python.d.plugin/w1sensor/README.md b/collectors/python.d.plugin/w1sensor/README.md
index b18f0835..94717c81 100644
--- a/collectors/python.d.plugin/w1sensor/README.md
+++ b/collectors/python.d.plugin/w1sensor/README.md
@@ -11,3 +11,5 @@ Charts are created dynamically based on the number of detected sensors.
For detailed configuration information please read [`w1sensor.conf`](w1sensor.conf) file.
---
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fw1sensor%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/collectors/python.d.plugin/w1sensor/w1sensor.chart.py b/collectors/python.d.plugin/w1sensor/w1sensor.chart.py
index 493c4a13..e50312fc 100644
--- a/collectors/python.d.plugin/w1sensor/w1sensor.chart.py
+++ b/collectors/python.d.plugin/w1sensor/w1sensor.chart.py
@@ -16,7 +16,9 @@ W1_DIR = '/sys/bus/w1/devices/'
# Lines matching the following regular expression contain a temperature value
RE_TEMP = re.compile(r' t=(\d+)')
-ORDER = ['temp']
+ORDER = [
+ 'temp',
+]
CHARTS = {
'temp': {
diff --git a/collectors/python.d.plugin/w1sensor/w1sensor.conf b/collectors/python.d.plugin/w1sensor/w1sensor.conf
index a4aed8dd..17271001 100644
--- a/collectors/python.d.plugin/w1sensor/w1sensor.conf
+++ b/collectors/python.d.plugin/w1sensor/w1sensor.conf
@@ -27,11 +27,9 @@
# If unset, the default for python.d.plugin is used.
# priority: 60000
-# retries sets the number of retries to be made in case of failures.
-# If unset, the default for python.d.plugin is used.
-# Attempts to restore the service are made once every update_every
-# and only if the module has collected values in the past.
-# retries: 60
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
# autodetection_retry sets the job re-check interval in seconds.
# The job is not deleted if check fails.
@@ -58,7 +56,7 @@
# # JOBs sharing a name are mutually exclusive
# update_every: 5 # the JOB's data collection frequency
# priority: 60000 # the JOB's order on the dashboard
-# retries: 60 # the JOB's number of restoration attempts
+# penalty: yes # the JOB's penalty
# autodetection_retry: 0 # the JOB's re-check interval in seconds
#
# Additionally to the above, example also supports the following:
diff --git a/collectors/python.d.plugin/web_log/README.md b/collectors/python.d.plugin/web_log/README.md
index e25a03fb..176551cf 100644
--- a/collectors/python.d.plugin/web_log/README.md
+++ b/collectors/python.d.plugin/web_log/README.md
@@ -21,7 +21,7 @@ netdata turns this "useless" log file, into a powerful performance and health mo
If netdata is installed on a system running a web server, it will detect it and it will automatically present a series of charts, with information obtained from the web server API, like these (*these do not come from the web server log file*):
![image](https://cloud.githubusercontent.com/assets/2662304/22900686/e283f636-f237-11e6-93d2-cbdf63de150c.png)
-*[**netdata**](https://my-netdata.io/) charts based on metrics collected by querying the `nginx` API (i.e. `/stab_status`).*
+*[**netdata**](https://my-netdata.io/) charts based on metrics collected by querying the `nginx` API (i.e. `/stub_status`).*
> [**netdata**](https://my-netdata.io/) supports `apache`, `nginx`, `lighttpd` and `tomcat`. To obtain real-time information from a web server API, the web server needs to expose it. For directions on configuring your web server, check the config files for each web server. There is a directory with a config file for each web server under [`/etc/netdata/python.d/`](../).
@@ -199,3 +199,5 @@ The column `minimum requests` state the minimum number of requests required for
[**netdata**](https://my-netdata.io/) alarms are user configurable. Sample config files can be found under directory `health/health.d` of the netdata github repository. So, even [`web_log` alarms can be adapted to your needs](../../../health/health.d/web_log.conf).
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fweb_log%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/collectors/python.d.plugin/web_log/web_log.chart.py b/collectors/python.d.plugin/web_log/web_log.chart.py
index 20e15f4c..99279046 100644
--- a/collectors/python.d.plugin/web_log/web_log.chart.py
+++ b/collectors/python.d.plugin/web_log/web_log.chart.py
@@ -25,7 +25,9 @@ from bases.collection import read_last_line
from bases.FrameworkServices.LogService import LogService
-ORDER_APACHE_CACHE = ['apache_cache']
+ORDER_APACHE_CACHE = [
+ 'apache_cache',
+]
ORDER_WEB = [
'response_statuses',
@@ -182,7 +184,7 @@ CHARTS_WEB = {
CHARTS_APACHE_CACHE = {
'apache_cache': {
- 'options': [None, 'Apache Cached Responses', 'percent cached', 'cached', 'web_log.apache_cache_cache',
+ 'options': [None, 'Apache Cached Responses', 'percentage', 'cached', 'web_log.apache_cache_cache',
'stacked'],
'lines': [
['hit', 'cache', 'percentage-of-absolute-row'],
diff --git a/collectors/python.d.plugin/web_log/web_log.conf b/collectors/python.d.plugin/web_log/web_log.conf
index a67957ae..0ac17f66 100644
--- a/collectors/python.d.plugin/web_log/web_log.conf
+++ b/collectors/python.d.plugin/web_log/web_log.conf
@@ -27,11 +27,9 @@
# If unset, the default for python.d.plugin is used.
# priority: 60000
-# retries sets the number of retries to be made in case of failures.
-# If unset, the default for python.d.plugin is used.
-# Attempts to restore the service are made once every update_every
-# and only if the module has collected values in the past.
-# retries: 60
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
# autodetection_retry sets the job re-check interval in seconds.
# The job is not deleted if check fails.
@@ -61,7 +59,7 @@
# # JOBs sharing a name are mutually exclusive
# update_every: 1 # the JOB's data collection frequency
# priority: 60000 # the JOB's order on the dashboard
-# retries: 60 # the JOB's number of restoration attempts
+# penalty: yes # the JOB's penalty
# autodetection_retry: 0 # the JOB's re-check interval in seconds
#
# Additionally to the above, web_log also supports the following:
diff --git a/collectors/statsd.plugin/.keep b/collectors/statsd.plugin/.keep
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/collectors/statsd.plugin/.keep
diff --git a/collectors/statsd.plugin/Makefile.am b/collectors/statsd.plugin/Makefile.am
index 7f09bacd..e63bf98b 100644
--- a/collectors/statsd.plugin/Makefile.am
+++ b/collectors/statsd.plugin/Makefile.am
@@ -9,12 +9,11 @@ dist_noinst_DATA = \
statsdconfigdir=$(libconfigdir)/statsd.d
dist_statsdconfig_DATA = \
- $(top_srcdir)/installer/.keep \
example.conf \
$(NULL)
userstatsdconfigdir=$(configdir)/statsd.d
dist_userstatsdconfig_DATA = \
- $(top_srcdir)/installer/.keep \
+ .keep \
$(NULL)
diff --git a/collectors/statsd.plugin/Makefile.in b/collectors/statsd.plugin/Makefile.in
deleted file mode 100644
index 5c16a86d..00000000
--- a/collectors/statsd.plugin/Makefile.in
+++ /dev/null
@@ -1,556 +0,0 @@
-# Makefile.in generated by automake 1.14.1 from Makefile.am.
-# @configure_input@
-
-# Copyright (C) 1994-2013 Free Software Foundation, Inc.
-
-# This Makefile.in is free software; the Free Software Foundation
-# gives unlimited permission to copy and/or distribute it,
-# with or without modifications, as long as this notice is preserved.
-
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
-# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
-# PARTICULAR PURPOSE.
-
-@SET_MAKE@
-
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-VPATH = @srcdir@
-am__is_gnu_make = test -n '$(MAKEFILE_LIST)' && test -n '$(MAKELEVEL)'
-am__make_running_with_option = \
- case $${target_option-} in \
- ?) ;; \
- *) echo "am__make_running_with_option: internal error: invalid" \
- "target option '$${target_option-}' specified" >&2; \
- exit 1;; \
- esac; \
- has_opt=no; \
- sane_makeflags=$$MAKEFLAGS; \
- if $(am__is_gnu_make); then \
- sane_makeflags=$$MFLAGS; \
- else \
- case $$MAKEFLAGS in \
- *\\[\ \ ]*) \
- bs=\\; \
- sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \
- | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \
- esac; \
- fi; \
- skip_next=no; \
- strip_trailopt () \
- { \
- flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \
- }; \
- for flg in $$sane_makeflags; do \
- test $$skip_next = yes && { skip_next=no; continue; }; \
- case $$flg in \
- *=*|--*) continue;; \
- -*I) strip_trailopt 'I'; skip_next=yes;; \
- -*I?*) strip_trailopt 'I';; \
- -*O) strip_trailopt 'O'; skip_next=yes;; \
- -*O?*) strip_trailopt 'O';; \
- -*l) strip_trailopt 'l'; skip_next=yes;; \
- -*l?*) strip_trailopt 'l';; \
- -[dEDm]) skip_next=yes;; \
- -[JT]) skip_next=yes;; \
- esac; \
- case $$flg in \
- *$$target_option*) has_opt=yes; break;; \
- esac; \
- done; \
- test $$has_opt = yes
-am__make_dryrun = (target_option=n; $(am__make_running_with_option))
-am__make_keepgoing = (target_option=k; $(am__make_running_with_option))
-pkgdatadir = $(datadir)/@PACKAGE@
-pkgincludedir = $(includedir)/@PACKAGE@
-pkglibdir = $(libdir)/@PACKAGE@
-pkglibexecdir = $(libexecdir)/@PACKAGE@
-am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd
-install_sh_DATA = $(install_sh) -c -m 644
-install_sh_PROGRAM = $(install_sh) -c
-install_sh_SCRIPT = $(install_sh) -c
-INSTALL_HEADER = $(INSTALL_DATA)
-transform = $(program_transform_name)
-NORMAL_INSTALL = :
-PRE_INSTALL = :
-POST_INSTALL = :
-NORMAL_UNINSTALL = :
-PRE_UNINSTALL = :
-POST_UNINSTALL = :
-build_triplet = @build@
-host_triplet = @host@
-subdir = collectors/statsd.plugin
-DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/Makefile.am \
- $(dist_noinst_DATA) $(dist_statsdconfig_DATA) \
- $(dist_userstatsdconfig_DATA)
-ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
-am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \
- $(top_srcdir)/build/m4/ax_c__generic.m4 \
- $(top_srcdir)/build/m4/ax_c_lto.m4 \
- $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \
- $(top_srcdir)/build/m4/ax_c_mallopt.m4 \
- $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \
- $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \
- $(top_srcdir)/build/m4/ax_pthread.m4 \
- $(top_srcdir)/build/m4/jemalloc.m4 \
- $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac
-am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
- $(ACLOCAL_M4)
-mkinstalldirs = $(install_sh) -d
-CONFIG_HEADER = $(top_builddir)/config.h
-CONFIG_CLEAN_FILES =
-CONFIG_CLEAN_VPATH_FILES =
-AM_V_P = $(am__v_P_@AM_V@)
-am__v_P_ = $(am__v_P_@AM_DEFAULT_V@)
-am__v_P_0 = false
-am__v_P_1 = :
-AM_V_GEN = $(am__v_GEN_@AM_V@)
-am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@)
-am__v_GEN_0 = @echo " GEN " $@;
-am__v_GEN_1 =
-AM_V_at = $(am__v_at_@AM_V@)
-am__v_at_ = $(am__v_at_@AM_DEFAULT_V@)
-am__v_at_0 = @
-am__v_at_1 =
-SOURCES =
-DIST_SOURCES =
-am__can_run_installinfo = \
- case $$AM_UPDATE_INFO_DIR in \
- n|no|NO) false;; \
- *) (install-info --version) >/dev/null 2>&1;; \
- esac
-am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`;
-am__vpath_adj = case $$p in \
- $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \
- *) f=$$p;; \
- esac;
-am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`;
-am__install_max = 40
-am__nobase_strip_setup = \
- srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'`
-am__nobase_strip = \
- for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||"
-am__nobase_list = $(am__nobase_strip_setup); \
- for p in $$list; do echo "$$p $$p"; done | \
- sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \
- $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \
- if (++n[$$2] == $(am__install_max)) \
- { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \
- END { for (dir in files) print dir, files[dir] }'
-am__base_list = \
- sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \
- sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g'
-am__uninstall_files_from_dir = { \
- test -z "$$files" \
- || { test ! -d "$$dir" && test ! -f "$$dir" && test ! -r "$$dir"; } \
- || { echo " ( cd '$$dir' && rm -f" $$files ")"; \
- $(am__cd) "$$dir" && rm -f $$files; }; \
- }
-am__installdirs = "$(DESTDIR)$(statsdconfigdir)" \
- "$(DESTDIR)$(userstatsdconfigdir)"
-DATA = $(dist_noinst_DATA) $(dist_statsdconfig_DATA) \
- $(dist_userstatsdconfig_DATA)
-am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP)
-DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
-ACLOCAL = @ACLOCAL@
-AMTAR = @AMTAR@
-AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@
-AUTOCONF = @AUTOCONF@
-AUTOHEADER = @AUTOHEADER@
-AUTOMAKE = @AUTOMAKE@
-AWK = @AWK@
-CC = @CC@
-CCDEPMODE = @CCDEPMODE@
-CFLAGS = @CFLAGS@
-CPP = @CPP@
-CPPFLAGS = @CPPFLAGS@
-CYGPATH_W = @CYGPATH_W@
-DEFS = @DEFS@
-DEPDIR = @DEPDIR@
-ECHO_C = @ECHO_C@
-ECHO_N = @ECHO_N@
-ECHO_T = @ECHO_T@
-EGREP = @EGREP@
-EXEEXT = @EXEEXT@
-GREP = @GREP@
-INSTALL = @INSTALL@
-INSTALL_DATA = @INSTALL_DATA@
-INSTALL_PROGRAM = @INSTALL_PROGRAM@
-INSTALL_SCRIPT = @INSTALL_SCRIPT@
-INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
-IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@
-IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@
-LDFLAGS = @LDFLAGS@
-LIBCAP_CFLAGS = @LIBCAP_CFLAGS@
-LIBCAP_LIBS = @LIBCAP_LIBS@
-LIBMNL_CFLAGS = @LIBMNL_CFLAGS@
-LIBMNL_LIBS = @LIBMNL_LIBS@
-LIBOBJS = @LIBOBJS@
-LIBS = @LIBS@
-LTLIBOBJS = @LTLIBOBJS@
-MAINT = @MAINT@
-MAKEINFO = @MAKEINFO@
-MATH_CFLAGS = @MATH_CFLAGS@
-MATH_LIBS = @MATH_LIBS@
-MKDIR_P = @MKDIR_P@
-NFACCT_CFLAGS = @NFACCT_CFLAGS@
-NFACCT_LIBS = @NFACCT_LIBS@
-OBJEXT = @OBJEXT@
-OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@
-OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@
-OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@
-OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@
-OPTIONAL_MATH_CLFAGS = @OPTIONAL_MATH_CLFAGS@
-OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@
-OPTIONAL_NFACCT_CLFAGS = @OPTIONAL_NFACCT_CLFAGS@
-OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@
-OPTIONAL_UUID_CLFAGS = @OPTIONAL_UUID_CLFAGS@
-OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@
-OPTIONAL_ZLIB_CLFAGS = @OPTIONAL_ZLIB_CLFAGS@
-OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@
-PACKAGE = @PACKAGE@
-PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@
-PACKAGE_NAME = @PACKAGE_NAME@
-PACKAGE_RPM_RELEASE = @PACKAGE_RPM_RELEASE@
-PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@
-PACKAGE_STRING = @PACKAGE_STRING@
-PACKAGE_TARNAME = @PACKAGE_TARNAME@
-PACKAGE_URL = @PACKAGE_URL@
-PACKAGE_VERSION = @PACKAGE_VERSION@
-PATH_SEPARATOR = @PATH_SEPARATOR@
-PKG_CONFIG = @PKG_CONFIG@
-PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@
-PKG_CONFIG_PATH = @PKG_CONFIG_PATH@
-PTHREAD_CC = @PTHREAD_CC@
-PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
-PTHREAD_LIBS = @PTHREAD_LIBS@
-SET_MAKE = @SET_MAKE@
-SHELL = @SHELL@
-SSE_CANDIDATE = @SSE_CANDIDATE@
-STRIP = @STRIP@
-UUID_CFLAGS = @UUID_CFLAGS@
-UUID_LIBS = @UUID_LIBS@
-VERSION = @VERSION@
-ZLIB_CFLAGS = @ZLIB_CFLAGS@
-ZLIB_LIBS = @ZLIB_LIBS@
-abs_builddir = @abs_builddir@
-abs_srcdir = @abs_srcdir@
-abs_top_builddir = @abs_top_builddir@
-abs_top_srcdir = @abs_top_srcdir@
-ac_ct_CC = @ac_ct_CC@
-am__include = @am__include@
-am__leading_dot = @am__leading_dot@
-am__quote = @am__quote@
-am__tar = @am__tar@
-am__untar = @am__untar@
-ax_pthread_config = @ax_pthread_config@
-bindir = @bindir@
-build = @build@
-build_alias = @build_alias@
-build_cpu = @build_cpu@
-build_os = @build_os@
-build_target = @build_target@
-build_vendor = @build_vendor@
-builddir = @builddir@
-cachedir = @cachedir@
-chartsdir = @chartsdir@
-configdir = @configdir@
-datadir = @datadir@
-datarootdir = @datarootdir@
-docdir = @docdir@
-dvidir = @dvidir@
-exec_prefix = @exec_prefix@
-has_jemalloc = @has_jemalloc@
-has_tcmalloc = @has_tcmalloc@
-host = @host@
-host_alias = @host_alias@
-host_cpu = @host_cpu@
-host_os = @host_os@
-host_vendor = @host_vendor@
-htmldir = @htmldir@
-includedir = @includedir@
-infodir = @infodir@
-install_sh = @install_sh@
-libconfigdir = @libconfigdir@
-libdir = @libdir@
-libexecdir = @libexecdir@
-localedir = @localedir@
-localstatedir = @localstatedir@
-logdir = @logdir@
-mandir = @mandir@
-mkdir_p = @mkdir_p@
-nodedir = @nodedir@
-oldincludedir = @oldincludedir@
-pdfdir = @pdfdir@
-pluginsdir = @pluginsdir@
-prefix = @prefix@
-program_transform_name = @program_transform_name@
-psdir = @psdir@
-pythondir = @pythondir@
-registrydir = @registrydir@
-sbindir = @sbindir@
-sharedstatedir = @sharedstatedir@
-srcdir = @srcdir@
-sysconfdir = @sysconfdir@
-target_alias = @target_alias@
-top_build_prefix = @top_build_prefix@
-top_builddir = @top_builddir@
-top_srcdir = @top_srcdir@
-varlibdir = @varlibdir@
-webdir = @webdir@
-AUTOMAKE_OPTIONS = subdir-objects
-MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
-dist_noinst_DATA = \
- README.md \
- $(NULL)
-
-statsdconfigdir = $(libconfigdir)/statsd.d
-dist_statsdconfig_DATA = \
- $(top_srcdir)/installer/.keep \
- example.conf \
- $(NULL)
-
-userstatsdconfigdir = $(configdir)/statsd.d
-dist_userstatsdconfig_DATA = \
- $(top_srcdir)/installer/.keep \
- $(NULL)
-
-all: all-am
-
-.SUFFIXES:
-$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps)
- @for dep in $?; do \
- case '$(am__configure_deps)' in \
- *$$dep*) \
- ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \
- && { if test -f $@; then exit 0; else break; fi; }; \
- exit 1;; \
- esac; \
- done; \
- echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu collectors/statsd.plugin/Makefile'; \
- $(am__cd) $(top_srcdir) && \
- $(AUTOMAKE) --gnu collectors/statsd.plugin/Makefile
-.PRECIOUS: Makefile
-Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
- @case '$?' in \
- *config.status*) \
- cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \
- *) \
- echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \
- cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \
- esac;
-
-$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES)
- cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
-
-$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps)
- cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
-$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps)
- cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
-$(am__aclocal_m4_deps):
-install-dist_statsdconfigDATA: $(dist_statsdconfig_DATA)
- @$(NORMAL_INSTALL)
- @list='$(dist_statsdconfig_DATA)'; test -n "$(statsdconfigdir)" || list=; \
- if test -n "$$list"; then \
- echo " $(MKDIR_P) '$(DESTDIR)$(statsdconfigdir)'"; \
- $(MKDIR_P) "$(DESTDIR)$(statsdconfigdir)" || exit 1; \
- fi; \
- for p in $$list; do \
- if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
- echo "$$d$$p"; \
- done | $(am__base_list) | \
- while read files; do \
- echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(statsdconfigdir)'"; \
- $(INSTALL_DATA) $$files "$(DESTDIR)$(statsdconfigdir)" || exit $$?; \
- done
-
-uninstall-dist_statsdconfigDATA:
- @$(NORMAL_UNINSTALL)
- @list='$(dist_statsdconfig_DATA)'; test -n "$(statsdconfigdir)" || list=; \
- files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
- dir='$(DESTDIR)$(statsdconfigdir)'; $(am__uninstall_files_from_dir)
-install-dist_userstatsdconfigDATA: $(dist_userstatsdconfig_DATA)
- @$(NORMAL_INSTALL)
- @list='$(dist_userstatsdconfig_DATA)'; test -n "$(userstatsdconfigdir)" || list=; \
- if test -n "$$list"; then \
- echo " $(MKDIR_P) '$(DESTDIR)$(userstatsdconfigdir)'"; \
- $(MKDIR_P) "$(DESTDIR)$(userstatsdconfigdir)" || exit 1; \
- fi; \
- for p in $$list; do \
- if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
- echo "$$d$$p"; \
- done | $(am__base_list) | \
- while read files; do \
- echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(userstatsdconfigdir)'"; \
- $(INSTALL_DATA) $$files "$(DESTDIR)$(userstatsdconfigdir)" || exit $$?; \
- done
-
-uninstall-dist_userstatsdconfigDATA:
- @$(NORMAL_UNINSTALL)
- @list='$(dist_userstatsdconfig_DATA)'; test -n "$(userstatsdconfigdir)" || list=; \
- files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
- dir='$(DESTDIR)$(userstatsdconfigdir)'; $(am__uninstall_files_from_dir)
-tags TAGS:
-
-ctags CTAGS:
-
-cscope cscopelist:
-
-
-distdir: $(DISTFILES)
- @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
- topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
- list='$(DISTFILES)'; \
- dist_files=`for file in $$list; do echo $$file; done | \
- sed -e "s|^$$srcdirstrip/||;t" \
- -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \
- case $$dist_files in \
- */*) $(MKDIR_P) `echo "$$dist_files" | \
- sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \
- sort -u` ;; \
- esac; \
- for file in $$dist_files; do \
- if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \
- if test -d $$d/$$file; then \
- dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \
- if test -d "$(distdir)/$$file"; then \
- find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
- fi; \
- if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \
- cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \
- find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
- fi; \
- cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \
- else \
- test -f "$(distdir)/$$file" \
- || cp -p $$d/$$file "$(distdir)/$$file" \
- || exit 1; \
- fi; \
- done
-check-am: all-am
-check: check-am
-all-am: Makefile $(DATA)
-installdirs:
- for dir in "$(DESTDIR)$(statsdconfigdir)" "$(DESTDIR)$(userstatsdconfigdir)"; do \
- test -z "$$dir" || $(MKDIR_P) "$$dir"; \
- done
-install: install-am
-install-exec: install-exec-am
-install-data: install-data-am
-uninstall: uninstall-am
-
-install-am: all-am
- @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
-
-installcheck: installcheck-am
-install-strip:
- if test -z '$(STRIP)'; then \
- $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
- install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
- install; \
- else \
- $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
- install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
- "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \
- fi
-mostlyclean-generic:
-
-clean-generic:
-
-distclean-generic:
- -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
- -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES)
-
-maintainer-clean-generic:
- @echo "This command is intended for maintainers to use"
- @echo "it deletes files that may require special tools to rebuild."
- -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES)
-clean: clean-am
-
-clean-am: clean-generic mostlyclean-am
-
-distclean: distclean-am
- -rm -f Makefile
-distclean-am: clean-am distclean-generic
-
-dvi: dvi-am
-
-dvi-am:
-
-html: html-am
-
-html-am:
-
-info: info-am
-
-info-am:
-
-install-data-am: install-dist_statsdconfigDATA \
- install-dist_userstatsdconfigDATA
-
-install-dvi: install-dvi-am
-
-install-dvi-am:
-
-install-exec-am:
-
-install-html: install-html-am
-
-install-html-am:
-
-install-info: install-info-am
-
-install-info-am:
-
-install-man:
-
-install-pdf: install-pdf-am
-
-install-pdf-am:
-
-install-ps: install-ps-am
-
-install-ps-am:
-
-installcheck-am:
-
-maintainer-clean: maintainer-clean-am
- -rm -f Makefile
-maintainer-clean-am: distclean-am maintainer-clean-generic
-
-mostlyclean: mostlyclean-am
-
-mostlyclean-am: mostlyclean-generic
-
-pdf: pdf-am
-
-pdf-am:
-
-ps: ps-am
-
-ps-am:
-
-uninstall-am: uninstall-dist_statsdconfigDATA \
- uninstall-dist_userstatsdconfigDATA
-
-.MAKE: install-am install-strip
-
-.PHONY: all all-am check check-am clean clean-generic cscopelist-am \
- ctags-am distclean distclean-generic distdir dvi dvi-am html \
- html-am info info-am install install-am install-data \
- install-data-am install-dist_statsdconfigDATA \
- install-dist_userstatsdconfigDATA install-dvi install-dvi-am \
- install-exec install-exec-am install-html install-html-am \
- install-info install-info-am install-man install-pdf \
- install-pdf-am install-ps install-ps-am install-strip \
- installcheck installcheck-am installdirs maintainer-clean \
- maintainer-clean-generic mostlyclean mostlyclean-generic pdf \
- pdf-am ps ps-am tags-am uninstall uninstall-am \
- uninstall-dist_statsdconfigDATA \
- uninstall-dist_userstatsdconfigDATA
-
-
-# Tell versions [3.59,3.63) of GNU make to not export all variables.
-# Otherwise a system limit (for SysV at least) may be exceeded.
-.NOEXPORT:
diff --git a/collectors/statsd.plugin/README.md b/collectors/statsd.plugin/README.md
index 6ef03834..399918dc 100644
--- a/collectors/statsd.plugin/README.md
+++ b/collectors/statsd.plugin/README.md
@@ -1,22 +1,20 @@
-# Netdata Statsd
+# statsd.plugin
statsd is a system to collect data from any application. Applications are sending metrics to it, usually via non-blocking UDP communication, and statsd servers collect these metrics, perform a few simple calculations on them and push them to backend time-series databases.
There is a [plethora of client libraries](https://github.com/etsy/statsd/wiki#client-implementations) for embedding statsd metrics to any application framework. This makes statsd quite popular for custom application metrics.
-## netdata statsd
-
netdata is a fully featured statsd server. It can collect statsd formatted metrics, visualize them on its dashboards, stream them to other netdata servers or archive them to backend time-series databases.
-netdata statsd is inside netdata (an internal plugin, running inside the netdata daemon), it is configured via `netdata.conf` and by-default listens on standard statsd ports (tcp and udp 8125 - yes, netdata statsd server supports both tcp and udp at the same time).
+Netdata statsd is inside Netdata (an internal plugin, running inside the Netdata daemon). It is configured via `netdata.conf` and by default listens on standard statsd ports (tcp and udp 8125 - yes, the Netdata statsd server supports both tcp and udp at the same time).
-Since statsd is embedded in netdata, it means you now have a statsd server embedded on all your servers. So, the application can send its metrics to `localhost:8125`. This provides a distributed statsd implementation.
+Since statsd is embedded in Netdata, you now have a statsd server embedded on all your servers. So, the application can send its metrics to `localhost:8125`. This provides a distributed statsd implementation.
-netdata statsd is fast. It can collect more than **1.200.000 metrics per second** on modern hardware, more than **200Mbps of sustained statsd traffic**, using 1 CPU core (yes, it is single threaded - actually double-threaded, one thread collects metrics, another one updates the charts from the collected data).
+Netdata statsd is fast. It can collect more than **1,200,000 metrics per second** on modern hardware, more than **200Mbps of sustained statsd traffic**, using 1 CPU core (yes, it is single threaded - actually double-threaded: one thread collects metrics, another updates the charts from the collected data).
-## metrics supported by netdata
+## Metrics supported by Netdata
-netdata fully supports the statsd protocol. All statsd client libraries can be used with netdata too.
+Netdata fully supports the statsd protocol. All statsd client libraries can be used with Netdata too.
- **Gauges**
@@ -521,3 +519,5 @@ statsd "metric1:10|g" "metric2:10|c" ...
```
The function is smart enough to call `nc` just once and pass all the metrics to it. It will also automatically switch to TCP if the metrics to send are above 1000 bytes.
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fstatsd.plugin%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
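
The README describes applications sending statsd metrics over non-blocking UDP to the embedded server on `localhost:8125`; since the collector ships inside every Netdata agent, a client needs nothing beyond a UDP socket. A minimal Python sketch of such a client:

```python
# Minimal sketch of a statsd client talking to the embedded server
# described above: plain statsd text lines over UDP to localhost:8125.
import socket

def send_statsd(*metrics, host='localhost', port=8125):
    """Send statsd-formatted metrics, e.g. 'metric1:10|g', over UDP."""
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    try:
        # multiple metrics may share one datagram, newline-separated
        sock.sendto('\n'.join(metrics).encode(), (host, port))
    finally:
        sock.close()

send_statsd('metric1:10|g', 'metric2:10|c')
```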
diff --git a/collectors/statsd.plugin/statsd.c b/collectors/statsd.plugin/statsd.c
index c92bfd1c..534466a0 100644
--- a/collectors/statsd.plugin/statsd.c
+++ b/collectors/statsd.plugin/statsd.c
@@ -2383,7 +2383,7 @@ void *statsd_main(void *ptr) {
, "statsd"
, NULL
, "statsd server TCP connected sockets"
- , "connected"
+ , "sockets"
, PLUGIN_STATSD_NAME
, "stats"
, 132016
diff --git a/collectors/tc.plugin/Makefile.in b/collectors/tc.plugin/Makefile.in
deleted file mode 100644
index d336e1f0..00000000
--- a/collectors/tc.plugin/Makefile.in
+++ /dev/null
@@ -1,562 +0,0 @@
-# Makefile.in generated by automake 1.14.1 from Makefile.am.
-# @configure_input@
-
-# Copyright (C) 1994-2013 Free Software Foundation, Inc.
-
-# This Makefile.in is free software; the Free Software Foundation
-# gives unlimited permission to copy and/or distribute it,
-# with or without modifications, as long as this notice is preserved.
-
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
-# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
-# PARTICULAR PURPOSE.
-
-@SET_MAKE@
-
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-
-VPATH = @srcdir@
-am__is_gnu_make = test -n '$(MAKEFILE_LIST)' && test -n '$(MAKELEVEL)'
-am__make_running_with_option = \
- case $${target_option-} in \
- ?) ;; \
- *) echo "am__make_running_with_option: internal error: invalid" \
- "target option '$${target_option-}' specified" >&2; \
- exit 1;; \
- esac; \
- has_opt=no; \
- sane_makeflags=$$MAKEFLAGS; \
- if $(am__is_gnu_make); then \
- sane_makeflags=$$MFLAGS; \
- else \
- case $$MAKEFLAGS in \
- *\\[\ \ ]*) \
- bs=\\; \
- sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \
- | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \
- esac; \
- fi; \
- skip_next=no; \
- strip_trailopt () \
- { \
- flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \
- }; \
- for flg in $$sane_makeflags; do \
- test $$skip_next = yes && { skip_next=no; continue; }; \
- case $$flg in \
- *=*|--*) continue;; \
- -*I) strip_trailopt 'I'; skip_next=yes;; \
- -*I?*) strip_trailopt 'I';; \
- -*O) strip_trailopt 'O'; skip_next=yes;; \
- -*O?*) strip_trailopt 'O';; \
- -*l) strip_trailopt 'l'; skip_next=yes;; \
- -*l?*) strip_trailopt 'l';; \
- -[dEDm]) skip_next=yes;; \
- -[JT]) skip_next=yes;; \
- esac; \
- case $$flg in \
- *$$target_option*) has_opt=yes; break;; \
- esac; \
- done; \
- test $$has_opt = yes
-am__make_dryrun = (target_option=n; $(am__make_running_with_option))
-am__make_keepgoing = (target_option=k; $(am__make_running_with_option))
-pkgdatadir = $(datadir)/@PACKAGE@
-pkgincludedir = $(includedir)/@PACKAGE@
-pkglibdir = $(libdir)/@PACKAGE@
-pkglibexecdir = $(libexecdir)/@PACKAGE@
-am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd
-install_sh_DATA = $(install_sh) -c -m 644
-install_sh_PROGRAM = $(install_sh) -c
-install_sh_SCRIPT = $(install_sh) -c
-INSTALL_HEADER = $(INSTALL_DATA)
-transform = $(program_transform_name)
-NORMAL_INSTALL = :
-PRE_INSTALL = :
-POST_INSTALL = :
-NORMAL_UNINSTALL = :
-PRE_UNINSTALL = :
-POST_UNINSTALL = :
-build_triplet = @build@
-host_triplet = @host@
-DIST_COMMON = $(top_srcdir)/build/subst.inc $(srcdir)/Makefile.in \
- $(srcdir)/Makefile.am $(dist_plugins_SCRIPTS) \
- $(dist_noinst_DATA)
-subdir = collectors/tc.plugin
-ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
-am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \
- $(top_srcdir)/build/m4/ax_c__generic.m4 \
- $(top_srcdir)/build/m4/ax_c_lto.m4 \
- $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \
- $(top_srcdir)/build/m4/ax_c_mallopt.m4 \
- $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \
- $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \
- $(top_srcdir)/build/m4/ax_pthread.m4 \
- $(top_srcdir)/build/m4/jemalloc.m4 \
- $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac
-am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
- $(ACLOCAL_M4)
-mkinstalldirs = $(install_sh) -d
-CONFIG_HEADER = $(top_builddir)/config.h
-CONFIG_CLEAN_FILES =
-CONFIG_CLEAN_VPATH_FILES =
-am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`;
-am__vpath_adj = case $$p in \
- $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \
- *) f=$$p;; \
- esac;
-am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`;
-am__install_max = 40
-am__nobase_strip_setup = \
- srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'`
-am__nobase_strip = \
- for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||"
-am__nobase_list = $(am__nobase_strip_setup); \
- for p in $$list; do echo "$$p $$p"; done | \
- sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \
- $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \
- if (++n[$$2] == $(am__install_max)) \
- { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \
- END { for (dir in files) print dir, files[dir] }'
-am__base_list = \
- sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \
- sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g'
-am__uninstall_files_from_dir = { \
- test -z "$$files" \
- || { test ! -d "$$dir" && test ! -f "$$dir" && test ! -r "$$dir"; } \
- || { echo " ( cd '$$dir' && rm -f" $$files ")"; \
- $(am__cd) "$$dir" && rm -f $$files; }; \
- }
-am__installdirs = "$(DESTDIR)$(pluginsdir)"
-SCRIPTS = $(dist_plugins_SCRIPTS)
-AM_V_P = $(am__v_P_@AM_V@)
-am__v_P_ = $(am__v_P_@AM_DEFAULT_V@)
-am__v_P_0 = false
-am__v_P_1 = :
-AM_V_GEN = $(am__v_GEN_@AM_V@)
-am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@)
-am__v_GEN_0 = @echo " GEN " $@;
-am__v_GEN_1 =
-AM_V_at = $(am__v_at_@AM_V@)
-am__v_at_ = $(am__v_at_@AM_DEFAULT_V@)
-am__v_at_0 = @
-am__v_at_1 =
-SOURCES =
-DIST_SOURCES =
-am__can_run_installinfo = \
- case $$AM_UPDATE_INFO_DIR in \
- n|no|NO) false;; \
- *) (install-info --version) >/dev/null 2>&1;; \
- esac
-DATA = $(dist_noinst_DATA)
-am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP)
-DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
-ACLOCAL = @ACLOCAL@
-AMTAR = @AMTAR@
-AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@
-AUTOCONF = @AUTOCONF@
-AUTOHEADER = @AUTOHEADER@
-AUTOMAKE = @AUTOMAKE@
-AWK = @AWK@
-CC = @CC@
-CCDEPMODE = @CCDEPMODE@
-CFLAGS = @CFLAGS@
-CPP = @CPP@
-CPPFLAGS = @CPPFLAGS@
-CYGPATH_W = @CYGPATH_W@
-DEFS = @DEFS@
-DEPDIR = @DEPDIR@
-ECHO_C = @ECHO_C@
-ECHO_N = @ECHO_N@
-ECHO_T = @ECHO_T@
-EGREP = @EGREP@
-EXEEXT = @EXEEXT@
-GREP = @GREP@
-INSTALL = @INSTALL@
-INSTALL_DATA = @INSTALL_DATA@
-INSTALL_PROGRAM = @INSTALL_PROGRAM@
-INSTALL_SCRIPT = @INSTALL_SCRIPT@
-INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
-IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@
-IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@
-LDFLAGS = @LDFLAGS@
-LIBCAP_CFLAGS = @LIBCAP_CFLAGS@
-LIBCAP_LIBS = @LIBCAP_LIBS@
-LIBMNL_CFLAGS = @LIBMNL_CFLAGS@
-LIBMNL_LIBS = @LIBMNL_LIBS@
-LIBOBJS = @LIBOBJS@
-LIBS = @LIBS@
-LTLIBOBJS = @LTLIBOBJS@
-MAINT = @MAINT@
-MAKEINFO = @MAKEINFO@
-MATH_CFLAGS = @MATH_CFLAGS@
-MATH_LIBS = @MATH_LIBS@
-MKDIR_P = @MKDIR_P@
-NFACCT_CFLAGS = @NFACCT_CFLAGS@
-NFACCT_LIBS = @NFACCT_LIBS@
-OBJEXT = @OBJEXT@
-OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@
-OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@
-OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@
-OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@
-OPTIONAL_MATH_CLFAGS = @OPTIONAL_MATH_CLFAGS@
-OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@
-OPTIONAL_NFACCT_CLFAGS = @OPTIONAL_NFACCT_CLFAGS@
-OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@
-OPTIONAL_UUID_CLFAGS = @OPTIONAL_UUID_CLFAGS@
-OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@
-OPTIONAL_ZLIB_CLFAGS = @OPTIONAL_ZLIB_CLFAGS@
-OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@
-PACKAGE = @PACKAGE@
-PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@
-PACKAGE_NAME = @PACKAGE_NAME@
-PACKAGE_RPM_RELEASE = @PACKAGE_RPM_RELEASE@
-PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@
-PACKAGE_STRING = @PACKAGE_STRING@
-PACKAGE_TARNAME = @PACKAGE_TARNAME@
-PACKAGE_URL = @PACKAGE_URL@
-PACKAGE_VERSION = @PACKAGE_VERSION@
-PATH_SEPARATOR = @PATH_SEPARATOR@
-PKG_CONFIG = @PKG_CONFIG@
-PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@
-PKG_CONFIG_PATH = @PKG_CONFIG_PATH@
-PTHREAD_CC = @PTHREAD_CC@
-PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
-PTHREAD_LIBS = @PTHREAD_LIBS@
-SET_MAKE = @SET_MAKE@
-SHELL = @SHELL@
-SSE_CANDIDATE = @SSE_CANDIDATE@
-STRIP = @STRIP@
-UUID_CFLAGS = @UUID_CFLAGS@
-UUID_LIBS = @UUID_LIBS@
-VERSION = @VERSION@
-ZLIB_CFLAGS = @ZLIB_CFLAGS@
-ZLIB_LIBS = @ZLIB_LIBS@
-abs_builddir = @abs_builddir@
-abs_srcdir = @abs_srcdir@
-abs_top_builddir = @abs_top_builddir@
-abs_top_srcdir = @abs_top_srcdir@
-ac_ct_CC = @ac_ct_CC@
-am__include = @am__include@
-am__leading_dot = @am__leading_dot@
-am__quote = @am__quote@
-am__tar = @am__tar@
-am__untar = @am__untar@
-ax_pthread_config = @ax_pthread_config@
-bindir = @bindir@
-build = @build@
-build_alias = @build_alias@
-build_cpu = @build_cpu@
-build_os = @build_os@
-build_target = @build_target@
-build_vendor = @build_vendor@
-builddir = @builddir@
-cachedir = @cachedir@
-chartsdir = @chartsdir@
-configdir = @configdir@
-datadir = @datadir@
-datarootdir = @datarootdir@
-docdir = @docdir@
-dvidir = @dvidir@
-exec_prefix = @exec_prefix@
-has_jemalloc = @has_jemalloc@
-has_tcmalloc = @has_tcmalloc@
-host = @host@
-host_alias = @host_alias@
-host_cpu = @host_cpu@
-host_os = @host_os@
-host_vendor = @host_vendor@
-htmldir = @htmldir@
-includedir = @includedir@
-infodir = @infodir@
-install_sh = @install_sh@
-libconfigdir = @libconfigdir@
-libdir = @libdir@
-libexecdir = @libexecdir@
-localedir = @localedir@
-localstatedir = @localstatedir@
-logdir = @logdir@
-mandir = @mandir@
-mkdir_p = @mkdir_p@
-nodedir = @nodedir@
-oldincludedir = @oldincludedir@
-pdfdir = @pdfdir@
-pluginsdir = @pluginsdir@
-prefix = @prefix@
-program_transform_name = @program_transform_name@
-psdir = @psdir@
-pythondir = @pythondir@
-registrydir = @registrydir@
-sbindir = @sbindir@
-sharedstatedir = @sharedstatedir@
-srcdir = @srcdir@
-sysconfdir = @sysconfdir@
-target_alias = @target_alias@
-top_build_prefix = @top_build_prefix@
-top_builddir = @top_builddir@
-top_srcdir = @top_srcdir@
-varlibdir = @varlibdir@
-webdir = @webdir@
-AUTOMAKE_OPTIONS = subdir-objects
-MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
-CLEANFILES = \
- tc-qos-helper.sh \
- $(NULL)
-
-SUFFIXES = .in
-dist_plugins_SCRIPTS = \
- tc-qos-helper.sh \
- $(NULL)
-
-dist_noinst_DATA = \
- tc-qos-helper.sh.in \
- README.md \
- $(NULL)
-
-all: all-am
-
-.SUFFIXES:
-.SUFFIXES: .in
-$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(top_srcdir)/build/subst.inc $(am__configure_deps)
- @for dep in $?; do \
- case '$(am__configure_deps)' in \
- *$$dep*) \
- ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \
- && { if test -f $@; then exit 0; else break; fi; }; \
- exit 1;; \
- esac; \
- done; \
- echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu collectors/tc.plugin/Makefile'; \
- $(am__cd) $(top_srcdir) && \
- $(AUTOMAKE) --gnu collectors/tc.plugin/Makefile
-.PRECIOUS: Makefile
-Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
- @case '$?' in \
- *config.status*) \
- cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \
- *) \
- echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \
- cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \
- esac;
-$(top_srcdir)/build/subst.inc:
-
-$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES)
- cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
-
-$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps)
- cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
-$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps)
- cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
-$(am__aclocal_m4_deps):
-install-dist_pluginsSCRIPTS: $(dist_plugins_SCRIPTS)
- @$(NORMAL_INSTALL)
- @list='$(dist_plugins_SCRIPTS)'; test -n "$(pluginsdir)" || list=; \
- if test -n "$$list"; then \
- echo " $(MKDIR_P) '$(DESTDIR)$(pluginsdir)'"; \
- $(MKDIR_P) "$(DESTDIR)$(pluginsdir)" || exit 1; \
- fi; \
- for p in $$list; do \
- if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
- if test -f "$$d$$p"; then echo "$$d$$p"; echo "$$p"; else :; fi; \
- done | \
- sed -e 'p;s,.*/,,;n' \
- -e 'h;s|.*|.|' \
- -e 'p;x;s,.*/,,;$(transform)' | sed 'N;N;N;s,\n, ,g' | \
- $(AWK) 'BEGIN { files["."] = ""; dirs["."] = 1; } \
- { d=$$3; if (dirs[d] != 1) { print "d", d; dirs[d] = 1 } \
- if ($$2 == $$4) { files[d] = files[d] " " $$1; \
- if (++n[d] == $(am__install_max)) { \
- print "f", d, files[d]; n[d] = 0; files[d] = "" } } \
- else { print "f", d "/" $$4, $$1 } } \
- END { for (d in files) print "f", d, files[d] }' | \
- while read type dir files; do \
- if test "$$dir" = .; then dir=; else dir=/$$dir; fi; \
- test -z "$$files" || { \
- echo " $(INSTALL_SCRIPT) $$files '$(DESTDIR)$(pluginsdir)$$dir'"; \
- $(INSTALL_SCRIPT) $$files "$(DESTDIR)$(pluginsdir)$$dir" || exit $$?; \
- } \
- ; done
-
-uninstall-dist_pluginsSCRIPTS:
- @$(NORMAL_UNINSTALL)
- @list='$(dist_plugins_SCRIPTS)'; test -n "$(pluginsdir)" || exit 0; \
- files=`for p in $$list; do echo "$$p"; done | \
- sed -e 's,.*/,,;$(transform)'`; \
- dir='$(DESTDIR)$(pluginsdir)'; $(am__uninstall_files_from_dir)
-tags TAGS:
-
-ctags CTAGS:
-
-cscope cscopelist:
-
-
-distdir: $(DISTFILES)
- @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
- topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
- list='$(DISTFILES)'; \
- dist_files=`for file in $$list; do echo $$file; done | \
- sed -e "s|^$$srcdirstrip/||;t" \
- -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \
- case $$dist_files in \
- */*) $(MKDIR_P) `echo "$$dist_files" | \
- sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \
- sort -u` ;; \
- esac; \
- for file in $$dist_files; do \
- if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \
- if test -d $$d/$$file; then \
- dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \
- if test -d "$(distdir)/$$file"; then \
- find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
- fi; \
- if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \
- cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \
- find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
- fi; \
- cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \
- else \
- test -f "$(distdir)/$$file" \
- || cp -p $$d/$$file "$(distdir)/$$file" \
- || exit 1; \
- fi; \
- done
-check-am: all-am
-check: check-am
-all-am: Makefile $(SCRIPTS) $(DATA)
-installdirs:
- for dir in "$(DESTDIR)$(pluginsdir)"; do \
- test -z "$$dir" || $(MKDIR_P) "$$dir"; \
- done
-install: install-am
-install-exec: install-exec-am
-install-data: install-data-am
-uninstall: uninstall-am
-
-install-am: all-am
- @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
-
-installcheck: installcheck-am
-install-strip:
- if test -z '$(STRIP)'; then \
- $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
- install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
- install; \
- else \
- $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
- install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
- "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \
- fi
-mostlyclean-generic:
-
-clean-generic:
- -test -z "$(CLEANFILES)" || rm -f $(CLEANFILES)
-
-distclean-generic:
- -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
- -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES)
-
-maintainer-clean-generic:
- @echo "This command is intended for maintainers to use"
- @echo "it deletes files that may require special tools to rebuild."
- -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES)
-clean: clean-am
-
-clean-am: clean-generic mostlyclean-am
-
-distclean: distclean-am
- -rm -f Makefile
-distclean-am: clean-am distclean-generic
-
-dvi: dvi-am
-
-dvi-am:
-
-html: html-am
-
-html-am:
-
-info: info-am
-
-info-am:
-
-install-data-am: install-dist_pluginsSCRIPTS
-
-install-dvi: install-dvi-am
-
-install-dvi-am:
-
-install-exec-am:
-
-install-html: install-html-am
-
-install-html-am:
-
-install-info: install-info-am
-
-install-info-am:
-
-install-man:
-
-install-pdf: install-pdf-am
-
-install-pdf-am:
-
-install-ps: install-ps-am
-
-install-ps-am:
-
-installcheck-am:
-
-maintainer-clean: maintainer-clean-am
- -rm -f Makefile
-maintainer-clean-am: distclean-am maintainer-clean-generic
-
-mostlyclean: mostlyclean-am
-
-mostlyclean-am: mostlyclean-generic
-
-pdf: pdf-am
-
-pdf-am:
-
-ps: ps-am
-
-ps-am:
-
-uninstall-am: uninstall-dist_pluginsSCRIPTS
-
-.MAKE: install-am install-strip
-
-.PHONY: all all-am check check-am clean clean-generic cscopelist-am \
- ctags-am distclean distclean-generic distdir dvi dvi-am html \
- html-am info info-am install install-am install-data \
- install-data-am install-dist_pluginsSCRIPTS install-dvi \
- install-dvi-am install-exec install-exec-am install-html \
- install-html-am install-info install-info-am install-man \
- install-pdf install-pdf-am install-ps install-ps-am \
- install-strip installcheck installcheck-am installdirs \
- maintainer-clean maintainer-clean-generic mostlyclean \
- mostlyclean-generic pdf pdf-am ps ps-am tags-am uninstall \
- uninstall-am uninstall-dist_pluginsSCRIPTS
-
-.in:
- if sed \
- -e 's#[@]localstatedir_POST@#$(localstatedir)#g' \
- -e 's#[@]sbindir_POST@#$(sbindir)#g' \
- -e 's#[@]sysconfdir_POST@#$(sysconfdir)#g' \
- -e 's#[@]pythondir_POST@#$(pythondir)#g' \
- -e 's#[@]configdir_POST@#$(configdir)#g' \
- -e 's#[@]libconfigdir_POST@#$(libconfigdir)#g' \
- -e 's#[@]cachedir_POST@#$(cachedir)#g' \
- $< > $@.tmp; then \
- mv "$@.tmp" "$@"; \
- else \
- rm -f "$@.tmp"; \
- false; \
- fi
-
-# Tell versions [3.59,3.63) of GNU make to not export all variables.
-# Otherwise a system limit (for SysV at least) may be exceeded.
-.NOEXPORT:
diff --git a/collectors/tc.plugin/README.md b/collectors/tc.plugin/README.md
index a8b151de..4dc1a1d2 100644
--- a/collectors/tc.plugin/README.md
+++ b/collectors/tc.plugin/README.md
@@ -1,4 +1,4 @@
-## tc.plugin
+# tc.plugin
Live demo - **[see it in action here](https://registry.my-netdata.io/#menu_tc)** !
@@ -6,15 +6,11 @@ Live demo - **[see it in action here](https://registry.my-netdata.io/#menu_tc)**
Netdata monitors `tc` QoS classes for all interfaces.
-If you also use [FireQOS](http://firehol.org/tutorial/fireqos-new-user/) it will collect
-interface and class names.
+If you also use [FireQOS](http://firehol.org/tutorial/fireqos-new-user/), it will collect interface and class names.
-There is a [shell helper](tc-qos-helper.sh.in) for this (all parsing is done by the plugin
-in `C` code - this shell script is just a configuration for the command to run to get `tc` output).
+There is a [shell helper](tc-qos-helper.sh.in) for this (all parsing is done by the plugin in `C` code - the shell script merely configures which command to run to get the `tc` output).
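+
+In practice the helper just wraps the command's output in a simple line protocol that the plugin parses. For one monitored interface (the name here is illustrative) the emitted block looks roughly like this - see `show_tc()` in the helper script further down:
+
+```
+BEGIN eth0
+<output of: tc -s qdisc show dev eth0>
+END eth0
+```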
-The source of the tc plugin is [here](plugin_tc.c). It is somewhat complex, because a state
-machine was needed to keep track of all the `tc` classes, including the pseudo classes tc
-dynamically creates.
+The source of the tc plugin is [here](plugin_tc.c). It is somewhat complex, because a state machine was needed to keep track of all the `tc` classes, including the pseudo classes tc dynamically creates.
## Motivation
@@ -80,11 +76,16 @@ Once **traffic classification** is applied, we can use **[netdata](https://githu
QoS is extremely light. You will configure it once, and this is it. It will not bother you again and it will not use any noticeable CPU resources, especially on application and database servers.
----
+This is QoS from a home Linux router. Check these features:
+
+1. It is real-time (per second updates)
+2. QoS really works in Linux - check that the `background` traffic is squeezed when `surfing` needs it.
-## QoS in Linux? Have you lost your mind?
+![test2](https://cloud.githubusercontent.com/assets/2662304/14093004/68966020-f553-11e5-98fe-ffee2086fafd.gif)
+
+---
-Yes I know... but no, I have not!
+## QoS in Linux?
Of course, `tc` is probably **the most undocumented, complicated and unfriendly** command in Linux.
@@ -108,17 +109,13 @@ For example, do you know that for matching a simple port range in `tc`, e.g. all
32768/0x8000
```
-I know what you are thinking right now! **And I agree!**
+Each `value/mask` pair matches any port whose bits selected by the mask equal the value (`32768/0x8000`, for example, matches every port with the top bit set, i.e. 32768-65535), so a simple range takes a whole series of such pairs. To do it the hard way, you can go through the [tc configuration steps](#qos-configuration-with-tc). An easier way is to use **[FireQOS](https://firehol.org/tutorial/fireqos-new-user/)**, a tool that simplifies QoS management in Linux.
-This is why I wrote **[FireQOS](https://firehol.org/tutorial/fireqos-new-user/)**, a tool to simplify QoS management in Linux.
+## QoS Configuration with FireHOL
The **[FireHOL](https://firehol.org/)** package already distributes **[FireQOS](https://firehol.org/tutorial/fireqos-new-user/)**. Check the **[FireQOS tutorial](https://firehol.org/tutorial/fireqos-new-user/)** to learn how to write your own QoS configuration.
-With **[FireQOS](https://firehol.org/tutorial/fireqos-new-user/)**, it is **really simple for everyone to use QoS in Linux**. Just install the package `firehol`. It should already be available for your distribution. If not, check the **[FireHOL Installation Guide](https://firehol.org/installing/)**. After that, you will have the `fireqos` command which uses a configuration like the following:
-
-## QoS Configuration
-
-This is the file `/etc/firehol/fireqos.conf` we use at the netdata demo site:
+With **[FireQOS](https://firehol.org/tutorial/fireqos-new-user/)**, it is **really simple for everyone to use QoS in Linux**. Just install the package `firehol`. It should already be available for your distribution. If not, check the **[FireHOL Installation Guide](https://firehol.org/installing/)**. After that, you will have the `fireqos` command, which reads a configuration like the following `/etc/firehol/fireqos.conf`, used at the netdata demo site:
```sh
# configure the netdata ports
@@ -166,15 +163,33 @@ And this is what you are going to get:
![image](https://cloud.githubusercontent.com/assets/2662304/14436322/c91d90a4-0024-11e6-9fb1-57cdef1580df.png)
----
-
-## More examples:
+## QoS Configuration with tc
+
+First, set up the `tc` rules in `rc.local`, using commands that assign different DSCP markings to different classids. You can see one such example in [github issue #4563](https://github.com/netdata/netdata/issues/4563#issuecomment-455711973).
+
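+For illustration only - the interface name, rates, and handle numbers below are placeholders, not a recommended policy - a minimal set of `tc` commands that creates an HTB qdisc with two classes could look like this:
+
+```sh
+# illustrative sketch: one HTB qdisc with two classes on eth0
+tc qdisc add dev eth0 root handle 2: htb default 1
+tc class add dev eth0 parent 2: classid 2:1 htb rate 100mbit
+tc class add dev eth0 parent 2: classid 2:8 htb rate 10mbit ceil 100mbit
+```
+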
+Then, map the classids to names by creating `/etc/iproute2/tc_cls`. For example:
+```
2:1 Standard
+2:8 LowPriorityData
+2:10 HighThroughputData
+2:16 OAM
+2:18 LowLatencyData
+2:24 BroadcastVideo
+2:26 MultimediaStreaming
+2:32 RealTimeInteractive
+2:34 MultimediaConferencing
+2:40 Signalling
+2:46 Telephony
+2:48 NetworkControl
+```
-This is QoS from my home linux router. Check these features:
+Add the following configuration option in `/etc/netdata.conf`:
+```
[plugin:tc]
+ enable show all classes and qdiscs for all interfaces = yes
+```
-1. It is real-time (per second updates)
-2. QoS really works in Linux - check that the `background` traffic is squeezed when `surfing` needs it.
+Finally, create `/etc/netdata/tc-qos-helper.conf` with this content:
+```
tc_show="class"
```
-![test2](https://cloud.githubusercontent.com/assets/2662304/14093004/68966020-f553-11e5-98fe-ffee2086fafd.gif)
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Ftc.plugin%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/collectors/tc.plugin/tc-qos-helper.sh b/collectors/tc.plugin/tc-qos-helper.sh
deleted file mode 100644
index a1a2b914..00000000
--- a/collectors/tc.plugin/tc-qos-helper.sh
+++ /dev/null
@@ -1,315 +0,0 @@
-#!/usr/bin/env bash
-
-# netdata
-# real-time performance and health monitoring, done right!
-# (C) 2017 Costa Tsaousis <costa@tsaousis.gr>
-# SPDX-License-Identifier: GPL-3.0-or-later
-#
-# This script is a helper to allow netdata collect tc data.
-# tc output parsing has been implemented in C, inside netdata
-# This script allows setting names to dimensions.
-
-export PATH="${PATH}:/sbin:/usr/sbin:/usr/local/sbin"
-export LC_ALL=C
-
-
-# -----------------------------------------------------------------------------
-# logging functions
-
-PROGRAM_FILE="$0"
-PROGRAM_NAME="$(basename $0)"
-PROGRAM_NAME="${PROGRAM_NAME/.plugin}"
-
-logdate() {
- date "+%Y-%m-%d %H:%M:%S"
-}
-
-log() {
- local status="${1}"
- shift
-
- echo >&2 "$(logdate): ${PROGRAM_NAME}: ${status}: ${*}"
-
-}
-
-warning() {
- log WARNING "${@}"
-}
-
-error() {
- log ERROR "${@}"
-}
-
-info() {
- log INFO "${@}"
-}
-
-fatal() {
- log FATAL "${@}"
- exit 1
-}
-
-debug=0
-debug() {
- [ $debug -eq 1 ] && log DEBUG "${@}"
-}
-
-# -----------------------------------------------------------------------------
-# find /var/run/fireqos
-
-# the default
-fireqos_run_dir="/var/run/fireqos"
-
-function realdir {
- local r="$1"
- local t=$(readlink "$r")
-
- while [ "$t" ]
- do
- r=$(cd $(dirname "$r") && cd $(dirname "$t") && pwd -P)/$(basename "$t")
- t=$(readlink "$r")
- done
-
- dirname "$r"
-}
-
-if [ ! -d "${fireqos_run_dir}" ]
- then
-
- # the fireqos executable - we will use it to find its config
- fireqos="$(which fireqos 2>/dev/null || command -v fireqos 2>/dev/null)"
-
- if [ ! -z "${fireqos}" ]
- then
-
- fireqos_exec_dir="$(realdir ${fireqos})"
-
- if [ ! -z "${fireqos_exec_dir}" -a "${fireqos_exec_dir}" != "." -a -f "${fireqos_exec_dir}/install.config" ]
- then
-
- LOCALSTATEDIR=
- source "${fireqos_exec_dir}/install.config"
-
- if [ -d "${LOCALSTATEDIR}/run/fireqos" ]
- then
- fireqos_run_dir="${LOCALSTATEDIR}/run/fireqos"
- else
- warning "FireQoS is installed as '${fireqos}', its installation config at '${fireqos_exec_dir}/install.config' specifies local state data at '${LOCALSTATEDIR}/run/fireqos', but this directory is not found or is not readable (check the permissions of its parents)."
- fi
- else
- warning "Although FireQoS is installed on this system as '${fireqos}', I cannot find/read its installation configuration at '${fireqos_exec_dir}/install.config'."
- fi
- else
- warning "FireQoS is not installed on this system. Use FireQoS to apply traffic QoS and expose the class names to netdata. Check https://github.com/netdata/netdata/tree/master/collectors/tc.plugin#tcplugin"
- fi
-fi
-
-# -----------------------------------------------------------------------------
-
-[ -z "${NETDATA_PLUGINS_DIR}" ] && NETDATA_PLUGINS_DIR="$(dirname "${0}")"
-[ -z "${NETDATA_USER_CONFIG_DIR}" ] && NETDATA_USER_CONFIG_DIR="/usr/local/etc/netdata"
-[ -z "${NETDATA_STOCK_CONFIG_DIR}" ] && NETDATA_STOCK_CONFIG_DIR="/usr/local/lib/netdata/conf.d"
-
-plugins_dir="${NETDATA_PLUGINS_DIR}"
-tc="$(which tc 2>/dev/null || command -v tc 2>/dev/null)"
-
-
-# -----------------------------------------------------------------------------
-# user configuration
-
-# time in seconds to refresh QoS class/qdisc names
-qos_get_class_names_every=120
-
-# time in seconds to exit - netdata will restart the script
-qos_exit_every=3600
-
-# what to use? classes or qdiscs?
-tc_show="qdisc" # can also be "class"
-
-
-# -----------------------------------------------------------------------------
-# check if we have a valid number for interval
-
-t=${1}
-update_every=$((t))
-[ $((update_every)) -lt 1 ] && update_every=${NETDATA_UPDATE_EVERY}
-[ $((update_every)) -lt 1 ] && update_every=1
-
-
-# -----------------------------------------------------------------------------
-# allow the user to override our defaults
-
-for CONFIG in "${NETDATA_STOCK_CONFIG_DIR}/tc-qos-helper.conf" "${NETDATA_USER_CONFIG_DIR}/tc-qos-helper.conf"
-do
- if [ -f "${CONFIG}" ]
- then
- info "Loading config file '${CONFIG}'..."
- source "${CONFIG}"
- [ $? -ne 0 ] && error "Failed to load config file '${CONFIG}'."
- else
- warning "Cannot find file '${CONFIG}'."
- fi
-done
-
-case "${tc_show}" in
- qdisc|class)
- ;;
-
- *)
- error "tc_show variable can be either 'qdisc' or 'class' but is set to '${tc_show}'. Assuming it is 'qdisc'."
- tc_show="qdisc"
- ;;
-esac
-
-
-# -----------------------------------------------------------------------------
-# default sleep function
-
-LOOPSLEEPMS_LASTWORK=0
-loopsleepms() {
- sleep $1
-}
-
-# if found and included, this file overwrites loopsleepms()
-# with a high resolution timer function for precise looping.
-. "${plugins_dir}/loopsleepms.sh.inc"
-
-
-# -----------------------------------------------------------------------------
-# final checks we can run
-
-if [ -z "${tc}" -o ! -x "${tc}" ]
- then
- fatal "cannot find command 'tc' in this system."
-fi
-
-tc_devices=
-fix_names=
-
-# -----------------------------------------------------------------------------
-
-setclassname() {
- if [ "${tc_show}" = "qdisc" ]
- then
- echo "SETCLASSNAME $4 $2"
- else
- echo "SETCLASSNAME $3 $2"
- fi
-}
-
-show_tc_cls() {
- [ "${tc_show}" = "qdisc" ] && return 1
-
- local x="${1}"
-
- if [ -f /etc/iproute2/tc_cls ]
- then
- local classid name rest
- while read classid name rest
- do
- [ -z "${classid}" -o -z "${name}" -o "${classid}" = "#" -o "${name}" = "#" -o "${classid:0:1}" = "#" -o "${name:0:1}" = "#" ] && continue
- setclassname "" "${name}" "${classid}"
- done </etc/iproute2/tc_cls
- return 0
- fi
- return 1
-}
-
-show_fireqos_names() {
- local x="${1}" name n interface_dev interface_classes interface_classes_monitor
-
- if [ -f "${fireqos_run_dir}/ifaces/${x}" ]
- then
- name="$(<"${fireqos_run_dir}/ifaces/${x}")"
- echo "SETDEVICENAME ${name}"
-
- interface_dev=
- interface_classes=
- interface_classes_monitor=
- source "${fireqos_run_dir}/${name}.conf"
- for n in ${interface_classes_monitor}
- do
- setclassname ${n//|/ }
- done
- [ ! -z "${interface_dev}" ] && echo "SETDEVICEGROUP ${interface_dev}"
-
- return 0
- fi
-
- return 1
-}
-
-show_tc() {
- local x="${1}"
-
- echo "BEGIN ${x}"
-
- # netdata can parse the output of tc
- ${tc} -s ${tc_show} show dev ${x}
-
- # check FireQOS names for classes
- if [ ! -z "${fix_names}" ]
- then
- show_fireqos_names "${x}" || show_tc_cls "${x}"
- fi
-
- echo "END ${x}"
-}
-
-find_tc_devices() {
- local count=0 devs= dev rest l
-
- # find all the devices in the system
- # without forking
- while IFS=":| " read dev rest
- do
- count=$((count + 1))
- [ ${count} -le 2 ] && continue
- devs="${devs} ${dev}"
- done </proc/net/dev
-
- # from all the devices find the ones
- # that have QoS defined
- # unfortunately, one fork per device cannot be avoided
- tc_devices=
- for dev in ${devs}
- do
- l="$(${tc} class show dev ${dev} 2>/dev/null)"
- [ ! -z "${l}" ] && tc_devices="${tc_devices} ${dev}"
- done
-}
-
-# update devices and class names
-# once every 2 minutes
-names_every=$((qos_get_class_names_every / update_every))
-
-# exit this script every hour
-# it will be restarted automatically
-exit_after=$((qos_exit_every / update_every))
-
-c=0
-gc=0
-while [ 1 ]
-do
- fix_names=
- c=$((c + 1))
- gc=$((gc + 1))
-
- if [ ${c} -le 1 -o ${c} -ge ${names_every} ]
- then
- c=1
- fix_names="YES"
- find_tc_devices
- fi
-
- for d in ${tc_devices}
- do
- show_tc ${d}
- done
-
- echo "WORKTIME ${LOOPSLEEPMS_LASTWORK}"
-
- loopsleepms ${update_every}
-
- [ ${gc} -gt ${exit_after} ] && exit 0
-done
diff --git a/collectors/tc.plugin/tc-qos-helper.sh.in b/collectors/tc.plugin/tc-qos-helper.sh.in
index a15eab89..01353be4 100755
--- a/collectors/tc.plugin/tc-qos-helper.sh.in
+++ b/collectors/tc.plugin/tc-qos-helper.sh.in
@@ -12,46 +12,44 @@
export PATH="${PATH}:/sbin:/usr/sbin:/usr/local/sbin"
export LC_ALL=C
-
# -----------------------------------------------------------------------------
# logging functions
-PROGRAM_FILE="$0"
-PROGRAM_NAME="$(basename $0)"
-PROGRAM_NAME="${PROGRAM_NAME/.plugin}"
+PROGRAM_NAME="$(basename "$0")"
+PROGRAM_NAME="${PROGRAM_NAME/.plugin/}"
logdate() {
- date "+%Y-%m-%d %H:%M:%S"
+ date "+%Y-%m-%d %H:%M:%S"
}
log() {
- local status="${1}"
- shift
+ local status="${1}"
+ shift
- echo >&2 "$(logdate): ${PROGRAM_NAME}: ${status}: ${*}"
+ echo >&2 "$(logdate): ${PROGRAM_NAME}: ${status}: ${*}"
}
warning() {
- log WARNING "${@}"
+ log WARNING "${@}"
}
error() {
- log ERROR "${@}"
+ log ERROR "${@}"
}
info() {
- log INFO "${@}"
+ log INFO "${@}"
}
fatal() {
- log FATAL "${@}"
- exit 1
+ log FATAL "${@}"
+ exit 1
}
debug=0
debug() {
- [ $debug -eq 1 ] && log DEBUG "${@}"
+ [ $debug -eq 1 ] && log DEBUG "${@}"
}
# -----------------------------------------------------------------------------
@@ -60,59 +58,55 @@ debug() {
# the default
fireqos_run_dir="/var/run/fireqos"
-function realdir {
- local r="$1"
- local t=$(readlink "$r")
+function realdir() {
+ local r
+ local t
+ r="$1"
+ t="$(readlink "$r")"
- while [ "$t" ]
- do
- r=$(cd $(dirname "$r") && cd $(dirname "$t") && pwd -P)/$(basename "$t")
- t=$(readlink "$r")
- done
+ while [ "$t" ]; do
+ r=$(cd "$(dirname "$r")" && cd "$(dirname "$t")" && pwd -P)/$(basename "$t")
+ t=$(readlink "$r")
+ done
- dirname "$r"
+ dirname "$r"
}
-if [ ! -d "${fireqos_run_dir}" ]
- then
-
- # the fireqos executable - we will use it to find its config
- fireqos="$(which fireqos 2>/dev/null || command -v fireqos 2>/dev/null)"
+if [ ! -d "${fireqos_run_dir}" ]; then
- if [ ! -z "${fireqos}" ]
- then
+ # the fireqos executable - we will use it to find its config
+ fireqos="$(command -v fireqos 2>/dev/null)"
- fireqos_exec_dir="$(realdir ${fireqos})"
+ if [ -n "${fireqos}" ]; then
- if [ ! -z "${fireqos_exec_dir}" -a "${fireqos_exec_dir}" != "." -a -f "${fireqos_exec_dir}/install.config" ]
- then
+ fireqos_exec_dir="$(realdir "${fireqos}")"
- LOCALSTATEDIR=
- source "${fireqos_exec_dir}/install.config"
+ if [ -n "${fireqos_exec_dir}" ] && [ "${fireqos_exec_dir}" != "." ] && [ -f "${fireqos_exec_dir}/install.config" ]; then
+ LOCALSTATEDIR=
+ #shellcheck source=/dev/null
+ source "${fireqos_exec_dir}/install.config"
- if [ -d "${LOCALSTATEDIR}/run/fireqos" ]
- then
- fireqos_run_dir="${LOCALSTATEDIR}/run/fireqos"
- else
- warning "FireQoS is installed as '${fireqos}', its installation config at '${fireqos_exec_dir}/install.config' specifies local state data at '${LOCALSTATEDIR}/run/fireqos', but this directory is not found or is not readable (check the permissions of its parents)."
- fi
- else
- warning "Although FireQoS is installed on this system as '${fireqos}', I cannot find/read its installation configuration at '${fireqos_exec_dir}/install.config'."
- fi
- else
- warning "FireQoS is not installed on this system. Use FireQoS to apply traffic QoS and expose the class names to netdata. Check https://github.com/netdata/netdata/tree/master/collectors/tc.plugin#tcplugin"
- fi
+ if [ -d "${LOCALSTATEDIR}/run/fireqos" ]; then
+ fireqos_run_dir="${LOCALSTATEDIR}/run/fireqos"
+ else
+ warning "FireQoS is installed as '${fireqos}', its installation config at '${fireqos_exec_dir}/install.config' specifies local state data at '${LOCALSTATEDIR}/run/fireqos', but this directory is not found or is not readable (check the permissions of its parents)."
+ fi
+ else
+ warning "Although FireQoS is installed on this system as '${fireqos}', I cannot find/read its installation configuration at '${fireqos_exec_dir}/install.config'."
+ fi
+ else
+ warning "FireQoS is not installed on this system. Use FireQoS to apply traffic QoS and expose the class names to netdata. Check https://github.com/netdata/netdata/tree/master/collectors/tc.plugin#tcplugin"
+ fi
fi
# -----------------------------------------------------------------------------
[ -z "${NETDATA_PLUGINS_DIR}" ] && NETDATA_PLUGINS_DIR="$(dirname "${0}")"
-[ -z "${NETDATA_USER_CONFIG_DIR}" ] && NETDATA_USER_CONFIG_DIR="@configdir_POST@"
+[ -z "${NETDATA_USER_CONFIG_DIR}" ] && NETDATA_USER_CONFIG_DIR="@configdir_POST@"
[ -z "${NETDATA_STOCK_CONFIG_DIR}" ] && NETDATA_STOCK_CONFIG_DIR="@libconfigdir_POST@"
plugins_dir="${NETDATA_PLUGINS_DIR}"
-tc="$(which tc 2>/dev/null || command -v tc 2>/dev/null)"
-
+tc="$(command -v tc 2>/dev/null)"
# -----------------------------------------------------------------------------
# user configuration
@@ -126,7 +120,6 @@ qos_exit_every=3600
# what to use? classes or qdiscs?
tc_show="qdisc" # can also be "class"
-
# -----------------------------------------------------------------------------
# check if we have a valid number for interval
@@ -135,52 +128,46 @@ update_every=$((t))
[ $((update_every)) -lt 1 ] && update_every=${NETDATA_UPDATE_EVERY}
[ $((update_every)) -lt 1 ] && update_every=1
-
# -----------------------------------------------------------------------------
# allow the user to override our defaults
-for CONFIG in "${NETDATA_STOCK_CONFIG_DIR}/tc-qos-helper.conf" "${NETDATA_USER_CONFIG_DIR}/tc-qos-helper.conf"
-do
- if [ -f "${CONFIG}" ]
- then
- info "Loading config file '${CONFIG}'..."
- source "${CONFIG}"
- [ $? -ne 0 ] && error "Failed to load config file '${CONFIG}'."
- else
- warning "Cannot find file '${CONFIG}'."
- fi
+for CONFIG in "${NETDATA_STOCK_CONFIG_DIR}/tc-qos-helper.conf" "${NETDATA_USER_CONFIG_DIR}/tc-qos-helper.conf"; do
+ if [ -f "${CONFIG}" ]; then
+ info "Loading config file '${CONFIG}'..."
+ #shellcheck source=/dev/null
+ source "${CONFIG}" || error "Failed to load config file '${CONFIG}'."
+ else
+ warning "Cannot find file '${CONFIG}'."
+ fi
done
case "${tc_show}" in
- qdisc|class)
- ;;
+qdisc | class) ;;
- *)
- error "tc_show variable can be either 'qdisc' or 'class' but is set to '${tc_show}'. Assuming it is 'qdisc'."
- tc_show="qdisc"
- ;;
+*)
+ error "tc_show variable can be either 'qdisc' or 'class' but is set to '${tc_show}'. Assuming it is 'qdisc'."
+ tc_show="qdisc"
+ ;;
esac
-
# -----------------------------------------------------------------------------
# default sleep function
LOOPSLEEPMS_LASTWORK=0
loopsleepms() {
- sleep $1
+ sleep "$1"
}
# if found and included, this file overwrites loopsleepms()
# with a high resolution timer function for precise looping.
+#shellcheck source=/dev/null
. "${plugins_dir}/loopsleepms.sh.inc"
-
# -----------------------------------------------------------------------------
# final checks we can run
-if [ -z "${tc}" -o ! -x "${tc}" ]
- then
- fatal "cannot find command 'tc' in this system."
+if [ -z "${tc}" ] || [ ! -x "${tc}" ]; then
+ fatal "cannot find command 'tc' in this system."
fi
tc_devices=
@@ -189,94 +176,91 @@ fix_names=
# -----------------------------------------------------------------------------
setclassname() {
- if [ "${tc_show}" = "qdisc" ]
- then
- echo "SETCLASSNAME $4 $2"
- else
- echo "SETCLASSNAME $3 $2"
- fi
+ if [ "${tc_show}" = "qdisc" ]; then
+ echo "SETCLASSNAME $4 $2"
+ else
+ echo "SETCLASSNAME $3 $2"
+ fi
}
show_tc_cls() {
- [ "${tc_show}" = "qdisc" ] && return 1
-
- local x="${1}"
-
- if [ -f /etc/iproute2/tc_cls ]
- then
- local classid name rest
- while read classid name rest
- do
- [ -z "${classid}" -o -z "${name}" -o "${classid}" = "#" -o "${name}" = "#" -o "${classid:0:1}" = "#" -o "${name:0:1}" = "#" ] && continue
- setclassname "" "${name}" "${classid}"
- done </etc/iproute2/tc_cls
- return 0
- fi
- return 1
+ [ "${tc_show}" = "qdisc" ] && return 1
+
+ local x="${1}"
+
+ if [ -f /etc/iproute2/tc_cls ]; then
+ local classid name rest
+ while read -r classid name rest; do
+ if [ -z "${classid}" ] ||
+ [ -z "${name}" ] ||
+ [ "${classid}" = "#" ] ||
+ [ "${name}" = "#" ] ||
+ [ "${classid:0:1}" = "#" ] ||
+ [ "${name:0:1}" = "#" ]; then
+ continue
+ fi
+ setclassname "" "${name}" "${classid}"
+ done </etc/iproute2/tc_cls
+ return 0
+ fi
+ return 1
}
show_fireqos_names() {
- local x="${1}" name n interface_dev interface_classes interface_classes_monitor
-
- if [ -f "${fireqos_run_dir}/ifaces/${x}" ]
- then
- name="$(<"${fireqos_run_dir}/ifaces/${x}")"
- echo "SETDEVICENAME ${name}"
-
- interface_dev=
- interface_classes=
- interface_classes_monitor=
- source "${fireqos_run_dir}/${name}.conf"
- for n in ${interface_classes_monitor}
- do
- setclassname ${n//|/ }
- done
- [ ! -z "${interface_dev}" ] && echo "SETDEVICEGROUP ${interface_dev}"
-
- return 0
- fi
-
- return 1
+ local x="${1}" name n interface_dev interface_classes_monitor
+
+ if [ -f "${fireqos_run_dir}/ifaces/${x}" ]; then
+ name="$(<"${fireqos_run_dir}/ifaces/${x}")"
+ echo "SETDEVICENAME ${name}"
+
+ #shellcheck source=/dev/null
+ source "${fireqos_run_dir}/${name}.conf"
+ for n in ${interface_classes_monitor}; do
+ setclassname ${n//|/ }
+ done
+ [ -n "${interface_dev}" ] && echo "SETDEVICEGROUP ${interface_dev}"
+
+ return 0
+ fi
+
+ return 1
}
show_tc() {
- local x="${1}"
+ local x="${1}"
- echo "BEGIN ${x}"
+ echo "BEGIN ${x}"
- # netdata can parse the output of tc
- ${tc} -s ${tc_show} show dev ${x}
+ # netdata can parse the output of tc
+ ${tc} -s ${tc_show} show dev "${x}"
- # check FireQOS names for classes
- if [ ! -z "${fix_names}" ]
- then
- show_fireqos_names "${x}" || show_tc_cls "${x}"
- fi
+ # check FireQOS names for classes
+ if [ -n "${fix_names}" ]; then
+ show_fireqos_names "${x}" || show_tc_cls "${x}"
+ fi
- echo "END ${x}"
+ echo "END ${x}"
}
find_tc_devices() {
- local count=0 devs= dev rest l
-
- # find all the devices in the system
- # without forking
- while IFS=":| " read dev rest
- do
- count=$((count + 1))
- [ ${count} -le 2 ] && continue
- devs="${devs} ${dev}"
- done </proc/net/dev
-
- # from all the devices find the ones
- # that have QoS defined
- # unfortunately, one fork per device cannot be avoided
- tc_devices=
- for dev in ${devs}
- do
- l="$(${tc} class show dev ${dev} 2>/dev/null)"
- [ ! -z "${l}" ] && tc_devices="${tc_devices} ${dev}"
- done
+ local count=0 devs dev rest l
+
+ # find all the devices in the system
+ # without forking
+ while IFS=":| " read -r dev rest; do
+ count=$((count + 1))
+ [ ${count} -le 2 ] && continue
+ devs="${devs} ${dev}"
+ done </proc/net/dev
+
+ # from all the devices find the ones
+ # that have QoS defined
+ # unfortunately, one fork per device cannot be avoided
+ tc_devices=
+ for dev in ${devs}; do
+ l="$(${tc} class show dev "${dev}" 2>/dev/null)"
+ [ -n "${l}" ] && tc_devices="${tc_devices} ${dev}"
+ done
}
# update devices and class names
@@ -289,27 +273,24 @@ exit_after=$((qos_exit_every / update_every))
c=0
gc=0
-while [ 1 ]
-do
- fix_names=
- c=$((c + 1))
- gc=$((gc + 1))
+while true; do
+ fix_names=
+ c=$((c + 1))
+ gc=$((gc + 1))
- if [ ${c} -le 1 -o ${c} -ge ${names_every} ]
- then
- c=1
- fix_names="YES"
- find_tc_devices
- fi
+ if [ ${c} -le 1 ] || [ ${c} -ge ${names_every} ]; then
+ c=1
+ fix_names="YES"
+ find_tc_devices
+ fi
- for d in ${tc_devices}
- do
- show_tc ${d}
- done
+ for d in ${tc_devices}; do
+ show_tc "${d}"
+ done
- echo "WORKTIME ${LOOPSLEEPMS_LASTWORK}"
+ echo "WORKTIME ${LOOPSLEEPMS_LASTWORK}"
- loopsleepms ${update_every}
+ loopsleepms ${update_every}
- [ ${gc} -gt ${exit_after} ] && exit 0
+ [ ${gc} -gt ${exit_after} ] && exit 0
done